Chromium Code Reviews

Unified Diff: src/mips/code-stubs-mips.cc

Issue 6993054: MIPS: Fixed FPU rounding checks and related errors in the Simulator. (Closed)
Patch Set: Created 9 years, 6 months ago
Next file in this patch: src/mips/constants-mips.h
 // Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 654 matching lines...)
     // Save FCSR.
     __ cfc1(scratch1, FCSR);
     // Disable FPU exceptions.
     __ ctc1(zero_reg, FCSR);
     __ trunc_w_d(single_scratch, double_dst);
     // Retrieve FCSR.
     __ cfc1(scratch2, FCSR);
     // Restore FCSR.
     __ ctc1(scratch1, FCSR);
 
-    // Check for inexact conversion.
-    __ srl(scratch2, scratch2, kFCSRFlagShift);
-    __ And(scratch2, scratch2, (kFCSRFlagMask | kFCSRInexactFlagBit));
+    // Check for inexact conversion or exception.
+    __ And(scratch2, scratch2, kFCSRFlagMask);
 
     // Jump to not_int32 if the operation did not succeed.
     __ Branch(not_int32, ne, scratch2, Operand(zero_reg));
 
     if (destination == kCoreRegisters) {
       __ Move(dst1, dst2, double_dst);
     }
 
   } else {
     ASSERT(!scratch1.is(object) && !scratch2.is(object));
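The change above drops the srl/extra-mask sequence in favor of a single And against kFCSRFlagMask, so the test now reads the flag bits at their raw positions in FCSR; the companion change to src/mips/constants-mips.h presumably redefines kFCSRFlagMask as the unshifted mask of all five flag bits, inexact included. A minimal sketch of the resulting check, using hypothetical constants that mirror the MIPS32 FCSR flag field (bits 2 through 6):

    #include <cstdint>

    // Hypothetical constants mirroring the MIPS32 FCSR flag field.
    const uint32_t kFCSRInexactFlagBit      = 1 << 2;
    const uint32_t kFCSRUnderflowFlagBit    = 1 << 3;
    const uint32_t kFCSROverflowFlagBit     = 1 << 4;
    const uint32_t kFCSRDivideByZeroFlagBit = 1 << 5;
    const uint32_t kFCSRInvalidOpFlagBit    = 1 << 6;
    const uint32_t kFCSRFlagMask = kFCSRInexactFlagBit |
                                   kFCSRUnderflowFlagBit |
                                   kFCSROverflowFlagBit |
                                   kFCSRDivideByZeroFlagBit |
                                   kFCSRInvalidOpFlagBit;

    // trunc_w_d succeeded iff no flag bit was raised while exceptions
    // were disabled: the result is then an exact, in-range int32.
    bool ConversionSucceeded(uint32_t fcsr_after_trunc) {
      return (fcsr_after_trunc & kFCSRFlagMask) == 0;
    }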
(...skipping 62 matching lines...)
     // Save FCSR.
     __ cfc1(scratch1, FCSR);
     // Disable FPU exceptions.
     __ ctc1(zero_reg, FCSR);
     __ trunc_w_d(double_scratch, double_scratch);
     // Retrieve FCSR.
     __ cfc1(scratch2, FCSR);
     // Restore FCSR.
     __ ctc1(scratch1, FCSR);
 
-    // Check for inexact conversion.
-    __ srl(scratch2, scratch2, kFCSRFlagShift);
-    __ And(scratch2, scratch2, (kFCSRFlagMask | kFCSRInexactFlagBit));
+    // Check for inexact conversion or exception.
+    __ And(scratch2, scratch2, kFCSRFlagMask);
 
     // Jump to not_int32 if the operation did not succeed.
     __ Branch(not_int32, ne, scratch2, Operand(zero_reg));
     // Get the result in the destination register.
     __ mfc1(dst, double_scratch);
 
   } else {
     // Load the double value in the destination registers.
     __ lw(scratch2, FieldMemOperand(object, HeapNumber::kExponentOffset));
     __ lw(scratch1, FieldMemOperand(object, HeapNumber::kMantissaOffset));
(...skipping 1186 matching lines...)
 
 void UnaryOpStub::GenerateHeapNumberStubBitNot(MacroAssembler* masm) {
   Label non_smi, slow;
   GenerateSmiCodeBitNot(masm, &non_smi);
   __ bind(&non_smi);
   GenerateHeapNumberCodeBitNot(masm, &slow);
   __ bind(&slow);
   GenerateTypeTransition(masm);
 }
 
+
 void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
                                             Label* slow) {
   EmitCheckForHeapNumber(masm, a0, a1, t2, slow);
   // a0 is a heap number. Get a new heap number in a1.
   if (mode_ == UNARY_OVERWRITE) {
     __ lw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
     __ Xor(a2, a2, Operand(HeapNumber::kSignMask));  // Flip sign.
     __ sw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
   } else {
     Label slow_allocate_heapnumber, heapnumber_allocated;
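The UNARY_OVERWRITE path above negates the heap number in place without touching the FPU: the IEEE-754 sign bit lives in the high (exponent) word, so a single Xor with HeapNumber::kSignMask flips the sign. The same trick on a plain double, as a sketch assuming the usual 0x80000000 high-word sign mask:

    #include <cstdint>
    #include <cstring>

    // Negate a double by toggling the top bit of its 64-bit pattern,
    // mirroring the stub's Xor on the heap number's exponent word.
    double FlipSign(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));
      bits ^= 0x8000000000000000ULL;  // IEEE-754 sign bit.
      std::memcpy(&value, &bits, sizeof(value));
      return value;
    }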
(...skipping 791 matching lines...)
     // Save FCSR.
     __ cfc1(scratch1, FCSR);
     // Disable FPU exceptions.
     __ ctc1(zero_reg, FCSR);
     __ trunc_w_d(single_scratch, f10);
     // Retrieve FCSR.
     __ cfc1(scratch2, FCSR);
     // Restore FCSR.
     __ ctc1(scratch1, FCSR);
 
-    // Check for inexact conversion.
-    __ srl(scratch2, scratch2, kFCSRFlagShift);
+    // Check for inexact conversion or exception.
     __ And(scratch2, scratch2, kFCSRFlagMask);
 
     if (result_type_ <= BinaryOpIC::INT32) {
       // If scratch2 != 0, result does not fit in a 32-bit integer.
       __ Branch(&transition, ne, scratch2, Operand(zero_reg));
     }
 
     // Check if the result fits in a smi.
     __ mfc1(scratch1, single_scratch);
     __ Addu(scratch2, scratch1, Operand(0x40000000));
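The Addu with 0x40000000 is the usual smi-range probe: a 32-bit value fits in a 31-bit smi iff it lies in [-2^30, 2^30 - 1], and adding 2^30 maps exactly that interval onto the non-negative 32-bit integers, so a later branch only needs to inspect the sign bit of the sum. A sketch under those assumptions:

    #include <cstdint>

    // Fits in a 31-bit smi iff x + 2^30 stays out of the sign bit
    // (two's-complement wraparound assumed, as with MIPS Addu).
    bool FitsInSmi(int32_t x) {
      uint32_t biased = static_cast<uint32_t>(x) + 0x40000000u;
      return static_cast<int32_t>(biased) >= 0;
    }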
(...skipping 3575 matching lines...)
   __ LeaveInternalFrame();
   // Compute the entry point of the rewritten stub.
   __ Addu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
   // Restore registers.
   __ pop(ra);
   __ pop(a0);
   __ pop(a1);
   __ Jump(a2);
 }
 
+
 void DirectCEntryStub::Generate(MacroAssembler* masm) {
   // No need to pop or drop anything, LeaveExitFrame will restore the old
   // stack, thus dropping the allocated space for the return value.
   // The saved ra is after the reserved stack space for the 4 args.
   __ lw(t9, MemOperand(sp, kCArgsSlotsSize));
 
   if (FLAG_debug_code && EnableSlowAsserts()) {
     // In case of an error the return address may point to a memory area
     // filled with kZapValue by the GC.
     // Dereference the address and check for this.
     __ lw(t0, MemOperand(t9));
     __ Assert(ne, "Received invalid return address.", t0,
         Operand(reinterpret_cast<uint32_t>(kZapValue)));
   }
   __ Jump(t9);
 }
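The debug-only Assert above guards against a stale return address: memory reclaimed or moved by the GC is filled with a recognizable zap pattern, so loading the word the saved ra points at and comparing it to kZapValue catches a clobbered slot before the stub jumps through it. A sketch of the idea; the exact 32-bit constant is an assumption here:

    #include <cstdint>

    const uint32_t kZapValue = 0xdeadbeed;  // Assumed 32-bit zap pattern.

    // A return-address slot still looks live if the word it points at
    // was not overwritten by the GC's zapper.
    bool ReturnAddressLooksValid(const uint32_t* return_address) {
      return *return_address != kZapValue;
    }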
 
 
 void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
                                     ExternalReference function) {
   __ li(t9, Operand(function));
   this->GenerateCall(masm, t9);
 }
 
+
 void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
                                     Register target) {
   __ Move(t9, target);
   __ AssertStackIsAligned();
   // Allocate space for arg slots.
   __ Subu(sp, sp, kCArgsSlotsSize);
 
   // Block the trampoline pool through the whole function to make sure the
   // number of generated instructions is constant.
   Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
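GenerateCall drops sp by kCArgsSlotsSize before the call because the MIPS O32 convention makes the caller reserve four words of home space for the register arguments a0 through a3, even though they travel in registers; this is also why Generate above finds the saved ra at sp + kCArgsSlotsSize. A sketch of the assumed layout constants:

    #include <cstddef>

    const size_t kPointerSize    = 4;  // 32-bit MIPS.
    const size_t kCArgSlotCount  = 4;  // Home slots for a0..a3 under O32.
    const size_t kCArgsSlotsSize = kCArgSlotCount * kPointerSize;
    static_assert(kCArgsSlotsSize == 16, "four 32-bit argument slots");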
(...skipping 258 matching lines...)
   __ mov(result, zero_reg);
   __ Ret();
 }
 
 
 #undef __
 
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_MIPS