OLD | NEW |
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 819 matching lines...)
830 | 830 |
831 | 831 |
832 void MacroAssembler::Trunc_uw_d(FPURegister fd, FPURegister fs) { | 832 void MacroAssembler::Trunc_uw_d(FPURegister fd, FPURegister fs) { |
833 Trunc_uw_d(fs, t4); | 833 Trunc_uw_d(fs, t4); |
834 mtc1(t4, fd); | 834 mtc1(t4, fd); |
835 } | 835 } |
836 | 836 |
837 | 837 |
838 void MacroAssembler::Trunc_uw_d(FPURegister fd, Register rs) { | 838 void MacroAssembler::Trunc_uw_d(FPURegister fd, Register rs) { |
839 ASSERT(!fd.is(f22)); | 839 ASSERT(!fd.is(f22)); |
840 ASSERT(!rs.is(t6)); | 840 ASSERT(!rs.is(t8)); |
841 | 841 |
842 // Load 2^31 into f22. | 842 // Load 2^31 into f22. |
843 Or(t6, zero_reg, 0x80000000); | 843 Or(t8, zero_reg, 0x80000000); |
844 Cvt_d_uw(f22, t6); | 844 Cvt_d_uw(f22, t8); |
845 | 845 |
846 // Test if f22 > fd. | 846 // Test if f22 > fd. |
847 c(OLT, D, fd, f22); | 847 c(OLT, D, fd, f22); |
848 | 848 |
849 Label simple_convert; | 849 Label simple_convert; |
850 // If fd < 2^31 we can convert it normally. | 850 // If fd < 2^31 we can convert it normally. |
851 bc1t(&simple_convert); | 851 bc1t(&simple_convert); |
852 | 852 |
853 // First we subtract 2^31 from fd, truncate it to rs, | 853 // First we subtract 2^31 from fd, truncate it to rs, |
854 // then OR 2^31 back into rs. | 854 // then OR 2^31 back into rs. |
855 | 855 |
856 sub_d(f22, fd, f22); | 856 sub_d(f22, fd, f22); |
857 trunc_w_d(f22, f22); | 857 trunc_w_d(f22, f22); |
858 mfc1(rs, f22); | 858 mfc1(rs, f22); |
859 or_(rs, rs, t6); | 859 or_(rs, rs, t8); |
860 | 860 |
861 Label done; | 861 Label done; |
862 Branch(&done); | 862 Branch(&done); |
863 // Simple conversion. | 863 // Simple conversion. |
864 bind(&simple_convert); | 864 bind(&simple_convert); |
865 trunc_w_d(f22, fd); | 865 trunc_w_d(f22, fd); |
866 mfc1(rs, f22); | 866 mfc1(rs, f22); |
867 | 867 |
868 bind(&done); | 868 bind(&done); |
869 } | 869 } |
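
The routine above converts a double in [0, 2^32) to an unsigned word using only the signed trunc_w_d instruction: values below 2^31 truncate directly, while larger values have 2^31 subtracted first and the high bit OR'ed back in afterwards (equivalent to adding 2^31, since the truncated remainder is below 2^31). A minimal C++ sketch of the same idea, with a hypothetical helper name and no V8 dependencies:

  #include <cstdint>

  // Sketch: truncate a double in [0, 2^32) to uint32_t using only a
  // signed 32-bit truncation, mirroring the strategy in Trunc_uw_d.
  uint32_t TruncUwD(double value) {
    const double kTwo31 = 2147483648.0;  // 2^31
    if (value < kTwo31) {
      // Fits in a signed word: convert directly (the "simple_convert" path).
      return static_cast<uint32_t>(static_cast<int32_t>(value));
    }
    // Subtract 2^31 so the value fits in a signed word, truncate, then
    // OR the sign bit back in; this matches the sub_d/trunc_w_d/or_ path.
    return static_cast<uint32_t>(static_cast<int32_t>(value - kTwo31)) |
           0x80000000u;
  }
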
(...skipping 1686 matching lines...)
2556 AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | 2556 AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); |
2557 sw(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset)); | 2557 sw(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset)); |
2558 } | 2558 } |
2559 | 2559 |
2560 | 2560 |
2561 void MacroAssembler::AllocateHeapNumberWithValue(Register result, | 2561 void MacroAssembler::AllocateHeapNumberWithValue(Register result, |
2562 FPURegister value, | 2562 FPURegister value, |
2563 Register scratch1, | 2563 Register scratch1, |
2564 Register scratch2, | 2564 Register scratch2, |
2565 Label* gc_required) { | 2565 Label* gc_required) { |
2566 LoadRoot(t6, Heap::kHeapNumberMapRootIndex); | 2566 LoadRoot(t8, Heap::kHeapNumberMapRootIndex); |
2567 AllocateHeapNumber(result, scratch1, scratch2, t6, gc_required); | 2567 AllocateHeapNumber(result, scratch1, scratch2, t8, gc_required); |
2568 sdc1(value, FieldMemOperand(result, HeapNumber::kValueOffset)); | 2568 sdc1(value, FieldMemOperand(result, HeapNumber::kValueOffset)); |
2569 } | 2569 } |
2570 | 2570 |
2571 | 2571 |
2572 // Copies a fixed number of fields of heap objects from src to dst. | 2572 // Copies a fixed number of fields of heap objects from src to dst. |
2573 void MacroAssembler::CopyFields(Register dst, | 2573 void MacroAssembler::CopyFields(Register dst, |
2574 Register src, | 2574 Register src, |
2575 RegList temps, | 2575 RegList temps, |
2576 int field_count) { | 2576 int field_count) { |
2577 ASSERT((temps & dst.bit()) == 0); | 2577 ASSERT((temps & dst.bit()) == 0); |
(...skipping 664 matching lines...)
3242 Register right, | 3242 Register right, |
3243 Register overflow_dst, | 3243 Register overflow_dst, |
3244 Register scratch) { | 3244 Register scratch) { |
3245 ASSERT(!dst.is(overflow_dst)); | 3245 ASSERT(!dst.is(overflow_dst)); |
3246 ASSERT(!dst.is(scratch)); | 3246 ASSERT(!dst.is(scratch)); |
3247 ASSERT(!overflow_dst.is(scratch)); | 3247 ASSERT(!overflow_dst.is(scratch)); |
3248 ASSERT(!overflow_dst.is(left)); | 3248 ASSERT(!overflow_dst.is(left)); |
3249 ASSERT(!overflow_dst.is(right)); | 3249 ASSERT(!overflow_dst.is(right)); |
3250 ASSERT(!left.is(right)); | 3250 ASSERT(!left.is(right)); |
3251 | 3251 |
3252 // TODO(kalmard) There must be a way to optimize dst == left and dst == right | |
3253 // cases. | |
3254 | |
3255 if (dst.is(left)) { | 3252 if (dst.is(left)) { |
3256 addu(overflow_dst, left, right); | 3253 mov(scratch, left); // Preserve left. |
3257 xor_(dst, overflow_dst, left); | 3254 addu(dst, left, right); // Left is overwritten. |
3258 xor_(scratch, overflow_dst, right); | 3255 xor_(scratch, dst, scratch); // Original left. |
3259 and_(scratch, scratch, dst); | 3256 xor_(overflow_dst, dst, right); |
3260 mov(dst, overflow_dst); | 3257 and_(overflow_dst, overflow_dst, scratch); |
3261 mov(overflow_dst, scratch); | |
3262 } else if (dst.is(right)) { | 3258 } else if (dst.is(right)) { |
3263 addu(overflow_dst, left, right); | 3259 mov(scratch, right); // Preserve right. |
3264 xor_(dst, overflow_dst, right); | 3260 addu(dst, left, right); // Right is overwritten. |
3265 xor_(scratch, overflow_dst, left); | 3261 xor_(scratch, dst, scratch); // Original right. |
3266 and_(scratch, scratch, dst); | 3262 xor_(overflow_dst, dst, left); |
3267 mov(dst, overflow_dst); | 3263 and_(overflow_dst, overflow_dst, scratch); |
3268 mov(overflow_dst, scratch); | |
3269 } else { | 3264 } else { |
3270 addu(dst, left, right); | 3265 addu(dst, left, right); |
3271 xor_(overflow_dst, dst, left); | 3266 xor_(overflow_dst, dst, left); |
3272 xor_(scratch, dst, right); | 3267 xor_(scratch, dst, right); |
3273 and_(overflow_dst, scratch, overflow_dst); | 3268 and_(overflow_dst, scratch, overflow_dst); |
3274 } | 3269 } |
3275 } | 3270 } |
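
The rewritten sequence above relies on the standard sign-bit identity for two's-complement addition: a signed add overflows exactly when both operands share a sign that the sum does not, i.e. when (sum ^ left) & (sum ^ right) is negative. Stashing the clobbered operand in scratch first lets the same identity work when dst aliases left or right. A sketch of the check in plain C++ (hypothetical helper, not V8 API):

  #include <cstdint>

  // Sketch: wrapping add plus overflow flag, matching the xor_/and_ trick.
  // *overflow has its sign bit set iff the addition overflowed.
  int32_t AddAndCheckOverflow(int32_t left, int32_t right, int32_t* overflow) {
    // addu wraps modulo 2^32; emulate with unsigned arithmetic.
    int32_t sum = static_cast<int32_t>(
        static_cast<uint32_t>(left) + static_cast<uint32_t>(right));
    *overflow = (sum ^ left) & (sum ^ right);
    return sum;
  }
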
3276 | 3271 |
3277 | 3272 |
3278 void MacroAssembler::SubuAndCheckForOverflow(Register dst, | 3273 void MacroAssembler::SubuAndCheckForOverflow(Register dst, |
3279 Register left, | 3274 Register left, |
3280 Register right, | 3275 Register right, |
3281 Register overflow_dst, | 3276 Register overflow_dst, |
3282 Register scratch) { | 3277 Register scratch) { |
3283 ASSERT(!dst.is(overflow_dst)); | 3278 ASSERT(!dst.is(overflow_dst)); |
3284 ASSERT(!dst.is(scratch)); | 3279 ASSERT(!dst.is(scratch)); |
3285 ASSERT(!overflow_dst.is(scratch)); | 3280 ASSERT(!overflow_dst.is(scratch)); |
3286 ASSERT(!overflow_dst.is(left)); | 3281 ASSERT(!overflow_dst.is(left)); |
3287 ASSERT(!overflow_dst.is(right)); | 3282 ASSERT(!overflow_dst.is(right)); |
3288 ASSERT(!left.is(right)); | 3283 ASSERT(!left.is(right)); |
3289 ASSERT(!scratch.is(left)); | 3284 ASSERT(!scratch.is(left)); |
3290 ASSERT(!scratch.is(right)); | 3285 ASSERT(!scratch.is(right)); |
3291 | 3286 |
3292 // TODO(kalmard) There must be a way to optimize dst == left and dst == right | |
3293 // cases. | |
3294 | |
3295 if (dst.is(left)) { | 3287 if (dst.is(left)) { |
3296 subu(overflow_dst, left, right); | 3288 mov(scratch, left); // Preserve left. |
3297 xor_(scratch, overflow_dst, left); | 3289 subu(dst, left, right); // Left is overwritten. |
3298 xor_(dst, left, right); | 3290 xor_(overflow_dst, dst, scratch); // scratch is original left. |
3299 and_(scratch, scratch, dst); | 3291 xor_(scratch, scratch, right); // scratch is original left. |
3300 mov(dst, overflow_dst); | 3292 and_(overflow_dst, scratch, overflow_dst); |
3301 mov(overflow_dst, scratch); | |
3302 } else if (dst.is(right)) { | 3293 } else if (dst.is(right)) { |
3303 subu(overflow_dst, left, right); | 3294 mov(scratch, right); // Preserve right. |
3304 xor_(dst, left, right); | 3295 subu(dst, left, right); // Right is overwritten. |
3305 xor_(scratch, overflow_dst, left); | 3296 xor_(overflow_dst, dst, left); |
3306 and_(scratch, scratch, dst); | 3297 xor_(scratch, left, scratch); // Original right. |
3307 mov(dst, overflow_dst); | 3298 and_(overflow_dst, scratch, overflow_dst); |
3308 mov(overflow_dst, scratch); | |
3309 } else { | 3299 } else { |
3310 subu(dst, left, right); | 3300 subu(dst, left, right); |
3311 xor_(overflow_dst, dst, left); | 3301 xor_(overflow_dst, dst, left); |
3312 xor_(scratch, left, right); | 3302 xor_(scratch, left, right); |
3313 and_(overflow_dst, scratch, overflow_dst); | 3303 and_(overflow_dst, scratch, overflow_dst); |
3314 } | 3304 } |
3315 } | 3305 } |
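
Subtraction uses the complementary identity: left - right overflows exactly when the operands differ in sign and the difference's sign differs from left's, i.e. when (left ^ right) & (left ^ diff) is negative; the aliased cases again preserve the overwritten operand in scratch first. A matching sketch under the same assumptions as above:

  // Sketch: wrapping subtract plus overflow flag.
  // *overflow has its sign bit set iff the subtraction overflowed.
  int32_t SubAndCheckOverflow(int32_t left, int32_t right, int32_t* overflow) {
    int32_t diff = static_cast<int32_t>(
        static_cast<uint32_t>(left) - static_cast<uint32_t>(right));
    *overflow = (left ^ right) & (left ^ diff);
    return diff;
  }
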
3316 | 3306 |
3317 | 3307 |
3318 void MacroAssembler::CallRuntime(const Runtime::Function* f, | 3308 void MacroAssembler::CallRuntime(const Runtime::Function* f, |
(...skipping 788 matching lines...)
4107 opcode == BGTZL); | 4097 opcode == BGTZL); |
4108 opcode = (cond == eq) ? BEQ : BNE; | 4098 opcode = (cond == eq) ? BEQ : BNE; |
4109 instr = (instr & ~kOpcodeMask) | opcode; | 4099 instr = (instr & ~kOpcodeMask) | opcode; |
4110 masm_.emit(instr); | 4100 masm_.emit(instr); |
4111 } | 4101 } |
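
The fragment above downgrades a branch-likely instruction to its ordinary form by patching the primary opcode field in place: clear the field with ~kOpcodeMask, OR in BEQ or BNE, and re-emit. A small sketch of the bit manipulation, assuming the usual MIPS encoding with the primary opcode in bits 31..26 (the mask value here is an assumption, not taken from this diff):

  #include <cstdint>

  // Assumed MIPS layout: the primary opcode occupies instruction bits 31..26.
  const uint32_t kOpcodeMask = 0xFC000000u;

  // Sketch: replace an instruction's primary opcode while leaving the
  // register and offset fields untouched.
  uint32_t PatchOpcode(uint32_t instr, uint32_t new_opcode) {
    return (instr & ~kOpcodeMask) | new_opcode;
  }
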
4112 | 4102 |
4113 | 4103 |
4114 } } // namespace v8::internal | 4104 } } // namespace v8::internal |
4115 | 4105 |
4116 #endif // V8_TARGET_ARCH_MIPS | 4106 #endif // V8_TARGET_ARCH_MIPS |