| OLD | NEW |
|---|---|
| 1 // Copyright (c) 2013, the Dart project authors.  Please see the AUTHORS file | 1 // Copyright (c) 2013, the Dart project authors.  Please see the AUTHORS file | 
| 2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a | 
| 3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. | 
| 4 | 4 | 
| 5 #include "vm/globals.h"  // Needed here to get TARGET_ARCH_MIPS. | 5 #include "vm/globals.h"  // Needed here to get TARGET_ARCH_MIPS. | 
| 6 #if defined(TARGET_ARCH_MIPS) | 6 #if defined(TARGET_ARCH_MIPS) | 
| 7 | 7 | 
| 8 #include "vm/intrinsifier.h" | 8 #include "vm/intrinsifier.h" | 
| 9 | 9 | 
| 10 #include "vm/assembler.h" | 10 #include "vm/assembler.h" | 
| (...skipping 846 matching lines...) | |
| 857   ASSERT(kSmiTagShift == 1); | 857   ASSERT(kSmiTagShift == 1); | 
| 858   ASSERT(kSmiTag == 0); | 858   ASSERT(kSmiTag == 0); | 
| 859   Label fall_through, overflow; | 859   Label fall_through, overflow; | 
| 860 | 860 | 
| 861   TestBothArgumentsSmis(assembler, &fall_through); | 861   TestBothArgumentsSmis(assembler, &fall_through); | 
| 862   __ BranchUnsignedGreater(T0, Smi::RawValue(Smi::kBits), &fall_through); | 862   __ BranchUnsignedGreater(T0, Smi::RawValue(Smi::kBits), &fall_through); | 
| 863   __ SmiUntag(T0); | 863   __ SmiUntag(T0); | 
| 864 | 864 | 
| 865   // Check for overflow by shifting left and shifting back arithmetically. | 865   // Check for overflow by shifting left and shifting back arithmetically. | 
| 866   // If the result is different from the original, there was overflow. | 866   // If the result is different from the original, there was overflow. | 
| 867   __ mov(T2, T1); | 867   __ sllv(TMP, T1, T0); | 
| 868   __ sllv(T1, T1, T0); | 868   __ srav(TMP, TMP, T0); | 
| 869   __ srlv(T1, T1, T0); | 869   __ bne(TMP, T1, &overflow); | 
| 870   __ bne(T1, T2, &overflow); |  | 
| 871 | 870 | 
| 872   // No overflow, result in V0. | 871   // No overflow, result in V0. | 
| 873   __ Ret(); | 872   __ Ret(); | 
| 874   __ delay_slot()->sllv(V0, T1, T0); | 873   __ delay_slot()->sllv(V0, T1, T0); | 
| 875 | 874 | 
| 876   __ Bind(&overflow); | 875   __ Bind(&overflow); | 
| 877   // Arguments are Smi but the shift produced an overflow to Mint. | 876   // Arguments are Smi but the shift produced an overflow to Mint. | 
| 878   __ bltz(T2, &fall_through); | 877   __ bltz(T1, &fall_through); | 
| 879   __ SmiUntag(T2); | 878   __ SmiUntag(T1); | 
| 880 | 879 | 
| 881   // Pull off high bits that will be shifted off of T2 by making a mask | 880   // Pull off high bits that will be shifted off of T1 by making a mask | 
| 882   // ((1 << T0) - 1), shifting it to the right, masking T2, then shifting back. | 881   // ((1 << T0) - 1), shifting it to the right, masking T1, then shifting back. | 
| 883   // high bits = (((1 << T0) - 1) << (32 - T0)) & T2) >> (32 - T0) | 882   // high bits = (((1 << T0) - 1) << (32 - T0)) & T1) >> (32 - T0) | 
| 884   // lo bits = T2 << T0 | 883   // lo bits = T1 << T0 | 
| 885   __ LoadImmediate(T3, 1); | 884   __ LoadImmediate(T3, 1); | 
| 886   __ sllv(T3, T3, T0);  // T3 <- T3 << T0 | 885   __ sllv(T3, T3, T0);  // T3 <- T3 << T0 | 
| 887   __ addiu(T3, T3, Immediate(-1));  // T3 <- T3 - 1 | 886   __ addiu(T3, T3, Immediate(-1));  // T3 <- T3 - 1 | 
| 888   __ addu(T4, ZR, T0);  // T4 <- -T0 | 887   __ subu(T4, ZR, T0);  // T4 <- -T0 | 
| 889   __ addiu(T4, T4, Immediate(32));  // T4 <- 32 - T0 | 888   __ addiu(T4, T4, Immediate(32));  // T4 <- 32 - T0 | 
| 890   __ sllv(T3, T3, T4);  // T3 <- T3 << T4 | 889   __ sllv(T3, T3, T4);  // T3 <- T3 << T4 | 
| 891   __ and_(T3, T3, T2);  // T3 <- T3 & T2 | 890   __ and_(T3, T3, T1);  // T3 <- T3 & T1 | 
| 892   __ srlv(T3, T3, T4);  // T3 <- T3 >> T4 | 891   __ srlv(T3, T3, T4);  // T3 <- T3 >> T4 | 
| 893   // Now T3 has the bits that fall off of T2 on a left shift. | 892   // Now T3 has the bits that fall off of T1 on a left shift. | 
| 894   __ sllv(T0, T2, T0);  // T0 gets low bits. | 893   __ sllv(T0, T1, T0);  // T0 gets low bits. | 
| 895 | 894 | 
| 896   const Class& mint_class = Class::Handle( | 895   const Class& mint_class = Class::Handle( | 
| 897       Isolate::Current()->object_store()->mint_class()); | 896       Isolate::Current()->object_store()->mint_class()); | 
| 898   __ TryAllocate(mint_class, &fall_through, V0); | 897   __ TryAllocate(mint_class, &fall_through, V0); | 
| 899 | 898 | 
| 900   __ sw(T0, FieldAddress(V0, Mint::value_offset())); | 899   __ sw(T0, FieldAddress(V0, Mint::value_offset())); | 
| 901   __ Ret(); | 900   __ Ret(); | 
| 902   __ delay_slot()->sw(T3, FieldAddress(V0, Mint::value_offset() + kWordSize)); | 901   __ delay_slot()->sw(T3, FieldAddress(V0, Mint::value_offset() + kWordSize)); | 
| 903   __ Bind(&fall_through); | 902   __ Bind(&fall_through); | 
| 904   return false; | 903   return false; | 
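
A note on the Smi shift-left hunk above: the overflow test now shifts back with `srav` (arithmetic) rather than `srlv` (logical), so a shift that disturbs the sign bit is caught, and the negation feeding the `32 - T0` computation uses `subu` instead of `addu`. A rough C++ sketch of what the corrected sequence computes, with illustrative names only (`ShiftLeftOverflows` and `ShiftLeftAsInt64` are not VM functions), assuming two's-complement arithmetic and a shift count in 1..31:

```cpp
#include <cstdint>

// Overflow test: shift left, then shift back arithmetically; if the round
// trip changes the value, significant bits or the sign were lost. Shifting
// back logically (the old code) misses a non-negative value whose shift
// pushes a 1 into the sign bit.
bool ShiftLeftOverflows(int32_t value, int shift) {
  int32_t shifted = static_cast<int32_t>(static_cast<uint32_t>(value) << shift);
  return (shifted >> shift) != value;
}

// Mint path for a non-negative value: the bits that fall off the top are
// recovered with the mask ((1 << n) - 1) << (32 - n) and become the high
// word of the 64-bit result; the low word is simply value << n.
void ShiftLeftAsInt64(uint32_t value, int n, uint32_t* high, uint32_t* low) {
  uint32_t mask = ((1u << n) - 1u) << (32 - n);
  *high = (value & mask) >> (32 - n);
  *low = value << n;
}
```

Because the `bltz` above sends negative values to the runtime, the Mint path only ever sees non-negative inputs, which is why the logical `srlv` used to recover the high word remains sufficient there.
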
| (...skipping 459 matching lines...) | |
| 1364   __ srl(T0, T0, 31);  // Get the sign bit down to bit 0 of T0. | 1363   __ srl(T0, T0, 31);  // Get the sign bit down to bit 0 of T0. | 
| 1365   __ andi(CMPRES, T0, Immediate(1));  // Check if the bit is set. | 1364   __ andi(CMPRES, T0, Immediate(1));  // Check if the bit is set. | 
| 1366   __ bne(T0, ZR, &is_true);  // Sign bit set. True. | 1365   __ bne(T0, ZR, &is_true);  // Sign bit set. True. | 
| 1367   __ b(&is_false); | 1366   __ b(&is_false); | 
| 1368   return true; | 1367   return true; | 
| 1369 } | 1368 } | 
| 1370 | 1369 | 
| 1371 | 1370 | 
| 1372 bool Intrinsifier::Double_toInt(Assembler* assembler) { | 1371 bool Intrinsifier::Double_toInt(Assembler* assembler) { | 
| 1373   __ lw(T0, Address(SP, 0 * kWordSize)); | 1372   __ lw(T0, Address(SP, 0 * kWordSize)); | 
| 1374   __ lwc1(F0, FieldAddress(T0, Double::value_offset())); | 1373   __ LoadDFromOffset(D0, T0, Double::value_offset() - kHeapObjectTag); | 
| 1375   __ lwc1(F1, FieldAddress(T0, Double::value_offset() + kWordSize)); | 1374 | 
| 1376   __ cvtwd(F2, D0); | 1375   __ cvtwd(F2, D0); | 
| 1377   __ mfc1(V0, F2); | 1376   __ mfc1(V0, F2); | 
|  | 1377 | 
| 1378   // Overflow is signaled with minint. | 1378   // Overflow is signaled with minint. | 
| 1379   Label fall_through; | 1379   Label fall_through; | 
| 1380   // Check for overflow and that it fits into Smi. | 1380   // Check for overflow and that it fits into Smi. | 
| 1381   __ BranchSignedLess(V0, 0xC0000000, &fall_through); | 1381   __ LoadImmediate(TMP, 0xC0000000); | 
|  | 1382   __ subu(CMPRES, V0, TMP); | 
|  | 1383   __ bltz(CMPRES, &fall_through); | 
| 1382   __ Ret(); | 1384   __ Ret(); | 
| 1383   __ delay_slot()->SmiTag(V0); | 1385   __ delay_slot()->SmiTag(V0); | 
| 1384   __ Bind(&fall_through); | 1386   __ Bind(&fall_through); | 
| 1385   return false; | 1387   return false; | 
| 1386 } | 1388 } | 
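
On the Double_toInt hunk above: `cvt.w.d` signals an out-of-range conversion by producing INT32_MIN, and the converted value must additionally fit in a 31-bit Smi, so the rewritten check subtracts 0xC0000000 (i.e. -2^30) and branches on the sign of the 32-bit difference; thanks to wraparound, that single signed test rejects values on either side of the Smi range as well as the overflow marker. A C++ sketch of the range test, with an illustrative name (`FitsInSmi` is not a VM function):

```cpp
#include <cstdint>

// Range test behind the subu/bltz pair: v - 0xC0000000 (mod 2^32) is
// negative exactly when v lies outside the Smi range [-2^30, 2^30), and
// INT32_MIN (what cvt.w.d produces on overflow) also fails the test.
// Assumes two's-complement 32-bit arithmetic.
bool FitsInSmi(int32_t v) {
  int32_t diff = static_cast<int32_t>(static_cast<uint32_t>(v) - 0xC0000000u);
  return diff >= 0;
}
```
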
| 1387 | 1389 | 
| 1388 | 1390 | 
| 1389 bool Intrinsifier::Math_sqrt(Assembler* assembler) { | 1391 bool Intrinsifier::Math_sqrt(Assembler* assembler) { | 
| 1390   Label fall_through, is_smi, double_op; | 1392   Label fall_through, is_smi, double_op; | 
| 1391   TestLastArgumentIsDouble(assembler, &is_smi, &fall_through); | 1393   TestLastArgumentIsDouble(assembler, &is_smi, &fall_through); | 
| (...skipping 375 matching lines...) | |
| 1767   __ Bind(&ok); | 1769   __ Bind(&ok); | 
| 1768   __ Ret(); | 1770   __ Ret(); | 
| 1769 | 1771 | 
| 1770   __ Bind(&fall_through); | 1772   __ Bind(&fall_through); | 
| 1771   return false; | 1773   return false; | 
| 1772 } | 1774 } | 
| 1773 | 1775 | 
| 1774 }  // namespace dart | 1776 }  // namespace dart | 
| 1775 | 1777 | 
| 1776 #endif  // defined TARGET_ARCH_MIPS | 1778 #endif  // defined TARGET_ARCH_MIPS | 