OLD | NEW |
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 378 matching lines...)
389 if (destination == kCoreRegisters) { | 389 if (destination == kCoreRegisters) { |
390 __ Move(a2, a3, f14); | 390 __ Move(a2, a3, f14); |
391 __ Move(a0, a1, f12); | 391 __ Move(a0, a1, f12); |
392 } | 392 } |
393 } else { | 393 } else { |
394 ASSERT(destination == kCoreRegisters); | 394 ASSERT(destination == kCoreRegisters); |
395 // Write Smi from a0 to a3 and a2 in double format. | 395 // Write Smi from a0 to a3 and a2 in double format. |
396 __ mov(scratch1, a0); | 396 __ mov(scratch1, a0); |
397 ConvertToDoubleStub stub1(a3, a2, scratch1, scratch2); | 397 ConvertToDoubleStub stub1(a3, a2, scratch1, scratch2); |
398 __ push(ra); | 398 __ push(ra); |
399 __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET); | 399 __ Call(stub1.GetCode()); |
400 // Write Smi from a1 to a1 and a0 in double format. | 400 // Write Smi from a1 to a1 and a0 in double format. |
401 __ mov(scratch1, a1); | 401 __ mov(scratch1, a1); |
402 ConvertToDoubleStub stub2(a1, a0, scratch1, scratch2); | 402 ConvertToDoubleStub stub2(a1, a0, scratch1, scratch2); |
403 __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET); | 403 __ Call(stub2.GetCode()); |
404 __ pop(ra); | 404 __ pop(ra); |
405 } | 405 } |
406 } | 406 } |
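
Aside on the change being made in each hunk: the explicit RelocInfo::CODE_TARGET argument to __ Call(stub.GetCode(), ...) is dropped everywhere. The shorter form can only be equivalent if the Call(Handle<Code>) overload defaults its relocation mode to RelocInfo::CODE_TARGET, which is assumed here. A minimal stand-alone sketch of that default-argument pattern, with stand-in types rather than the real V8 API:

    #include <iostream>

    // Stand-ins for v8::internal types; this is not the real V8 API.
    struct Code {};
    enum class RelocMode { CODE_TARGET, NONE };

    struct MacroAssemblerSketch {
      // With a defaulted relocation mode, Call(code) emits exactly what
      // Call(code, RelocMode::CODE_TARGET) emits, so the explicit argument
      // at each call site becomes redundant.
      void Call(Code /*code*/, RelocMode rmode = RelocMode::CODE_TARGET) {
        std::cout << (rmode == RelocMode::CODE_TARGET ? "CODE_TARGET" : "NONE")
                  << '\n';
      }
    };

    int main() {
      MacroAssemblerSketch masm;
      Code stub_code;
      masm.Call(stub_code);                          // new form in this diff
      masm.Call(stub_code, RelocMode::CODE_TARGET);  // old form; same output
    }

Both calls print the same mode, which is why dropping the argument reads as a pure call-site cleanup rather than a behavior change.
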
407 | 407 |
408 | 408 |
409 void FloatingPointHelper::LoadOperands( | 409 void FloatingPointHelper::LoadOperands( |
410 MacroAssembler* masm, | 410 MacroAssembler* masm, |
411 FloatingPointHelper::Destination destination, | 411 FloatingPointHelper::Destination destination, |
412 Register heap_number_map, | 412 Register heap_number_map, |
413 Register scratch1, | 413 Register scratch1, |
(...skipping 61 matching lines...)
475 if (destination == kCoreRegisters) { | 475 if (destination == kCoreRegisters) { |
476 // Load the converted smi to dst1 and dst2 in double format. | 476 // Load the converted smi to dst1 and dst2 in double format. |
477 __ Move(dst1, dst2, dst); | 477 __ Move(dst1, dst2, dst); |
478 } | 478 } |
479 } else { | 479 } else { |
480 ASSERT(destination == kCoreRegisters); | 480 ASSERT(destination == kCoreRegisters); |
481 // Write smi to dst1 and dst2 in double format. | 481 // Write smi to dst1 and dst2 in double format. |
482 __ mov(scratch1, object); | 482 __ mov(scratch1, object); |
483 ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2); | 483 ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2); |
484 __ push(ra); | 484 __ push(ra); |
485 __ Call(stub.GetCode(), RelocInfo::CODE_TARGET); | 485 __ Call(stub.GetCode()); |
486 __ pop(ra); | 486 __ pop(ra); |
487 } | 487 } |
488 | 488 |
489 __ bind(&done); | 489 __ bind(&done); |
490 } | 490 } |
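
The push(ra)/pop(ra) bracket around each ConvertToDoubleStub call above is needed because __ Call clobbers ra, the MIPS link register, which still holds this stub's own return address at that point. A small self-contained simulation of the invariant being preserved (simulated link register and stack, not V8 code):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    static uintptr_t ra = 0;              // simulated link register
    static std::vector<uintptr_t> stack;  // simulated stack

    // A call writes the caller's resume address into ra, clobbering it.
    static void SimulatedCall(uintptr_t resume_address) { ra = resume_address; }

    static void StubBody() {
      uintptr_t my_return = ra;  // address this stub must eventually return to
      stack.push_back(ra);       // __ push(ra)
      SimulatedCall(0x1234);     // __ Call(stub.GetCode()) clobbers ra
      ra = stack.back();         // __ pop(ra)
      stack.pop_back();
      assert(ra == my_return);   // return address survived the nested call
    }

    int main() {
      ra = 0xBEEF;  // pretend a caller just called into StubBody
      StubBody();
    }
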
491 | 491 |
492 | 492 |
493 void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm, | 493 void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm, |
494 Register object, | 494 Register object, |
495 Register dst, | 495 Register dst, |
(...skipping 604 matching lines...)
1100 __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset)); | 1100 __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset)); |
1101 } else { | 1101 } else { |
1102 // Load lhs to a double in a2, a3. | 1102 // Load lhs to a double in a2, a3. |
1103 __ lw(a3, FieldMemOperand(lhs, HeapNumber::kValueOffset + 4)); | 1103 __ lw(a3, FieldMemOperand(lhs, HeapNumber::kValueOffset + 4)); |
1104 __ lw(a2, FieldMemOperand(lhs, HeapNumber::kValueOffset)); | 1104 __ lw(a2, FieldMemOperand(lhs, HeapNumber::kValueOffset)); |
1105 | 1105 |
1106 // Write Smi from rhs to a1 and a0 in double format. t5 is scratch. | 1106 // Write Smi from rhs to a1 and a0 in double format. t5 is scratch. |
1107 __ mov(t6, rhs); | 1107 __ mov(t6, rhs); |
1108 ConvertToDoubleStub stub1(a1, a0, t6, t5); | 1108 ConvertToDoubleStub stub1(a1, a0, t6, t5); |
1109 __ push(ra); | 1109 __ push(ra); |
1110 __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET); | 1110 __ Call(stub1.GetCode()); |
1111 | 1111 |
1112 __ pop(ra); | 1112 __ pop(ra); |
1113 } | 1113 } |
1114 | 1114 |
1115 // We now have both loaded as doubles. | 1115 // We now have both loaded as doubles. |
1116 __ jmp(both_loaded_as_doubles); | 1116 __ jmp(both_loaded_as_doubles); |
1117 | 1117 |
1118 __ bind(&lhs_is_smi); | 1118 __ bind(&lhs_is_smi); |
1119 // Lhs is a Smi. Check whether the non-smi is a heap number. | 1119 // Lhs is a Smi. Check whether the non-smi is a heap number. |
1120 __ GetObjectType(rhs, t4, t4); | 1120 __ GetObjectType(rhs, t4, t4); |
(...skipping 14 matching lines...)
1135 CpuFeatures::Scope scope(FPU); | 1135 CpuFeatures::Scope scope(FPU); |
1136 __ sra(at, lhs, kSmiTagSize); | 1136 __ sra(at, lhs, kSmiTagSize); |
1137 __ mtc1(at, f12); | 1137 __ mtc1(at, f12); |
1138 __ cvt_d_w(f12, f12); | 1138 __ cvt_d_w(f12, f12); |
1139 __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset)); | 1139 __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset)); |
1140 } else { | 1140 } else { |
1141 // Convert lhs to a double format. t5 is scratch. | 1141 // Convert lhs to a double format. t5 is scratch. |
1142 __ mov(t6, lhs); | 1142 __ mov(t6, lhs); |
1143 ConvertToDoubleStub stub2(a3, a2, t6, t5); | 1143 ConvertToDoubleStub stub2(a3, a2, t6, t5); |
1144 __ push(ra); | 1144 __ push(ra); |
1145 __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET); | 1145 __ Call(stub2.GetCode()); |
1146 __ pop(ra); | 1146 __ pop(ra); |
1147 // Load rhs to a double in a1, a0. | 1147 // Load rhs to a double in a1, a0. |
1148 if (rhs.is(a0)) { | 1148 if (rhs.is(a0)) { |
1149 __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4)); | 1149 __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4)); |
1150 __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset)); | 1150 __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset)); |
1151 } else { | 1151 } else { |
1152 __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset)); | 1152 __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset)); |
1153 __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4)); | 1153 __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4)); |
1154 } | 1154 } |
1155 } | 1155 } |
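
The branch on rhs.is(a0) above is an aliasing guard: rhs is the base address for both lw instructions, and if rhs is also one of the destination registers, that half has to be written last or the second load would go through a clobbered address. A self-contained simulation of the same rule, with registers modeled as an array (hypothetical helper, not V8 code):

    #include <cassert>
    #include <cstdint>

    // Simulated register file; indices follow MIPS numbering (a0 = 4).
    static uintptr_t regs[32];
    enum { A0 = 4, A1 = 5 };

    // Loads the two 32-bit halves of a heap number into the pair (A1:high,
    // A0:low), where regs[rhs_reg] holds the address. The half aliasing the
    // base register is written last, mirroring the rhs.is(a0) branch above.
    static void LoadDoubleWords(int rhs_reg) {
      const uint32_t* addr = reinterpret_cast<const uint32_t*>(regs[rhs_reg]);
      if (rhs_reg == A0) {
        regs[A1] = addr[1];  // non-aliasing half first
        regs[A0] = addr[0];  // clobber the base register last
      } else {
        regs[A0] = addr[0];
        regs[A1] = addr[1];
      }
    }

    int main() {
      uint32_t payload[2] = {0xDEADBEEF, 0x40090000};  // low word, high word
      regs[A0] = reinterpret_cast<uintptr_t>(payload);
      LoadDoubleWords(A0);
      assert(regs[A0] == 0xDEADBEEF && regs[A1] == 0x40090000);
    }
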
(...skipping 5724 matching lines...)
6880 __ mov(result, zero_reg); | 6880 __ mov(result, zero_reg); |
6881 __ Ret(); | 6881 __ Ret(); |
6882 } | 6882 } |
6883 | 6883 |
6884 | 6884 |
6885 #undef __ | 6885 #undef __ |
6886 | 6886 |
6887 } } // namespace v8::internal | 6887 } } // namespace v8::internal |
6888 | 6888 |
6889 #endif // V8_TARGET_ARCH_MIPS | 6889 #endif // V8_TARGET_ARCH_MIPS |