OLD | NEW |
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 370 matching lines...)
381 Register scratch2) { | 381 Register scratch2) { |
382 if (CpuFeatures::IsSupported(FPU)) { | 382 if (CpuFeatures::IsSupported(FPU)) { |
383 CpuFeatures::Scope scope(FPU); | 383 CpuFeatures::Scope scope(FPU); |
384 __ sra(scratch1, a0, kSmiTagSize); | 384 __ sra(scratch1, a0, kSmiTagSize); |
385 __ mtc1(scratch1, f14); | 385 __ mtc1(scratch1, f14); |
386 __ cvt_d_w(f14, f14); | 386 __ cvt_d_w(f14, f14); |
387 __ sra(scratch1, a1, kSmiTagSize); | 387 __ sra(scratch1, a1, kSmiTagSize); |
388 __ mtc1(scratch1, f12); | 388 __ mtc1(scratch1, f12); |
389 __ cvt_d_w(f12, f12); | 389 __ cvt_d_w(f12, f12); |
390 if (destination == kCoreRegisters) { | 390 if (destination == kCoreRegisters) { |
391 __ mfc1(a2, f14); | 391 __ Move(a2, a3, f14); |
392 __ mfc1(a3, f15); | 392 __ Move(a0, a1, f12); |
393 | |
394 __ mfc1(a0, f12); | |
395 __ mfc1(a1, f13); | |
396 } | 393 } |
397 } else { | 394 } else { |
398 ASSERT(destination == kCoreRegisters); | 395 ASSERT(destination == kCoreRegisters); |
399 // Write Smi from a0 to a3 and a2 in double format. | 396 // Write Smi from a0 to a3 and a2 in double format. |
400 __ mov(scratch1, a0); | 397 __ mov(scratch1, a0); |
401 ConvertToDoubleStub stub1(a3, a2, scratch1, scratch2); | 398 ConvertToDoubleStub stub1(a3, a2, scratch1, scratch2); |
402 __ push(ra); | 399 __ push(ra); |
403 __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET); | 400 __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET); |
404 // Write Smi from a1 to a1 and a0 in double format. | 401 // Write Smi from a1 to a1 and a0 in double format. |
405 __ mov(scratch1, a1); | 402 __ mov(scratch1, a1); |
(...skipping 65 matching lines...)
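The recurring change in this patch folds each pair of mfc1 transfers into a single Move pseudo-instruction, which presumably emits the same two transfers under the hood. As a rough model of what those transfers compute, here is a minimal, hedged C++ sketch assuming the little-endian o32 layout, where a double occupies an even/odd FPU register pair (MoveDoubleToCorePair is a hypothetical name, not a V8 API):

#include <cstdint>
#include <cstdio>
#include <cstring>

// Split a double into the two 32-bit words the old code read with two
// mfc1 instructions: the even register holds the low word, the odd one
// the high word (on little-endian MIPS; big-endian swaps them).
void MoveDoubleToCorePair(double value, uint32_t* lo, uint32_t* hi) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  *lo = static_cast<uint32_t>(bits);        // mantissa low word
  *hi = static_cast<uint32_t>(bits >> 32);  // sign | exponent | mantissa high
}

int main() {
  uint32_t lo, hi;
  MoveDoubleToCorePair(1.0, &lo, &hi);
  std::printf("hi=%08x lo=%08x\n", hi, lo);  // prints hi=3ff00000 lo=00000000
  return 0;
}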
471 // Handle loading a double from a smi. | 468 // Handle loading a double from a smi. |
472 __ bind(&is_smi); | 469 __ bind(&is_smi); |
473 if (CpuFeatures::IsSupported(FPU)) { | 470 if (CpuFeatures::IsSupported(FPU)) { |
474 CpuFeatures::Scope scope(FPU); | 471 CpuFeatures::Scope scope(FPU); |
475 // Convert smi to double using FPU instructions. | 472 // Convert smi to double using FPU instructions. |
476 __ SmiUntag(scratch1, object); | 473 __ SmiUntag(scratch1, object); |
477 __ mtc1(scratch1, dst); | 474 __ mtc1(scratch1, dst); |
478 __ cvt_d_w(dst, dst); | 475 __ cvt_d_w(dst, dst); |
479 if (destination == kCoreRegisters) { | 476 if (destination == kCoreRegisters) { |
480 // Load the converted smi to dst1 and dst2 in double format. | 477 // Load the converted smi to dst1 and dst2 in double format. |
481 __ mfc1(dst1, dst); | 478 __ Move(dst1, dst2, dst); |
482 __ mfc1(dst2, FPURegister::from_code(dst.code() + 1)); | |
483 } | 479 } |
484 } else { | 480 } else { |
485 ASSERT(destination == kCoreRegisters); | 481 ASSERT(destination == kCoreRegisters); |
486 // Write smi to dst1 and dst2 in double format. | 482 // Write smi to dst1 and dst2 in double format. |
487 __ mov(scratch1, object); | 483 __ mov(scratch1, object); |
488 ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2); | 484 ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2); |
489 __ push(ra); | 485 __ push(ra); |
490 __ Call(stub.GetCode(), RelocInfo::CODE_TARGET); | 486 __ Call(stub.GetCode(), RelocInfo::CODE_TARGET); |
491 __ pop(ra); | 487 __ pop(ra); |
492 } | 488 } |
(...skipping 50 matching lines...)
543 | 539 |
544 void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm, | 540 void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm, |
545 Register int_scratch, | 541 Register int_scratch, |
546 Destination destination, | 542 Destination destination, |
547 FPURegister double_dst, | 543 FPURegister double_dst, |
548 Register dst1, | 544 Register dst1, |
549 Register dst2, | 545 Register dst2, |
550 Register scratch2, | 546 Register scratch2, |
551 FPURegister single_scratch) { | 547 FPURegister single_scratch) { |
552 ASSERT(!int_scratch.is(scratch2)); | 548 ASSERT(!int_scratch.is(scratch2)); |
| 549 ASSERT(!int_scratch.is(dst1)); |
| 550 ASSERT(!int_scratch.is(dst2)); |
553 | 551 |
554 Label done; | 552 Label done; |
555 | 553 |
556 if (CpuFeatures::IsSupported(FPU)) { | 554 if (CpuFeatures::IsSupported(FPU)) { |
557 CpuFeatures::Scope scope(FPU); | 555 CpuFeatures::Scope scope(FPU); |
558 __ mtc1(int_scratch, single_scratch); | 556 __ mtc1(int_scratch, single_scratch); |
559 __ cvt_d_w(double_dst, single_scratch); | 557 __ cvt_d_w(double_dst, single_scratch); |
560 if (destination == kCoreRegisters) { | 558 if (destination == kCoreRegisters) { |
561 __ mfc1(dst1, double_dst); | 559 __ Move(dst1, dst2, double_dst); |
562 __ mfc1(dst2, FPURegister::from_code(double_dst.code() + 1)); | |
563 } | 560 } |
564 } else { | 561 } else { |
565 Label fewer_than_20_useful_bits; | 562 Label fewer_than_20_useful_bits; |
566 // Expected output: | 563 // Expected output: |
567 // | dst2 | dst1 | | 564 // | dst2 | dst1 | |
568 // | s | exp | mantissa | | 565 // | s | exp | mantissa | |
569 | 566 |
570 // Check for zero. | 567 // Check for zero. |
571 __ mov(dst2, int_scratch); | 568 __ mov(dst2, int_scratch); |
572 __ mov(dst1, int_scratch); | 569 __ mov(dst1, int_scratch); |
(...skipping 103 matching lines...)
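For the non-FPU path, the expected-output comment above pins down the word layout: dst2 holds | s | exp | and the high mantissa bits, dst1 the low mantissa word. As a hedged illustration of that encoding (plain C++, assuming IEEE 754 doubles; this is not the stub's code, which shifts into place rather than looping):

#include <cstdint>
#include <cstdio>

// Build the | s | exp | mantissa | word pair for an int32 by hand.
void IntToDoubleWords(int32_t v, uint32_t* hi, uint32_t* lo) {
  *hi = 0; *lo = 0;
  if (v == 0) return;                        // zero has an all-zero encoding
  uint32_t sign = 0;
  uint32_t mag = static_cast<uint32_t>(v);
  if (v < 0) { sign = 0x80000000u; mag = ~mag + 1; }
  int msb = 31;
  while (!(mag & (1u << msb))) msb--;        // find the leading set bit
  uint64_t mantissa = (static_cast<uint64_t>(mag) << (52 - msb))
                      & ((1ull << 52) - 1);  // drop the implicit leading 1
  uint32_t exp = 1023 + msb;                 // biased exponent
  *hi = sign | (exp << 20) | static_cast<uint32_t>(mantissa >> 32);
  *lo = static_cast<uint32_t>(mantissa);
}

int main() {
  uint32_t hi, lo;
  IntToDoubleWords(-5, &hi, &lo);
  std::printf("%08x %08x\n", hi, lo);        // prints c0140000 00000000
  return 0;
}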
676 __ ctc1(scratch1, FCSR); | 673 __ ctc1(scratch1, FCSR); |
677 | 674 |
678 // Check for inexact conversion. | 675 // Check for inexact conversion. |
679 __ srl(scratch2, scratch2, kFCSRFlagShift); | 676 __ srl(scratch2, scratch2, kFCSRFlagShift); |
680 __ And(scratch2, scratch2, (kFCSRFlagMask | kFCSRInexactFlagBit)); | 677 __ And(scratch2, scratch2, (kFCSRFlagMask | kFCSRInexactFlagBit)); |
681 | 678 |
682 // Jump to not_int32 if the operation did not succeed. | 679 // Jump to not_int32 if the operation did not succeed. |
683 __ Branch(not_int32, ne, scratch2, Operand(zero_reg)); | 680 __ Branch(not_int32, ne, scratch2, Operand(zero_reg)); |
684 | 681 |
685 if (destination == kCoreRegisters) { | 682 if (destination == kCoreRegisters) { |
686 __ mfc1(dst1, double_dst); | 683 __ Move(dst1, dst2, double_dst); |
687 __ mfc1(dst2, FPURegister::from_code(double_dst.code() + 1)); | |
688 } | 684 } |
689 | 685 |
690 } else { | 686 } else { |
691 ASSERT(!scratch1.is(object) && !scratch2.is(object)); | 687 ASSERT(!scratch1.is(object) && !scratch2.is(object)); |
692 // Load the double value in the destination registers. | 688 // Load the double value in the destination registers. |
693 __ lw(dst2, FieldMemOperand(object, HeapNumber::kExponentOffset)); | 689 __ lw(dst2, FieldMemOperand(object, HeapNumber::kExponentOffset)); |
694 __ lw(dst1, FieldMemOperand(object, HeapNumber::kMantissaOffset)); | 690 __ lw(dst1, FieldMemOperand(object, HeapNumber::kMantissaOffset)); |
695 | 691 |
696 // Check for 0 and -0. | 692 // Check for 0 and -0. |
697 __ And(scratch1, dst1, Operand(~HeapNumber::kSignMask)); | 693 __ And(scratch1, dst1, Operand(~HeapNumber::kSignMask)); |
(...skipping 186 matching lines...)
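The FCSR inexact-flag test above has a simple value-level equivalent, sketched below as hedged plain C++; note the real stub additionally rejects -0, which this round-trip test would accept as an exact 0:

#include <cstdint>

// True when d converts to int32 without rounding; mirrors "truncate,
// then check the inexact flag". The range test also rejects NaN, since
// every comparison with NaN is false.
bool IsExactInt32(double d, int32_t* out) {
  if (!(d >= -2147483648.0 && d <= 2147483647.0)) return false;
  int32_t truncated = static_cast<int32_t>(d);            // rounds toward zero
  if (static_cast<double>(truncated) != d) return false;  // inexact
  *out = truncated;
  return true;
}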
884 // Push the current return address before the C call. | 880 // Push the current return address before the C call. |
885 __ push(ra); | 881 __ push(ra); |
886 __ PrepareCallCFunction(4, scratch); // Two doubles are 4 arguments. | 882 __ PrepareCallCFunction(4, scratch); // Two doubles are 4 arguments. |
887 if (!IsMipsSoftFloatABI) { | 883 if (!IsMipsSoftFloatABI) { |
888 CpuFeatures::Scope scope(FPU); | 884 CpuFeatures::Scope scope(FPU); |
889 // We are not using MIPS FPU instructions, and parameters for the runtime | 885 // We are not using MIPS FPU instructions, and parameters for the runtime |
890 // function call are prepared in a0-a3 registers, but the function we are | 886 // function call are prepared in a0-a3 registers, but the function we are |
891 // calling is compiled with the hard-float flag and expects the hard-float ABI | 887 // calling is compiled with the hard-float flag and expects the hard-float ABI |
892 // (parameters in f12/f14 registers). We need to copy parameters from | 888 // (parameters in f12/f14 registers). We need to copy parameters from |
893 // a0-a3 registers to f12/f14 register pairs. | 889 // a0-a3 registers to f12/f14 register pairs. |
894 __ mtc1(a0, f12); | 890 __ Move(f12, a0, a1); |
895 __ mtc1(a1, f13); | 891 __ Move(f14, a2, a3); |
896 __ mtc1(a2, f14); | |
897 __ mtc1(a3, f15); | |
898 } | 892 } |
899 // Call C routine that may not cause GC or other trouble. | 893 // Call C routine that may not cause GC or other trouble. |
900 __ CallCFunction(ExternalReference::double_fp_operation(op, masm->isolate()), | 894 __ CallCFunction(ExternalReference::double_fp_operation(op, masm->isolate()), |
901 4); | 895 4); |
902 // Store answer in the overwritable heap number. | 896 // Store answer in the overwritable heap number. |
903 if (!IsMipsSoftFloatABI) { | 897 if (!IsMipsSoftFloatABI) { |
904 CpuFeatures::Scope scope(FPU); | 898 CpuFeatures::Scope scope(FPU); |
905 // Double returned in register f0. | 899 // Double returned in register f0. |
906 __ sdc1(f0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset)); | 900 __ sdc1(f0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset)); |
907 } else { | 901 } else { |
(...skipping 256 matching lines...)
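Going the other way, Move(f12, a0, a1) stands in for the two mtc1 transfers that rebuild a double from its word pair before the hard-float C call. A hedged model of that reassembly (CorePairToDouble is a hypothetical name, not a V8 API):

#include <cstdint>
#include <cstdio>
#include <cstring>

// Reassemble the double that the soft-float convention spreads across
// two core registers: a0 carries the low word, a1 the high word.
double CorePairToDouble(uint32_t lo, uint32_t hi) {
  uint64_t bits = (static_cast<uint64_t>(hi) << 32) | lo;
  double d;
  std::memcpy(&d, &bits, sizeof(d));
  return d;
}

int main() {
  // 0x400921fb54442d18 is the encoding of pi.
  std::printf("%.15f\n", CorePairToDouble(0x54442d18u, 0x400921fbu));
  return 0;
}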
1164 } | 1158 } |
1165 // Fall through to both_loaded_as_doubles. | 1159 // Fall through to both_loaded_as_doubles. |
1166 } | 1160 } |
1167 | 1161 |
1168 | 1162 |
1169 void EmitNanCheck(MacroAssembler* masm, Condition cc) { | 1163 void EmitNanCheck(MacroAssembler* masm, Condition cc) { |
1170 bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset); | 1164 bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset); |
1171 if (CpuFeatures::IsSupported(FPU)) { | 1165 if (CpuFeatures::IsSupported(FPU)) { |
1172 CpuFeatures::Scope scope(FPU); | 1166 CpuFeatures::Scope scope(FPU); |
1173 // Lhs and rhs are already loaded to f12 and f14 register pairs. | 1167 // Lhs and rhs are already loaded to f12 and f14 register pairs. |
1174 __ mfc1(t0, f14); // f14 has LS 32 bits of rhs. | 1168 __ Move(t0, t1, f14); |
1175 __ mfc1(t1, f15); // f15 has MS 32 bits of rhs. | 1169 __ Move(t2, t3, f12); |
1176 __ mfc1(t2, f12); // f12 has LS 32 bits of lhs. | |
1177 __ mfc1(t3, f13); // f13 has MS 32 bits of lhs. | |
1178 } else { | 1170 } else { |
1179 // Lhs and rhs are already loaded to GP registers. | 1171 // Lhs and rhs are already loaded to GP registers. |
1180 __ mov(t0, a0); // a0 has LS 32 bits of rhs. | 1172 __ mov(t0, a0); // a0 has LS 32 bits of rhs. |
1181 __ mov(t1, a1); // a1 has MS 32 bits of rhs. | 1173 __ mov(t1, a1); // a1 has MS 32 bits of rhs. |
1182 __ mov(t2, a2); // a2 has LS 32 bits of lhs. | 1174 __ mov(t2, a2); // a2 has LS 32 bits of lhs. |
1183 __ mov(t3, a3); // a3 has MS 32 bits of lhs. | 1175 __ mov(t3, a3); // a3 has MS 32 bits of lhs. |
1184 } | 1176 } |
1185 Register rhs_exponent = exp_first ? t0 : t1; | 1177 Register rhs_exponent = exp_first ? t0 : t1; |
1186 Register lhs_exponent = exp_first ? t2 : t3; | 1178 Register lhs_exponent = exp_first ? t2 : t3; |
1187 Register rhs_mantissa = exp_first ? t1 : t0; | 1179 Register rhs_mantissa = exp_first ? t1 : t0; |
(...skipping 42 matching lines...)
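EmitNanCheck then inspects those word pairs directly; exp_first merely records which scratch register ended up holding the exponent word, given the HeapNumber word order. A hedged C++ restatement of the NaN test itself:

#include <cstdint>

// A double is NaN exactly when all 11 exponent bits are set and the
// 52-bit mantissa is non-zero (an all-ones exponent with a zero
// mantissa is an infinity instead).
bool IsNanFromWords(uint32_t exponent_word, uint32_t mantissa_word) {
  uint32_t exponent = (exponent_word >> 20) & 0x7FF;
  uint64_t mantissa =
      (static_cast<uint64_t>(exponent_word & 0xFFFFF) << 32) | mantissa_word;
  return exponent == 0x7FF && mantissa != 0;
}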
1230 // Call C routine that may not cause GC or other trouble. | 1222 // Call C routine that may not cause GC or other trouble. |
1231 // We use a call and return manually because we need argument slots to | 1223 // We use a call and return manually because we need argument slots to |
1232 // be freed. | 1224 // be freed. |
1233 | 1225 |
1234 Label return_result_not_equal, return_result_equal; | 1226 Label return_result_not_equal, return_result_equal; |
1235 if (cc == eq) { | 1227 if (cc == eq) { |
1236 // Doubles are not equal unless they have the same bit pattern. | 1228 // Doubles are not equal unless they have the same bit pattern. |
1237 // Exception: 0 and -0. | 1229 // Exception: 0 and -0. |
1238 bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset); | 1230 bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset); |
1239 if (CpuFeatures::IsSupported(FPU)) { | 1231 if (CpuFeatures::IsSupported(FPU)) { |
1240 CpuFeatures::Scope scope(FPU); | 1232 CpuFeatures::Scope scope(FPU); |
1241 // Lhs and rhs are already loaded to f12 and f14 register pairs. | 1233 // Lhs and rhs are already loaded to f12 and f14 register pairs. |
1242 __ mfc1(t0, f14); // f14 has LS 32 bits of rhs. | 1234 __ Move(t0, t1, f14); |
1243 __ mfc1(t1, f15); // f15 has MS 32 bits of rhs. | 1235 __ Move(t2, t3, f12); |
1244 __ mfc1(t2, f12); // f12 has LS 32 bits of lhs. | |
1245 __ mfc1(t3, f13); // f13 has MS 32 bits of lhs. | |
1246 } else { | 1236 } else { |
1247 // Lhs and rhs are already loaded to GP registers. | 1237 // Lhs and rhs are already loaded to GP registers. |
1248 __ mov(t0, a0); // a0 has LS 32 bits of rhs. | 1238 __ mov(t0, a0); // a0 has LS 32 bits of rhs. |
1249 __ mov(t1, a1); // a1 has MS 32 bits of rhs. | 1239 __ mov(t1, a1); // a1 has MS 32 bits of rhs. |
1250 __ mov(t2, a2); // a2 has LS 32 bits of lhs. | 1240 __ mov(t2, a2); // a2 has LS 32 bits of lhs. |
1251 __ mov(t3, a3); // a3 has MS 32 bits of lhs. | 1241 __ mov(t3, a3); // a3 has MS 32 bits of lhs. |
1252 } | 1242 } |
1253 Register rhs_exponent = exp_first ? t0 : t1; | 1243 Register rhs_exponent = exp_first ? t0 : t1; |
1254 Register lhs_exponent = exp_first ? t2 : t3; | 1244 Register lhs_exponent = exp_first ? t2 : t3; |
1255 Register rhs_mantissa = exp_first ? t1 : t0; | 1245 Register rhs_mantissa = exp_first ? t1 : t0; |
(...skipping 21 matching lines...)
1277 | 1267 |
1278 if (!CpuFeatures::IsSupported(FPU)) { | 1268 if (!CpuFeatures::IsSupported(FPU)) { |
1279 __ push(ra); | 1269 __ push(ra); |
1280 __ PrepareCallCFunction(4, t4); // Two doubles count as 4 arguments. | 1270 __ PrepareCallCFunction(4, t4); // Two doubles count as 4 arguments. |
1281 if (!IsMipsSoftFloatABI) { | 1271 if (!IsMipsSoftFloatABI) { |
1282 // We are not using MIPS FPU instructions, and parameters for the runtime | 1272 // We are not using MIPS FPU instructions, and parameters for the runtime |
1283 // function call are prepared in a0-a3 registers, but the function we are | 1273 // function call are prepared in a0-a3 registers, but the function we are |
1284 // calling is compiled with the hard-float flag and expects the hard-float ABI | 1274 // calling is compiled with the hard-float flag and expects the hard-float ABI |
1285 // (parameters in f12/f14 registers). We need to copy parameters from | 1275 // (parameters in f12/f14 registers). We need to copy parameters from |
1286 // a0-a3 registers to f12/f14 register pairs. | 1276 // a0-a3 registers to f12/f14 register pairs. |
1287 __ mtc1(a0, f12); | 1277 __ Move(f12, a0, a1); |
1288 __ mtc1(a1, f13); | 1278 __ Move(f14, a2, a3); |
1289 __ mtc1(a2, f14); | |
1290 __ mtc1(a3, f15); | |
1291 } | 1279 } |
1292 __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()), 4); | 1280 __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()), 4); |
1293 __ pop(ra); // Because this function returns int, result is in v0. | 1281 __ pop(ra); // Because this function returns int, result is in v0. |
1294 __ Ret(); | 1282 __ Ret(); |
1295 } else { | 1283 } else { |
1296 CpuFeatures::Scope scope(FPU); | 1284 CpuFeatures::Scope scope(FPU); |
1297 Label equal, less_than; | 1285 Label equal, less_than; |
1298 __ c(EQ, D, f12, f14); | 1286 __ c(EQ, D, f12, f14); |
1299 __ bc1t(&equal); | 1287 __ bc1t(&equal); |
1300 __ nop(); | 1288 __ nop(); |
(...skipping 1884 matching lines...)
3185 if (tagged) { | 3173 if (tagged) { |
3186 // Argument is a number and is on stack and in a0. | 3174 // Argument is a number and is on stack and in a0. |
3187 // Load argument and check if it is a smi. | 3175 // Load argument and check if it is a smi. |
3188 __ JumpIfNotSmi(a0, &input_not_smi); | 3176 __ JumpIfNotSmi(a0, &input_not_smi); |
3189 | 3177 |
3190 // Input is a smi. Convert to double and load the low and high words | 3178 // Input is a smi. Convert to double and load the low and high words |
3191 // of the double into a2, a3. | 3179 // of the double into a2, a3. |
3192 __ sra(t0, a0, kSmiTagSize); | 3180 __ sra(t0, a0, kSmiTagSize); |
3193 __ mtc1(t0, f4); | 3181 __ mtc1(t0, f4); |
3194 __ cvt_d_w(f4, f4); | 3182 __ cvt_d_w(f4, f4); |
3195 __ mfc1(a2, f4); | 3183 __ Move(a2, a3, f4); |
3196 __ mfc1(a3, f5); | |
3197 __ Branch(&loaded); | 3184 __ Branch(&loaded); |
3198 | 3185 |
3199 __ bind(&input_not_smi); | 3186 __ bind(&input_not_smi); |
3200 // Check if input is a HeapNumber. | 3187 // Check if input is a HeapNumber. |
3201 __ CheckMap(a0, | 3188 __ CheckMap(a0, |
3202 a1, | 3189 a1, |
3203 Heap::kHeapNumberMapRootIndex, | 3190 Heap::kHeapNumberMapRootIndex, |
3204 &calculate, | 3191 &calculate, |
3205 DONT_DO_SMI_CHECK); | 3192 DONT_DO_SMI_CHECK); |
3206 // Input is a HeapNumber. Store the | 3193 // Input is a HeapNumber. Store the |
3207 // low and high words into a2, a3. | 3194 // low and high words into a2, a3. |
3208 __ lw(a2, FieldMemOperand(a0, HeapNumber::kValueOffset)); | 3195 __ lw(a2, FieldMemOperand(a0, HeapNumber::kValueOffset)); |
3209 __ lw(a3, FieldMemOperand(a0, HeapNumber::kValueOffset + 4)); | 3196 __ lw(a3, FieldMemOperand(a0, HeapNumber::kValueOffset + 4)); |
3210 } else { | 3197 } else { |
3211 // Input is untagged double in f4. Output goes to f4. | 3198 // Input is untagged double in f4. Output goes to f4. |
3212 __ mfc1(a2, f4); | 3199 __ Move(a2, a3, f4); |
3213 __ mfc1(a3, f5); | |
3214 } | 3200 } |
3215 __ bind(&loaded); | 3201 __ bind(&loaded); |
3216 // a2 = low 32 bits of double value. | 3202 // a2 = low 32 bits of double value. |
3217 // a3 = high 32 bits of double value. | 3203 // a3 = high 32 bits of double value. |
3218 // Compute hash (the shifts are arithmetic): | 3204 // Compute hash (the shifts are arithmetic): |
3219 // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1); | 3205 // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1); |
3220 __ Xor(a1, a2, a3); | 3206 __ Xor(a1, a2, a3); |
3221 __ sra(t0, a1, 16); | 3207 __ sra(t0, a1, 16); |
3222 __ Xor(a1, a1, t0); | 3208 __ Xor(a1, a1, t0); |
3223 __ sra(t0, a1, 8); | 3209 __ sra(t0, a1, 8); |
(...skipping 123 matching lines...)
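The hash spelled out in the comment above can be restated directly; a hedged sketch in plain C++, where the arithmetic shifts are reproduced with an int32_t cast (matching the sra instructions) and the cache size is assumed to be a power of two so the final mask keeps the index in range:

#include <cstdint>

uint32_t TranscendentalCacheHash(uint32_t low, uint32_t high,
                                 uint32_t cache_size) {
  uint32_t h = low ^ high;
  // sra, not srl: shift in copies of the sign bit, as the comment notes.
  h ^= static_cast<uint32_t>(static_cast<int32_t>(h) >> 16);
  h ^= static_cast<uint32_t>(static_cast<int32_t>(h) >> 8);
  return h & (cache_size - 1);  // cache_size must be a power of two
}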
3347 __ LeaveInternalFrame(); | 3333 __ LeaveInternalFrame(); |
3348 __ Ret(); | 3334 __ Ret(); |
3349 } | 3335 } |
3350 } | 3336 } |
3351 | 3337 |
3352 | 3338 |
3353 void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm, | 3339 void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm, |
3354 Register scratch) { | 3340 Register scratch) { |
3355 __ push(ra); | 3341 __ push(ra); |
3356 __ PrepareCallCFunction(2, scratch); | 3342 __ PrepareCallCFunction(2, scratch); |
3357 __ mfc1(v0, f4); | 3343 if (IsMipsSoftFloatABI) { |
3358 __ mfc1(v1, f5); | 3344 __ Move(v0, v1, f4); |
| 3345 } else { |
| 3346 __ mov_d(f12, f4); |
| 3347 } |
3359 switch (type_) { | 3348 switch (type_) { |
3360 case TranscendentalCache::SIN: | 3349 case TranscendentalCache::SIN: |
3361 __ CallCFunction( | 3350 __ CallCFunction( |
3362 ExternalReference::math_sin_double_function(masm->isolate()), 2); | 3351 ExternalReference::math_sin_double_function(masm->isolate()), 2); |
3363 break; | 3352 break; |
3364 case TranscendentalCache::COS: | 3353 case TranscendentalCache::COS: |
3365 __ CallCFunction( | 3354 __ CallCFunction( |
3366 ExternalReference::math_cos_double_function(masm->isolate()), 2); | 3355 ExternalReference::math_cos_double_function(masm->isolate()), 2); |
3367 break; | 3356 break; |
3368 case TranscendentalCache::LOG: | 3357 case TranscendentalCache::LOG: |
(...skipping 75 matching lines...)
3444 // an untagged smi. Allocate a heap number and call a | 3433 // an untagged smi. Allocate a heap number and call a |
3445 // C function for integer exponents. The register containing | 3434 // C function for integer exponents. The register containing |
3446 // the heap number is callee-saved. | 3435 // the heap number is callee-saved. |
3447 __ AllocateHeapNumber(heapnumber, | 3436 __ AllocateHeapNumber(heapnumber, |
3448 scratch, | 3437 scratch, |
3449 scratch2, | 3438 scratch2, |
3450 heapnumbermap, | 3439 heapnumbermap, |
3451 &call_runtime); | 3440 &call_runtime); |
3452 __ push(ra); | 3441 __ push(ra); |
3453 __ PrepareCallCFunction(3, scratch); | 3442 __ PrepareCallCFunction(3, scratch); |
3454 // ABI (o32) for func(double d, int x): d in f12, x in a2. | 3443 __ SetCallCDoubleArguments(double_base, double_exponent); |
3455 ASSERT(double_base.is(f12)); | |
3456 ASSERT(exponent.is(a2)); | |
3457 if (IsMipsSoftFloatABI) { | |
3458 // Simulator case, supports FPU, but with soft-float passing. | |
3459 __ mfc1(a0, double_base); | |
3460 __ mfc1(a1, FPURegister::from_code(double_base.code() + 1)); | |
3461 } | |
3462 __ CallCFunction( | 3444 __ CallCFunction( |
3463 ExternalReference::power_double_int_function(masm->isolate()), 3); | 3445 ExternalReference::power_double_int_function(masm->isolate()), 4); |
3464 __ pop(ra); | 3446 __ pop(ra); |
3465 __ GetCFunctionDoubleResult(double_result); | 3447 __ GetCFunctionDoubleResult(double_result); |
3466 __ sdc1(double_result, | 3448 __ sdc1(double_result, |
3467 FieldMemOperand(heapnumber, HeapNumber::kValueOffset)); | 3449 FieldMemOperand(heapnumber, HeapNumber::kValueOffset)); |
3468 __ mov(v0, heapnumber); | 3450 __ mov(v0, heapnumber); |
3469 __ DropAndRet(2 * kPointerSize); | 3451 __ DropAndRet(2 * kPointerSize); |
3470 | 3452 |
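For Smi exponents the stub calls out to power_double_int_function. A hedged sketch of the semantics that helper is assumed to provide, via square-and-multiply (the runtime's actual implementation may differ):

#include <cstdio>

// Compute base raised to an integer exponent by repeated squaring.
double PowerDoubleInt(double base, int exponent) {
  unsigned n = exponent < 0 ? 0u - static_cast<unsigned>(exponent)
                            : static_cast<unsigned>(exponent);
  double result = 1.0;
  for (double b = base; n != 0; n >>= 1) {
    if (n & 1) result *= b;  // fold in the current power-of-two factor
    b *= b;
  }
  return exponent < 0 ? 1.0 / result : result;
}

int main() {
  std::printf("%g\n", PowerDoubleInt(2.0, 10));  // prints 1024
  std::printf("%g\n", PowerDoubleInt(2.0, -2));  // prints 0.25
  return 0;
}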
3471 __ bind(&exponent_not_smi); | 3453 __ bind(&exponent_not_smi); |
3472 __ lw(scratch, FieldMemOperand(exponent, JSObject::kMapOffset)); | 3454 __ lw(scratch, FieldMemOperand(exponent, JSObject::kMapOffset)); |
3473 __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap)); | 3455 __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap)); |
3474 // Exponent is a heapnumber. Load it into double register. | 3456 // Exponent is a heapnumber. Load it into double register. |
3475 __ ldc1(double_exponent, | 3457 __ ldc1(double_exponent, |
3476 FieldMemOperand(exponent, HeapNumber::kValueOffset)); | 3458 FieldMemOperand(exponent, HeapNumber::kValueOffset)); |
3477 | 3459 |
3478 // The base and the exponent are in double registers. | 3460 // The base and the exponent are in double registers. |
3479 // Allocate a heap number and call a C function for | 3461 // Allocate a heap number and call a C function for |
3480 // double exponents. The register containing | 3462 // double exponents. The register containing |
3481 // the heap number is callee-saved. | 3463 // the heap number is callee-saved. |
3482 __ AllocateHeapNumber(heapnumber, | 3464 __ AllocateHeapNumber(heapnumber, |
3483 scratch, | 3465 scratch, |
3484 scratch2, | 3466 scratch2, |
3485 heapnumbermap, | 3467 heapnumbermap, |
3486 &call_runtime); | 3468 &call_runtime); |
3487 __ push(ra); | 3469 __ push(ra); |
3488 __ PrepareCallCFunction(4, scratch); | 3470 __ PrepareCallCFunction(4, scratch); |
3489 // ABI (o32) for func(double a, double b): a in f12, b in f14. | 3471 // ABI (o32) for func(double a, double b): a in f12, b in f14. |
3490 ASSERT(double_base.is(f12)); | 3472 ASSERT(double_base.is(f12)); |
3491 ASSERT(double_exponent.is(f14)); | 3473 ASSERT(double_exponent.is(f14)); |
3492 if (IsMipsSoftFloatABI) { | 3474 __ SetCallCDoubleArguments(double_base, double_exponent); |
3493 __ mfc1(a0, double_base); | |
3494 __ mfc1(a1, FPURegister::from_code(double_base.code() + 1)); | |
3495 __ mfc1(a2, double_exponent); | |
3496 __ mfc1(a3, FPURegister::from_code(double_exponent.code() + 1)); | |
3497 } | |
3498 __ CallCFunction( | 3475 __ CallCFunction( |
3499 ExternalReference::power_double_double_function(masm->isolate()), 4); | 3476 ExternalReference::power_double_double_function(masm->isolate()), 4); |
3500 __ pop(ra); | 3477 __ pop(ra); |
3501 __ GetCFunctionDoubleResult(double_result); | 3478 __ GetCFunctionDoubleResult(double_result); |
3502 __ sdc1(double_result, | 3479 __ sdc1(double_result, |
3503 FieldMemOperand(heapnumber, HeapNumber::kValueOffset)); | 3480 FieldMemOperand(heapnumber, HeapNumber::kValueOffset)); |
3504 __ mov(v0, heapnumber); | 3481 __ mov(v0, heapnumber); |
3505 __ DropAndRet(2 * kPointerSize); | 3482 __ DropAndRet(2 * kPointerSize); |
3506 } | 3483 } |
3507 | 3484 |
(...skipping 377 matching lines...)
3885 // Reset the stack to the callee saved registers. | 3862 // Reset the stack to the callee saved registers. |
3886 __ addiu(sp, sp, -EntryFrameConstants::kCallerFPOffset); | 3863 __ addiu(sp, sp, -EntryFrameConstants::kCallerFPOffset); |
3887 | 3864 |
3888 // Restore callee saved registers from the stack. | 3865 // Restore callee saved registers from the stack. |
3889 __ MultiPop((kCalleeSaved | ra.bit()) & ~sp.bit()); | 3866 __ MultiPop((kCalleeSaved | ra.bit()) & ~sp.bit()); |
3890 // Return. | 3867 // Return. |
3891 __ Jump(ra); | 3868 __ Jump(ra); |
3892 } | 3869 } |
3893 | 3870 |
3894 | 3871 |
3895 // Uses registers a0 to t0. Expected input is | 3872 // Uses registers a0 to t0. |
3896 // object in a0 (or at sp+1*kPointerSize) and function in | 3873 // Expected input (depending on whether args are in registers or on the stack): |
3897 // a1 (or at sp), depending on whether or not | 3874 // * object: a0 or at sp + 1 * kPointerSize. |
3898 // args_in_registers() is true. | 3875 // * function: a1 or at sp. |
| 3876 // |
| 3877 // Inlined call site patching is a crankshaft-specific feature that is not |
| 3878 // implemented on MIPS. |
3899 void InstanceofStub::Generate(MacroAssembler* masm) { | 3879 void InstanceofStub::Generate(MacroAssembler* masm) { |
| 3880 // This is a crankshaft-specific feature that has not been implemented yet. |
| 3881 ASSERT(!HasCallSiteInlineCheck()); |
| 3882 // Call site inlining and patching implies arguments in registers. |
| 3883 ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck()); |
| 3884 // ReturnTrueFalse is only implemented for inlined call sites. |
| 3885 ASSERT(!ReturnTrueFalseObject() || HasCallSiteInlineCheck()); |
| 3886 |
3900 // Fixed register usage throughout the stub: | 3887 // Fixed register usage throughout the stub: |
3901 const Register object = a0; // Object (lhs). | 3888 const Register object = a0; // Object (lhs). |
3902 const Register map = a3; // Map of the object. | 3889 Register map = a3; // Map of the object. |
3903 const Register function = a1; // Function (rhs). | 3890 const Register function = a1; // Function (rhs). |
3904 const Register prototype = t0; // Prototype of the function. | 3891 const Register prototype = t0; // Prototype of the function. |
| 3892 const Register inline_site = t5; |
3905 const Register scratch = a2; | 3893 const Register scratch = a2; |
| 3894 |
3906 Label slow, loop, is_instance, is_not_instance, not_js_object; | 3895 Label slow, loop, is_instance, is_not_instance, not_js_object; |
| 3896 |
3907 if (!HasArgsInRegisters()) { | 3897 if (!HasArgsInRegisters()) { |
3908 __ lw(object, MemOperand(sp, 1 * kPointerSize)); | 3898 __ lw(object, MemOperand(sp, 1 * kPointerSize)); |
3909 __ lw(function, MemOperand(sp, 0)); | 3899 __ lw(function, MemOperand(sp, 0)); |
3910 } | 3900 } |
3911 | 3901 |
3912 // Check that the left hand is a JS object and load map. | 3902 // Check that the left hand is a JS object and load map. |
3913 __ JumpIfSmi(object, ¬_js_object); | 3903 __ JumpIfSmi(object, ¬_js_object); |
3914 __ IsObjectJSObjectType(object, map, scratch, ¬_js_object); | 3904 __ IsObjectJSObjectType(object, map, scratch, ¬_js_object); |
3915 | 3905 |
3916 // Look up the function and the map in the instanceof cache. | 3906 // If there is a call site cache don't look in the global cache, but do the |
3917 Label miss; | 3907 // real lookup and update the call site cache. |
3918 __ LoadRoot(t1, Heap::kInstanceofCacheFunctionRootIndex); | 3908 if (!HasCallSiteInlineCheck()) { |
3919 __ Branch(&miss, ne, function, Operand(t1)); | 3909 Label miss; |
3920 __ LoadRoot(t1, Heap::kInstanceofCacheMapRootIndex); | 3910 __ LoadRoot(t1, Heap::kInstanceofCacheFunctionRootIndex); |
3921 __ Branch(&miss, ne, map, Operand(t1)); | 3911 __ Branch(&miss, ne, function, Operand(t1)); |
3922 __ LoadRoot(v0, Heap::kInstanceofCacheAnswerRootIndex); | 3912 __ LoadRoot(t1, Heap::kInstanceofCacheMapRootIndex); |
3923 __ DropAndRet(HasArgsInRegisters() ? 0 : 2); | 3913 __ Branch(&miss, ne, map, Operand(t1)); |
| 3914 __ LoadRoot(v0, Heap::kInstanceofCacheAnswerRootIndex); |
| 3915 __ DropAndRet(HasArgsInRegisters() ? 0 : 2); |
3924 | 3916 |
3925 __ bind(&miss); | 3917 __ bind(&miss); |
| 3918 } |
| 3919 |
| 3920 // Get the prototype of the function. |
3926 __ TryGetFunctionPrototype(function, prototype, scratch, &slow); | 3921 __ TryGetFunctionPrototype(function, prototype, scratch, &slow); |
3927 | 3922 |
3928 // Check that the function prototype is a JS object. | 3923 // Check that the function prototype is a JS object. |
3929 __ JumpIfSmi(prototype, &slow); | 3924 __ JumpIfSmi(prototype, &slow); |
3930 __ IsObjectJSObjectType(prototype, scratch, scratch, &slow); | 3925 __ IsObjectJSObjectType(prototype, scratch, scratch, &slow); |
3931 | 3926 |
3932 __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex); | 3927 // Update the global instanceof or call site inlined cache with the current |
3933 __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex); | 3928 // map and function. The cached answer will be set when it is known below. |
| 3929 if (!HasCallSiteInlineCheck()) { |
| 3930 __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex); |
| 3931 __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex); |
| 3932 } else { |
| 3933 UNIMPLEMENTED_MIPS(); |
| 3934 } |
3934 | 3935 |
3935 // Register mapping: a3 is object map and t0 is function prototype. | 3936 // Register mapping: a3 is object map and t0 is function prototype. |
3936 // Get prototype of object into a2. | 3937 // Get prototype of object into a2. |
3937 __ lw(scratch, FieldMemOperand(map, Map::kPrototypeOffset)); | 3938 __ lw(scratch, FieldMemOperand(map, Map::kPrototypeOffset)); |
3938 | 3939 |
| 3940 // We don't need map any more. Use it as a scratch register. |
| 3941 Register scratch2 = map; |
| 3942 map = no_reg; |
| 3943 |
3939 // Loop through the prototype chain looking for the function prototype. | 3944 // Loop through the prototype chain looking for the function prototype. |
| 3945 __ LoadRoot(scratch2, Heap::kNullValueRootIndex); |
3940 __ bind(&loop); | 3946 __ bind(&loop); |
3941 __ Branch(&is_instance, eq, scratch, Operand(prototype)); | 3947 __ Branch(&is_instance, eq, scratch, Operand(prototype)); |
3942 __ LoadRoot(t1, Heap::kNullValueRootIndex); | 3948 __ Branch(&is_not_instance, eq, scratch, Operand(scratch2)); |
3943 __ Branch(&is_not_instance, eq, scratch, Operand(t1)); | |
3944 __ lw(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset)); | 3949 __ lw(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset)); |
3945 __ lw(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset)); | 3950 __ lw(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset)); |
3946 __ Branch(&loop); | 3951 __ Branch(&loop); |
3947 | 3952 |
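The loop reads naturally as a prototype-chain walk; note that the NEW side hoists the null-root load out of the loop into scratch2, saving one LoadRoot per iteration. A hedged restatement with hypothetical stand-in types (not V8's object model):

struct HeapObj {
  HeapObj* prototype;  // stands in for map->prototype
};

// Walk the chain: reaching the function's prototype means instance,
// reaching null means not an instance.
bool IsInPrototypeChain(HeapObj* current, HeapObj* function_prototype,
                        HeapObj* null_value) {
  while (true) {
    if (current == function_prototype) return true;  // &is_instance
    if (current == null_value) return false;         // &is_not_instance
    current = current->prototype;                    // follow the chain
  }
}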
3948 __ bind(&is_instance); | 3953 __ bind(&is_instance); |
3949 ASSERT(Smi::FromInt(0) == 0); | 3954 ASSERT(Smi::FromInt(0) == 0); |
3950 __ mov(v0, zero_reg); | 3955 if (!HasCallSiteInlineCheck()) { |
3951 __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex); | 3956 __ mov(v0, zero_reg); |
| 3957 __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex); |
| 3958 } else { |
| 3959 UNIMPLEMENTED_MIPS(); |
| 3960 } |
3952 __ DropAndRet(HasArgsInRegisters() ? 0 : 2); | 3961 __ DropAndRet(HasArgsInRegisters() ? 0 : 2); |
3953 | 3962 |
3954 __ bind(&is_not_instance); | 3963 __ bind(&is_not_instance); |
3955 __ li(v0, Operand(Smi::FromInt(1))); | 3964 if (!HasCallSiteInlineCheck()) { |
3956 __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex); | 3965 __ li(v0, Operand(Smi::FromInt(1))); |
| 3966 __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex); |
| 3967 } else { |
| 3968 UNIMPLEMENTED_MIPS(); |
| 3969 } |
3957 __ DropAndRet(HasArgsInRegisters() ? 0 : 2); | 3970 __ DropAndRet(HasArgsInRegisters() ? 0 : 2); |
3958 | 3971 |
3959 Label object_not_null, object_not_null_or_smi; | 3972 Label object_not_null, object_not_null_or_smi; |
3960 __ bind(¬_js_object); | 3973 __ bind(¬_js_object); |
3961 // Before null, smi and string value checks, check that the rhs is a function | 3974 // Before null, smi and string value checks, check that the rhs is a function |
3962 // as for a non-function rhs an exception needs to be thrown. | 3975 // as for a non-function rhs an exception needs to be thrown. |
3963 __ JumpIfSmi(function, &slow); | 3976 __ JumpIfSmi(function, &slow); |
3964 __ GetObjectType(function, map, scratch); | 3977 __ GetObjectType(function, scratch2, scratch); |
3965 __ Branch(&slow, ne, scratch, Operand(JS_FUNCTION_TYPE)); | 3978 __ Branch(&slow, ne, scratch, Operand(JS_FUNCTION_TYPE)); |
3966 | 3979 |
3967 // Null is not instance of anything. | 3980 // Null is not instance of anything. |
3968 __ Branch(&object_not_null, ne, scratch, | 3981 __ Branch(&object_not_null, ne, scratch, |
3969 Operand(masm->isolate()->factory()->null_value())); | 3982 Operand(masm->isolate()->factory()->null_value())); |
3970 __ li(v0, Operand(Smi::FromInt(1))); | 3983 __ li(v0, Operand(Smi::FromInt(1))); |
3971 __ DropAndRet(HasArgsInRegisters() ? 0 : 2); | 3984 __ DropAndRet(HasArgsInRegisters() ? 0 : 2); |
3972 | 3985 |
3973 __ bind(&object_not_null); | 3986 __ bind(&object_not_null); |
3974 // Smi values are not instances of anything. | 3987 // Smi values are not instances of anything. |
3975 __ JumpIfNotSmi(object, &object_not_null_or_smi); | 3988 __ JumpIfNotSmi(object, &object_not_null_or_smi); |
3976 __ li(v0, Operand(Smi::FromInt(1))); | 3989 __ li(v0, Operand(Smi::FromInt(1))); |
3977 __ DropAndRet(HasArgsInRegisters() ? 0 : 2); | 3990 __ DropAndRet(HasArgsInRegisters() ? 0 : 2); |
3978 | 3991 |
3979 __ bind(&object_not_null_or_smi); | 3992 __ bind(&object_not_null_or_smi); |
3980 // String values are not instances of anything. | 3993 // String values are not instances of anything. |
3981 __ IsObjectJSStringType(object, scratch, &slow); | 3994 __ IsObjectJSStringType(object, scratch, &slow); |
3982 __ li(v0, Operand(Smi::FromInt(1))); | 3995 __ li(v0, Operand(Smi::FromInt(1))); |
3983 __ DropAndRet(HasArgsInRegisters() ? 0 : 2); | 3996 __ DropAndRet(HasArgsInRegisters() ? 0 : 2); |
3984 | 3997 |
3985 // Slow-case. Tail call builtin. | 3998 // Slow-case. Tail call builtin. |
3986 __ bind(&slow); | 3999 __ bind(&slow); |
3987 if (HasArgsInRegisters()) { | 4000 if (!ReturnTrueFalseObject()) { |
| 4001 if (HasArgsInRegisters()) { |
| 4002 __ Push(a0, a1); |
| 4003 } |
| 4004 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION); |
| 4005 } else { |
| 4006 __ EnterInternalFrame(); |
3988 __ Push(a0, a1); | 4007 __ Push(a0, a1); |
| 4008 __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION); |
| 4009 __ LeaveInternalFrame(); |
| 4010 __ mov(a0, v0); |
| 4011 __ LoadRoot(v0, Heap::kTrueValueRootIndex); |
| 4012 __ DropAndRet(HasArgsInRegisters() ? 0 : 2, eq, a0, Operand(zero_reg)); |
| 4013 __ LoadRoot(v0, Heap::kFalseValueRootIndex); |
| 4014 __ DropAndRet(HasArgsInRegisters() ? 0 : 2); |
3989 } | 4015 } |
3990 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION); | |
3991 } | 4016 } |
3992 | 4017 |
3993 | 4018 |
| 4019 Register InstanceofStub::left() { return a0; } |
| 4020 |
| 4021 |
| 4022 Register InstanceofStub::right() { return a1; } |
| 4023 |
| 4024 |
3994 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) { | 4025 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) { |
3995 // The displacement is the offset of the last parameter (if any) | 4026 // The displacement is the offset of the last parameter (if any) |
3996 // relative to the frame pointer. | 4027 // relative to the frame pointer. |
3997 static const int kDisplacement = | 4028 static const int kDisplacement = |
3998 StandardFrameConstants::kCallerSPOffset - kPointerSize; | 4029 StandardFrameConstants::kCallerSPOffset - kPointerSize; |
3999 | 4030 |
4000 // Check that the key is a smi. | 4031 // Check that the key is a smi. |
4001 Label slow; | 4032 Label slow; |
4002 __ JumpIfNotSmi(a1, &slow); | 4033 __ JumpIfNotSmi(a1, &slow); |
4003 | 4034 |
(...skipping 2613 matching lines...)
6617 __ mov(result, zero_reg); | 6648 __ mov(result, zero_reg); |
6618 __ Ret(); | 6649 __ Ret(); |
6619 } | 6650 } |
6620 | 6651 |
6621 | 6652 |
6622 #undef __ | 6653 #undef __ |
6623 | 6654 |
6624 } } // namespace v8::internal | 6655 } } // namespace v8::internal |
6625 | 6656 |
6626 #endif // V8_TARGET_ARCH_MIPS | 6657 #endif // V8_TARGET_ARCH_MIPS |