| OLD | NEW |
| 1 // Copyright 2010 the V8 project authors. All rights reserved. | 1 // Copyright 2010 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 2454 matching lines...) |
| 2465 | 2465 |
| 2466 void TypeRecordingBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) { | 2466 void TypeRecordingBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) { |
| 2467 __ pop(ecx); | 2467 __ pop(ecx); |
| 2468 __ push(edx); | 2468 __ push(edx); |
| 2469 __ push(eax); | 2469 __ push(eax); |
| 2470 __ push(ecx); | 2470 __ push(ecx); |
| 2471 } | 2471 } |
| 2472 | 2472 |
| 2473 | 2473 |
| 2474 void TranscendentalCacheStub::Generate(MacroAssembler* masm) { | 2474 void TranscendentalCacheStub::Generate(MacroAssembler* masm) { |
| 2475 // Input on stack: | 2475 // TAGGED case: |
| 2476 // esp[4]: argument (should be number). | 2476 // Input: |
| 2477 // esp[0]: return address. | 2477 // esp[4]: tagged number input argument (should be number). |
| 2478 // Test that eax is a number. | 2478 // esp[0]: return address. |
| 2479 // Output: |
| 2480 // eax: tagged double result. |
| 2481 // UNTAGGED case: |
| 2482 // Input: |
| 2483 // esp[0]: return address. |
| 2484 // xmm1: untagged double input argument |
| 2485 // Output: |
| 2486 // xmm1: untagged double result. |
| 2487 |
| 2479 Label runtime_call; | 2488 Label runtime_call; |
| 2480 Label runtime_call_clear_stack; | 2489 Label runtime_call_clear_stack; |
| 2481 NearLabel input_not_smi; | 2490 Label skip_cache; |
| 2482 NearLabel loaded; | 2491 Label call_runtime; |
| 2483 __ mov(eax, Operand(esp, kPointerSize)); | 2492 const bool tagged = (argument_type_ == TAGGED); |
| 2484 __ test(eax, Immediate(kSmiTagMask)); | 2493 if (tagged) { |
| 2485 __ j(not_zero, &input_not_smi); | 2494 // Test that eax is a number. |
| 2486 // Input is a smi. Untag and load it onto the FPU stack. | 2495 NearLabel input_not_smi; |
| 2487 // Then load the low and high words of the double into ebx, edx. | 2496 NearLabel loaded; |
| 2488 STATIC_ASSERT(kSmiTagSize == 1); | 2497 __ mov(eax, Operand(esp, kPointerSize)); |
| 2489 __ sar(eax, 1); | 2498 __ test(eax, Immediate(kSmiTagMask)); |
| 2490 __ sub(Operand(esp), Immediate(2 * kPointerSize)); | 2499 __ j(not_zero, &input_not_smi); |
| 2491 __ mov(Operand(esp, 0), eax); | 2500 // Input is a smi. Untag and load it onto the FPU stack. |
| 2492 __ fild_s(Operand(esp, 0)); | 2501 // Then load the low and high words of the double into ebx, edx. |
| 2493 __ fst_d(Operand(esp, 0)); | 2502 STATIC_ASSERT(kSmiTagSize == 1); |
| 2494 __ pop(edx); | 2503 __ sar(eax, 1); |
| 2495 __ pop(ebx); | 2504 __ sub(Operand(esp), Immediate(2 * kPointerSize)); |
| 2496 __ jmp(&loaded); | 2505 __ mov(Operand(esp, 0), eax); |
| 2497 __ bind(&input_not_smi); | 2506 __ fild_s(Operand(esp, 0)); |
| 2498 // Check if input is a HeapNumber. | 2507 __ fst_d(Operand(esp, 0)); |
| 2499 __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset)); | 2508 __ pop(edx); |
| 2500 __ cmp(Operand(ebx), Immediate(Factory::heap_number_map())); | 2509 __ pop(ebx); |
| 2501 __ j(not_equal, &runtime_call); | 2510 __ jmp(&loaded); |
| 2502 // Input is a HeapNumber. Push it on the FPU stack and load its | 2511 __ bind(&input_not_smi); |
| 2503 // low and high words into ebx, edx. | 2512 // Check if input is a HeapNumber. |
| 2504 __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset)); | 2513 __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset)); |
| 2505 __ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset)); | 2514 __ cmp(Operand(ebx), Immediate(Factory::heap_number_map())); |
| 2506 __ mov(ebx, FieldOperand(eax, HeapNumber::kMantissaOffset)); | 2515 __ j(not_equal, &runtime_call); |
| 2516 // Input is a HeapNumber. Push it on the FPU stack and load its |
| 2517 // low and high words into ebx, edx. |
| 2518 __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset)); |
| 2519 __ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset)); |
| 2520 __ mov(ebx, FieldOperand(eax, HeapNumber::kMantissaOffset)); |
| 2507 | 2521 |
| 2508 __ bind(&loaded); | 2522 __ bind(&loaded); |
| 2509 // ST[0] == double value | 2523 } else { // UNTAGGED. |
| 2524 if (CpuFeatures::IsSupported(SSE4_1)) { |
| 2525 CpuFeatures::Scope sse4_scope(SSE4_1); |
| 2526 __ pextrd(Operand(edx), xmm1, 0x1); // copy xmm1[63..32] to edx. |
| 2527 } else { |
| 2528 __ pshufd(xmm0, xmm1, 0x1); |
| 2529 __ movd(Operand(edx), xmm0); |
| 2530 } |
| 2531 __ movd(Operand(ebx), xmm1); |
| 2532 } |
| 2533 |
| 2534 // ST[0] or xmm1 == double value |
| 2510 // ebx = low 32 bits of double value | 2535 // ebx = low 32 bits of double value |
| 2511 // edx = high 32 bits of double value | 2536 // edx = high 32 bits of double value |
| 2512 // Compute hash (the shifts are arithmetic): | 2537 // Compute hash (the shifts are arithmetic): |
| 2513 // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1); | 2538 // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1); |
| 2514 __ mov(ecx, ebx); | 2539 __ mov(ecx, ebx); |
| 2515 __ xor_(ecx, Operand(edx)); | 2540 __ xor_(ecx, Operand(edx)); |
| 2516 __ mov(eax, ecx); | 2541 __ mov(eax, ecx); |
| 2517 __ sar(eax, 16); | 2542 __ sar(eax, 16); |
| 2518 __ xor_(ecx, Operand(eax)); | 2543 __ xor_(ecx, Operand(eax)); |
| 2519 __ mov(eax, ecx); | 2544 __ mov(eax, ecx); |
| 2520 __ sar(eax, 8); | 2545 __ sar(eax, 8); |
| 2521 __ xor_(ecx, Operand(eax)); | 2546 __ xor_(ecx, Operand(eax)); |
| 2522 ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize)); | 2547 ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize)); |
| 2523 __ and_(Operand(ecx), Immediate(TranscendentalCache::kCacheSize - 1)); | 2548 __ and_(Operand(ecx), Immediate(TranscendentalCache::kCacheSize - 1)); |
| 2524 | 2549 |
| 2525 // ST[0] == double value. | 2550 // ST[0] or xmm1 == double value. |
| 2526 // ebx = low 32 bits of double value. | 2551 // ebx = low 32 bits of double value. |
| 2527 // edx = high 32 bits of double value. | 2552 // edx = high 32 bits of double value. |
| 2528 // ecx = TranscendentalCache::hash(double value). | 2553 // ecx = TranscendentalCache::hash(double value). |
| 2529 __ mov(eax, | 2554 __ mov(eax, |
| 2530 Immediate(ExternalReference::transcendental_cache_array_address())); | 2555 Immediate(ExternalReference::transcendental_cache_array_address())); |
| 2531 // Eax points to cache array. | 2556 // Eax points to cache array. |
| 2532 __ mov(eax, Operand(eax, type_ * sizeof(TranscendentalCache::caches_[0]))); | 2557 __ mov(eax, Operand(eax, type_ * sizeof(TranscendentalCache::caches_[0]))); |
| 2533 // Eax points to the cache for the type type_. | 2558 // Eax points to the cache for the type type_. |
| 2534 // If NULL, the cache hasn't been initialized yet, so go through runtime. | 2559 // If NULL, the cache hasn't been initialized yet, so go through runtime. |
| 2535 __ test(eax, Operand(eax)); | 2560 __ test(eax, Operand(eax)); |
| (...skipping 16 matching lines...) |
| 2552 __ lea(ecx, Operand(ecx, ecx, times_2, 0)); | 2577 __ lea(ecx, Operand(ecx, ecx, times_2, 0)); |
| 2553 __ lea(ecx, Operand(eax, ecx, times_4, 0)); | 2578 __ lea(ecx, Operand(eax, ecx, times_4, 0)); |
| 2554 // Check if cache matches: Double value is stored in uint32_t[2] array. | 2579 // Check if cache matches: Double value is stored in uint32_t[2] array. |
| 2555 NearLabel cache_miss; | 2580 NearLabel cache_miss; |
| 2556 __ cmp(ebx, Operand(ecx, 0)); | 2581 __ cmp(ebx, Operand(ecx, 0)); |
| 2557 __ j(not_equal, &cache_miss); | 2582 __ j(not_equal, &cache_miss); |
| 2558 __ cmp(edx, Operand(ecx, kIntSize)); | 2583 __ cmp(edx, Operand(ecx, kIntSize)); |
| 2559 __ j(not_equal, &cache_miss); | 2584 __ j(not_equal, &cache_miss); |
| 2560 // Cache hit! | 2585 // Cache hit! |
| 2561 __ mov(eax, Operand(ecx, 2 * kIntSize)); | 2586 __ mov(eax, Operand(ecx, 2 * kIntSize)); |
| 2562 __ fstp(0); | 2587 if (tagged) { |
| 2563 __ ret(kPointerSize); | 2588 __ fstp(0); |
| 2589 __ ret(kPointerSize); |
| 2590 } else { // UNTAGGED. |
| 2591 __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset)); |
| 2592 __ Ret(); |
| 2593 } |
| 2564 | 2594 |
| 2565 __ bind(&cache_miss); | 2595 __ bind(&cache_miss); |
| 2566 // Update cache with new value. | 2596 // Update cache with new value. |
| 2567 // We are short on registers, so use no_reg as scratch. | 2597 // We are short on registers, so use no_reg as scratch. |
| 2568 // This gives slightly larger code. | 2598 // This gives slightly larger code. |
| 2569 __ AllocateHeapNumber(eax, edi, no_reg, &runtime_call_clear_stack); | 2599 if (tagged) { |
| 2600 __ AllocateHeapNumber(eax, edi, no_reg, &runtime_call_clear_stack); |
| 2601 } else { // UNTAGGED. |
| 2602 __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache); |
| 2603 __ sub(Operand(esp), Immediate(kDoubleSize)); |
| 2604 __ movdbl(Operand(esp, 0), xmm1); |
| 2605 __ fld_d(Operand(esp, 0)); |
| 2606 __ add(Operand(esp), Immediate(kDoubleSize)); |
| 2607 } |
| 2570 GenerateOperation(masm); | 2608 GenerateOperation(masm); |
| 2571 __ mov(Operand(ecx, 0), ebx); | 2609 __ mov(Operand(ecx, 0), ebx); |
| 2572 __ mov(Operand(ecx, kIntSize), edx); | 2610 __ mov(Operand(ecx, kIntSize), edx); |
| 2573 __ mov(Operand(ecx, 2 * kIntSize), eax); | 2611 __ mov(Operand(ecx, 2 * kIntSize), eax); |
| 2574 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); | 2612 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); |
| 2575 __ ret(kPointerSize); | 2613 if (tagged) { |
| 2614 __ ret(kPointerSize); |
| 2615 } else { // UNTAGGED. |
| 2616 __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset)); |
| 2617 __ Ret(); |
| 2576 | 2618 |
| 2577 __ bind(&runtime_call_clear_stack); | 2619 // Skip cache and return answer directly, only in untagged case. |
| 2578 __ fstp(0); | 2620 __ bind(&skip_cache); |
| 2579 __ bind(&runtime_call); | 2621 __ sub(Operand(esp), Immediate(kDoubleSize)); |
| 2580 __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1); | 2622 __ movdbl(Operand(esp, 0), xmm1); |
| 2623 __ fld_d(Operand(esp, 0)); |
| 2624 GenerateOperation(masm); |
| 2625 __ fstp_d(Operand(esp, 0)); |
| 2626 __ movdbl(xmm1, Operand(esp, 0)); |
| 2627 __ add(Operand(esp), Immediate(kDoubleSize)); |
| 2628 __ Ret(); |
| 2629 } |
| 2630 |
| 2631 // Call runtime, doing whatever allocation and cleanup is necessary. |
| 2632 if (tagged) { |
| 2633 __ bind(&runtime_call_clear_stack); |
| 2634 __ fstp(0); |
| 2635 __ bind(&runtime_call); |
| 2636 __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1); |
| 2637 } else { // UNTAGGED. |
| 2638 __ bind(&call_runtime); |
| 2639 __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache); |
| 2640 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm1); |
| 2641 __ EnterInternalFrame(); |
| 2642 __ push(eax); |
| 2643 __ CallRuntime(RuntimeFunction(), 1); |
| 2644 __ LeaveInternalFrame(); |
| 2645 __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset)); |
| 2646 __ Ret(); |
| 2647 } |
| 2581 } | 2648 } |
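For reference while reading the cache probe above, this is the lookup the stub implements, sketched as standalone C++. It is an illustration only, not V8 code: Element mirrors the layout the DEBUG block asserts (two uint32_t input words followed by a pointer, 12 bytes on ia32), kCacheSize = 512 is assumed here and only needs to be the power of two the ASSERT requires, and a plain double* stands in for the tagged HeapNumber pointer the stub actually stores.

    #include <cstdint>
    #include <cstring>

    // Mirrors the layout asserted by the DEBUG block: in[0], in[1], output.
    struct Element {
      uint32_t in[2];   // low and high words of the cached input double
      double* output;   // in V8 this is a tagged HeapNumber pointer
    };

    const int kCacheSize = 512;  // assumed; must be a power of two (see the ASSERT)
    Element cache[kCacheSize];   // zero-initialized, so output starts out NULL

    // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h &= kCacheSize - 1
    // The stub uses sar, i.e. arithmetic shifts; the int32_t casts mimic that
    // (arithmetic right shift of a negative value is assumed, as on ia32).
    uint32_t Hash(uint32_t low, uint32_t high) {
      uint32_t h = low ^ high;
      h ^= static_cast<uint32_t>(static_cast<int32_t>(h) >> 16);
      h ^= static_cast<uint32_t>(static_cast<int32_t>(h) >> 8);
      return h & (kCacheSize - 1);
    }

    // Cache hit only if both stored input words match; otherwise the stub
    // computes the result, overwrites the entry, and returns the new value.
    bool Lookup(double input, double* result) {
      uint32_t words[2];
      std::memcpy(words, &input, sizeof(words));  // words[0]=low, words[1]=high on little-endian ia32
      const Element& e = cache[Hash(words[0], words[1])];
      if (e.output != NULL && e.in[0] == words[0] && e.in[1] == words[1]) {
        *result = *e.output;
        return true;
      }
      return false;
    }

A hash mismatch between this sketch and the real cache would only cost extra misses, never a wrong result, because the stored input words are compared before the cached output is used.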
| 2582 | 2649 |
| 2583 | 2650 |
| 2584 Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() { | 2651 Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() { |
| 2585 switch (type_) { | 2652 switch (type_) { |
| 2586 // Add more cases when necessary. | |
| 2587 case TranscendentalCache::SIN: return Runtime::kMath_sin; | 2653 case TranscendentalCache::SIN: return Runtime::kMath_sin; |
| 2588 case TranscendentalCache::COS: return Runtime::kMath_cos; | 2654 case TranscendentalCache::COS: return Runtime::kMath_cos; |
| 2589 case TranscendentalCache::LOG: return Runtime::kMath_log; | 2655 case TranscendentalCache::LOG: return Runtime::kMath_log; |
| 2590 default: | 2656 default: |
| 2591 UNIMPLEMENTED(); | 2657 UNIMPLEMENTED(); |
| 2592 return Runtime::kAbort; | 2658 return Runtime::kAbort; |
| 2593 } | 2659 } |
| 2594 } | 2660 } |
| 2595 | 2661 |
| 2596 | 2662 |
| 2597 void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) { | 2663 void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) { |
| 2598 // Only free register is edi. | 2664 // Only free register is edi. |
| 2599 // Input value is on FP stack, and also in ebx/edx. Address of result | 2665 // Input value is on FP stack, and also in ebx/edx. |
| 2600 // (a newly allocated HeapNumber) is in eax. | 2666 // Input value is possibly in xmm1. |
| 2601 NearLabel done; | 2667 // Address of result (a newly allocated HeapNumber) may be in eax. |
| 2602 if (type_ == TranscendentalCache::SIN || type_ == TranscendentalCache::COS) { | 2668 if (type_ == TranscendentalCache::SIN || type_ == TranscendentalCache::COS) { |
| 2603 // Both fsin and fcos require arguments in the range +/-2^63 and | 2669 // Both fsin and fcos require arguments in the range +/-2^63 and |
| 2604 // return NaN for infinities and NaN. They can share all code except | 2670 // return NaN for infinities and NaN. They can share all code except |
| 2605 // the actual fsin/fcos operation. | 2671 // the actual fsin/fcos operation. |
| 2606 NearLabel in_range; | 2672 NearLabel in_range, done; |
| 2607 // If argument is outside the range -2^63..2^63, fsin/cos doesn't | 2673 // If argument is outside the range -2^63..2^63, fsin/cos doesn't |
| 2608 // work. We must reduce it to the appropriate range. | 2674 // work. We must reduce it to the appropriate range. |
| 2609 __ mov(edi, edx); | 2675 __ mov(edi, edx); |
| 2610 __ and_(Operand(edi), Immediate(0x7ff00000)); // Exponent only. | 2676 __ and_(Operand(edi), Immediate(0x7ff00000)); // Exponent only. |
| 2611 int supported_exponent_limit = | 2677 int supported_exponent_limit = |
| 2612 (63 + HeapNumber::kExponentBias) << HeapNumber::kExponentShift; | 2678 (63 + HeapNumber::kExponentBias) << HeapNumber::kExponentShift; |
| 2613 __ cmp(Operand(edi), Immediate(supported_exponent_limit)); | 2679 __ cmp(Operand(edi), Immediate(supported_exponent_limit)); |
| 2614 __ j(below, &in_range, taken); | 2680 __ j(below, &in_range, taken); |
| 2615 // Check for infinity and NaN. Both return NaN for sin. | 2681 // Check for infinity and NaN. Both return NaN for sin. |
| 2616 __ cmp(Operand(edi), Immediate(0x7ff00000)); | 2682 __ cmp(Operand(edi), Immediate(0x7ff00000)); |
| (...skipping 59 matching lines...) |
| 2676 __ bind(&done); | 2742 __ bind(&done); |
| 2677 } else { | 2743 } else { |
| 2678 ASSERT(type_ == TranscendentalCache::LOG); | 2744 ASSERT(type_ == TranscendentalCache::LOG); |
| 2679 __ fldln2(); | 2745 __ fldln2(); |
| 2680 __ fxch(); | 2746 __ fxch(); |
| 2681 __ fyl2x(); | 2747 __ fyl2x(); |
| 2682 } | 2748 } |
| 2683 } | 2749 } |
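A note on the LOG path in GenerateOperation: fldln2 pushes ln 2, fxch swaps it underneath the input, and fyl2x computes ST(1) * log2(ST(0)) and pops, leaving ln 2 * log2(x) = ln x in ST(0). The same identity in standalone C++ (illustrative only, not V8 code):

    #include <cmath>
    #include <cassert>

    // ln(x) the way fldln2/fxch/fyl2x evaluates it: ln 2 * log2(x).
    double log_via_fyl2x(double x) {
      const double ln2 = std::log(2.0);  // value pushed by fldln2
      return ln2 * std::log2(x);         // value left in ST(0) by fyl2x
    }

    int main() {
      assert(std::abs(log_via_fyl2x(10.0) - std::log(10.0)) < 1e-12);
      return 0;
    }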
| 2684 | 2750 |
| 2685 | 2751 |
| 2686 void TranscendentalCacheSSE2Stub::Generate(MacroAssembler* masm) { | |
| 2687 // Input on stack: | |
| 2688 // esp[0]: return address. | |
| 2689 // Input in registers: | |
| 2690 // xmm1: untagged double input argument. | |
| 2691 // Output: | |
| 2692 // xmm1: untagged double result. | |
| 2693 Label skip_cache; | |
| 2694 Label call_runtime; | |
| 2695 | |
| 2696 // Input is an untagged double in xmm1. | |
| 2697 // Compute hash (the shifts are arithmetic): | |
| 2698 // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1); | |
| 2699 if (CpuFeatures::IsSupported(SSE4_1)) { | |
| 2700 CpuFeatures::Scope sse4_scope(SSE4_1); | |
| 2701 __ pextrd(Operand(edx), xmm1, 0x1); // copy xmm1[63..32] to edx. | |
| 2702 } else { | |
| 2703 __ pshufd(xmm0, xmm1, 0x1); | |
| 2704 __ movd(Operand(edx), xmm0); | |
| 2705 } | |
| 2706 __ movd(Operand(ebx), xmm1); | |
| 2707 | |
| 2708 // xmm1 = double value | |
| 2709 // ebx = low 32 bits of double value | |
| 2710 // edx = high 32 bits of double value | |
| 2711 // Compute hash (the shifts are arithmetic): | |
| 2712 // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1); | |
| 2713 __ mov(ecx, ebx); | |
| 2714 __ xor_(ecx, Operand(edx)); | |
| 2715 __ mov(eax, ecx); | |
| 2716 __ sar(eax, 16); | |
| 2717 __ xor_(ecx, Operand(eax)); | |
| 2718 __ mov(eax, ecx); | |
| 2719 __ sar(eax, 8); | |
| 2720 __ xor_(ecx, Operand(eax)); | |
| 2721 ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize)); | |
| 2722 __ and_(Operand(ecx), Immediate(TranscendentalCache::kCacheSize - 1)); | |
| 2723 | |
| 2724 // xmm1 = double value. | |
| 2725 // ebx = low 32 bits of double value. | |
| 2726 // edx = high 32 bits of double value. | |
| 2727 // ecx = TranscendentalCache::hash(double value). | |
| 2728 __ mov(eax, | |
| 2729 Immediate(ExternalReference::transcendental_cache_array_address())); | |
| 2730 // Eax points to cache array. | |
| 2731 __ mov(eax, Operand(eax, type_ * sizeof(TranscendentalCache::caches_[0]))); | |
| 2732 // Eax points to the cache for the type type_. | |
| 2733 // If NULL, the cache hasn't been initialized yet, so go through runtime. | |
| 2734 __ test(eax, Operand(eax)); | |
| 2735 __ j(zero, &call_runtime); | |
| 2736 #ifdef DEBUG | |
| 2737 // Check that the layout of cache elements match expectations. | |
| 2738 { TranscendentalCache::Element test_elem[2]; | |
| 2739 char* elem_start = reinterpret_cast<char*>(&test_elem[0]); | |
| 2740 char* elem2_start = reinterpret_cast<char*>(&test_elem[1]); | |
| 2741 char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0])); | |
| 2742 char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1])); | |
| 2743 char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output)); | |
| 2744 CHECK_EQ(12, elem2_start - elem_start); // Two uint_32's and a pointer. | |
| 2745 CHECK_EQ(0, elem_in0 - elem_start); | |
| 2746 CHECK_EQ(kIntSize, elem_in1 - elem_start); | |
| 2747 CHECK_EQ(2 * kIntSize, elem_out - elem_start); | |
| 2748 } | |
| 2749 #endif | |
| 2750 // Find the address of the ecx'th entry in the cache, i.e., &eax[ecx*12]. | |
| 2751 __ lea(ecx, Operand(ecx, ecx, times_2, 0)); | |
| 2752 __ lea(ecx, Operand(eax, ecx, times_4, 0)); | |
| 2753 // Check if cache matches: Double value is stored in uint32_t[2] array. | |
| 2754 NearLabel cache_miss; | |
| 2755 __ cmp(ebx, Operand(ecx, 0)); | |
| 2756 __ j(not_equal, &cache_miss); | |
| 2757 __ cmp(edx, Operand(ecx, kIntSize)); | |
| 2758 __ j(not_equal, &cache_miss); | |
| 2759 // Cache hit! | |
| 2760 __ mov(eax, Operand(ecx, 2 * kIntSize)); | |
| 2761 __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset)); | |
| 2762 __ Ret(); | |
| 2763 | |
| 2764 __ bind(&cache_miss); | |
| 2765 // Update cache with new value. | |
| 2766 // We are short on registers, so use no_reg as scratch. | |
| 2767 // This gives slightly larger code. | |
| 2768 __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache); | |
| 2769 __ sub(Operand(esp), Immediate(kDoubleSize)); | |
| 2770 __ movdbl(Operand(esp, 0), xmm1); | |
| 2771 __ fld_d(Operand(esp, 0)); | |
| 2772 __ add(Operand(esp), Immediate(kDoubleSize)); | |
| 2773 GenerateOperation(masm); | |
| 2774 __ mov(Operand(ecx, 0), ebx); | |
| 2775 __ mov(Operand(ecx, kIntSize), edx); | |
| 2776 __ mov(Operand(ecx, 2 * kIntSize), eax); | |
| 2777 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); | |
| 2778 __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset)); | |
| 2779 __ Ret(); | |
| 2780 | |
| 2781 __ bind(&skip_cache); | |
| 2782 __ sub(Operand(esp), Immediate(kDoubleSize)); | |
| 2783 __ movdbl(Operand(esp, 0), xmm1); | |
| 2784 __ fld_d(Operand(esp, 0)); | |
| 2785 GenerateOperation(masm); | |
| 2786 __ fstp_d(Operand(esp, 0)); | |
| 2787 __ movdbl(xmm1, Operand(esp, 0)); | |
| 2788 __ add(Operand(esp), Immediate(kDoubleSize)); | |
| 2789 __ Ret(); | |
| 2790 | |
| 2791 __ bind(&call_runtime); | |
| 2792 __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache); | |
| 2793 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm1); | |
| 2794 __ EnterInternalFrame(); | |
| 2795 __ push(eax); | |
| 2796 __ CallRuntime(RuntimeFunction(), 1); | |
| 2797 __ LeaveInternalFrame(); | |
| 2798 __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset)); | |
| 2799 __ Ret(); | |
| 2800 } | |
| 2801 | |
| 2802 | |
| 2803 Runtime::FunctionId TranscendentalCacheSSE2Stub::RuntimeFunction() { | |
| 2804 switch (type_) { | |
| 2805 // Add more cases when necessary. | |
| 2806 case TranscendentalCache::LOG: return Runtime::kMath_log; | |
| 2807 default: | |
| 2808 UNIMPLEMENTED(); | |
| 2809 return Runtime::kAbort; | |
| 2810 } | |
| 2811 } | |
| 2812 | |
| 2813 | |
| 2814 void TranscendentalCacheSSE2Stub::GenerateOperation(MacroAssembler* masm) { | |
| 2815 // Only free register is edi. | |
| 2816 // Input value is on FP stack and in xmm1. | |
| 2817 | |
| 2818 ASSERT(type_ == TranscendentalCache::LOG); | |
| 2819 __ fldln2(); | |
| 2820 __ fxch(); | |
| 2821 __ fyl2x(); | |
| 2822 } | |
| 2823 | |
| 2824 | |
| 2825 // Get the integer part of a heap number. Surprisingly, all this bit twiddling | 2752 // Get the integer part of a heap number. Surprisingly, all this bit twiddling |
| 2826 // is faster than using the built-in instructions on floating point registers. | 2753 // is faster than using the built-in instructions on floating point registers. |
| 2827 // Trashes edi and ebx. Dest is ecx. Source cannot be ecx or one of the | 2754 // Trashes edi and ebx. Dest is ecx. Source cannot be ecx or one of the |
| 2828 // trashed registers. | 2755 // trashed registers. |
| 2829 void IntegerConvert(MacroAssembler* masm, | 2756 void IntegerConvert(MacroAssembler* masm, |
| 2830 Register source, | 2757 Register source, |
| 2831 TypeInfo type_info, | 2758 TypeInfo type_info, |
| 2832 bool use_sse3, | 2759 bool use_sse3, |
| 2833 Label* conversion_failure) { | 2760 Label* conversion_failure) { |
| 2834 ASSERT(!source.is(ecx) && !source.is(edi) && !source.is(ebx)); | 2761 ASSERT(!source.is(ecx) && !source.is(edi) && !source.is(ebx)); |
| (...skipping 3636 matching lines...) |
| 6471 // Do a tail call to the rewritten stub. | 6398 // Do a tail call to the rewritten stub. |
| 6472 __ jmp(Operand(edi)); | 6399 __ jmp(Operand(edi)); |
| 6473 } | 6400 } |
| 6474 | 6401 |
| 6475 | 6402 |
| 6476 #undef __ | 6403 #undef __ |
| 6477 | 6404 |
| 6478 } } // namespace v8::internal | 6405 } } // namespace v8::internal |
| 6479 | 6406 |
| 6480 #endif // V8_TARGET_ARCH_IA32 | 6407 #endif // V8_TARGET_ARCH_IA32 |