OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 2486 matching lines...)
2497 __ cmp(ebx, Immediate(factory->heap_number_map())); | 2497 __ cmp(ebx, Immediate(factory->heap_number_map())); |
2498 __ j(not_equal, &runtime_call); | 2498 __ j(not_equal, &runtime_call); |
2499 // Input is a HeapNumber. Push it on the FPU stack and load its | 2499 // Input is a HeapNumber. Push it on the FPU stack and load its |
2500 // low and high words into ebx, edx. | 2500 // low and high words into ebx, edx. |
2501 __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset)); | 2501 __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset)); |
2502 __ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset)); | 2502 __ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset)); |
2503 __ mov(ebx, FieldOperand(eax, HeapNumber::kMantissaOffset)); | 2503 __ mov(ebx, FieldOperand(eax, HeapNumber::kMantissaOffset)); |
2504 | 2504 |
2505 __ bind(&loaded); | 2505 __ bind(&loaded); |
2506 } else { // UNTAGGED. | 2506 } else { // UNTAGGED. |
| 2507 CpuFeatures::Scope scope(SSE2); |
2507 if (CpuFeatures::IsSupported(SSE4_1)) { | 2508 if (CpuFeatures::IsSupported(SSE4_1)) { |
2508 CpuFeatures::Scope sse4_scope(SSE4_1); | 2509 CpuFeatures::Scope sse4_scope(SSE4_1); |
2509 __ pextrd(edx, xmm1, 0x1); // copy xmm1[63..32] to edx. | 2510 __ pextrd(edx, xmm1, 0x1); // copy xmm1[63..32] to edx. |
2510 } else { | 2511 } else { |
2511 __ pshufd(xmm0, xmm1, 0x1); | 2512 __ pshufd(xmm0, xmm1, 0x1); |
2512 __ movd(edx, xmm0); | 2513 __ movd(edx, xmm0); |
2513 } | 2514 } |
2514 __ movd(ebx, xmm1); | 2515 __ movd(ebx, xmm1); |
2515 } | 2516 } |
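
In the untagged path the cache key words come straight from xmm1: the low word with a plain movd, and the high word either with SSE4.1's pextrd in one instruction or, on SSE2-only hardware, by shuffling dword 1 into lane 0 with pshufd and then movd. A minimal intrinsics sketch of the same two extraction paths (standalone illustration, not V8 code; the SSE4.1 variant needs -msse4.1):

    #include <immintrin.h>
    #include <cstdint>

    // Extract bits 63..32 of a double held in an XMM register.
    uint32_t HighWordSse41(__m128d v) {
      return (uint32_t)_mm_extract_epi32(_mm_castpd_si128(v), 1);  // pextrd
    }

    uint32_t HighWordSse2(__m128d v) {
      __m128i t = _mm_shuffle_epi32(_mm_castpd_si128(v), 1);  // pshufd: dword 1 -> lane 0
      return (uint32_t)_mm_cvtsi128_si32(t);                  // movd
    }
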
2516 | 2517 |
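
The added CpuFeatures::Scope lines are the substance of this hunk: the untagged branches emit SSE2 instructions (movd, pshufd, movdbl), and V8 only permits such emission inside a scope asserting the feature was detected. A minimal sketch of the RAII idiom, with hypothetical stand-ins for V8's real probing and bookkeeping:

    #include <cassert>

    enum CpuFeature { SSE2, SSE4_1, kNumFeatures };

    // Hypothetical placeholders; V8's real versions are CPUID-backed.
    bool IsSupported(CpuFeature f) { return true; }
    bool g_enabled[kNumFeatures];  // "may the assembler emit this?" flags

    class FeatureScope {  // stand-in for CpuFeatures::Scope
     public:
      explicit FeatureScope(CpuFeature f) : f_(f), old_(g_enabled[f]) {
        assert(IsSupported(f));  // refuse SSE2 emission on non-SSE2 hardware
        g_enabled[f] = true;
      }
      ~FeatureScope() { g_enabled[f_] = old_; }  // restore on scope exit
     private:
      CpuFeature f_;
      bool old_;
    };
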
(...skipping 52 matching lines...)
2569 __ cmp(edx, Operand(ecx, kIntSize)); | 2570 __ cmp(edx, Operand(ecx, kIntSize)); |
2570 __ j(not_equal, &cache_miss, Label::kNear); | 2571 __ j(not_equal, &cache_miss, Label::kNear); |
2571 // Cache hit! | 2572 // Cache hit! |
2572 Counters* counters = masm->isolate()->counters(); | 2573 Counters* counters = masm->isolate()->counters(); |
2573 __ IncrementCounter(counters->transcendental_cache_hit(), 1); | 2574 __ IncrementCounter(counters->transcendental_cache_hit(), 1); |
2574 __ mov(eax, Operand(ecx, 2 * kIntSize)); | 2575 __ mov(eax, Operand(ecx, 2 * kIntSize)); |
2575 if (tagged) { | 2576 if (tagged) { |
2576 __ fstp(0); | 2577 __ fstp(0); |
2577 __ ret(kPointerSize); | 2578 __ ret(kPointerSize); |
2578 } else { // UNTAGGED. | 2579 } else { // UNTAGGED. |
| 2580 CpuFeatures::Scope scope(SSE2); |
2579 __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset)); | 2581 __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset)); |
2580 __ Ret(); | 2582 __ Ret(); |
2581 } | 2583 } |
2582 | 2584 |
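
The offsets 0, kIntSize, and 2 * kIntSize used against ecx imply a three-word cache element: two key words holding the input double's bit pattern, followed by the cached result. A sketch of that layout (field names are illustrative, not V8's):

    #include <cstdint>

    struct CacheElementSketch {
      uint32_t in0;  // low word of the input double (compared against ebx)
      uint32_t in1;  // high word of the input double (compared against edx)
      void* output;  // cached HeapNumber with the result (loaded into eax)
    };
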
2583 __ bind(&cache_miss); | 2585 __ bind(&cache_miss); |
2584 __ IncrementCounter(counters->transcendental_cache_miss(), 1); | 2586 __ IncrementCounter(counters->transcendental_cache_miss(), 1); |
2585 // Update cache with new value. | 2587 // Update cache with new value. |
2586 // We are short on registers, so use no_reg as scratch. | 2588 // We are short on registers, so use no_reg as scratch. |
2587 // This gives slightly larger code. | 2589 // This gives slightly larger code. |
2588 if (tagged) { | 2590 if (tagged) { |
2589 __ AllocateHeapNumber(eax, edi, no_reg, &runtime_call_clear_stack); | 2591 __ AllocateHeapNumber(eax, edi, no_reg, &runtime_call_clear_stack); |
2590 } else { // UNTAGGED. | 2592 } else { // UNTAGGED. |
| 2593 CpuFeatures::Scope scope(SSE2); |
2591 __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache); | 2594 __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache); |
2592 __ sub(esp, Immediate(kDoubleSize)); | 2595 __ sub(esp, Immediate(kDoubleSize)); |
2593 __ movdbl(Operand(esp, 0), xmm1); | 2596 __ movdbl(Operand(esp, 0), xmm1); |
2594 __ fld_d(Operand(esp, 0)); | 2597 __ fld_d(Operand(esp, 0)); |
2595 __ add(esp, Immediate(kDoubleSize)); | 2598 __ add(esp, Immediate(kDoubleSize)); |
2596 } | 2599 } |
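
The sub/movdbl/fld_d/add sequence exists because ia32 has no direct move between XMM and x87 registers: the double must take a round trip through memory. A hypothetical helper naming the idiom, assuming V8's MacroAssembler environment and using the same calls as the diff:

    // Hypothetical helper; mirrors the spill sequence above.
    void LoadX87FromXmm1(MacroAssembler* masm) {
      masm->sub(esp, Immediate(kDoubleSize));  // reserve 8 bytes of scratch stack
      masm->movdbl(Operand(esp, 0), xmm1);     // spill the SSE double (movsd)
      masm->fld_d(Operand(esp, 0));            // push it onto the x87 stack as st(0)
      masm->add(esp, Immediate(kDoubleSize));  // release the scratch slot
    }
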
2597 GenerateOperation(masm, type_); | 2600 GenerateOperation(masm, type_); |
2598 __ mov(Operand(ecx, 0), ebx); | 2601 __ mov(Operand(ecx, 0), ebx); |
2599 __ mov(Operand(ecx, kIntSize), edx); | 2602 __ mov(Operand(ecx, kIntSize), edx); |
2600 __ mov(Operand(ecx, 2 * kIntSize), eax); | 2603 __ mov(Operand(ecx, 2 * kIntSize), eax); |
2601 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); | 2604 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); |
2602 if (tagged) { | 2605 if (tagged) { |
2603 __ ret(kPointerSize); | 2606 __ ret(kPointerSize); |
2604 } else { // UNTAGGED. | 2607 } else { // UNTAGGED. |
| 2608 CpuFeatures::Scope scope(SSE2); |
2605 __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset)); | 2609 __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset)); |
2606 __ Ret(); | 2610 __ Ret(); |
2607 | 2611 |
2608 // Skip cache and return answer directly, only in untagged case. | 2612 // Skip cache and return answer directly, only in untagged case. |
2609 __ bind(&skip_cache); | 2613 __ bind(&skip_cache); |
2610 __ sub(esp, Immediate(kDoubleSize)); | 2614 __ sub(esp, Immediate(kDoubleSize)); |
2611 __ movdbl(Operand(esp, 0), xmm1); | 2615 __ movdbl(Operand(esp, 0), xmm1); |
2612 __ fld_d(Operand(esp, 0)); | 2616 __ fld_d(Operand(esp, 0)); |
2613 GenerateOperation(masm, type_); | 2617 GenerateOperation(masm, type_); |
2614 __ fstp_d(Operand(esp, 0)); | 2618 __ fstp_d(Operand(esp, 0)); |
(...skipping 12 matching lines...)
2627 | 2631 |
2628 // Call runtime, doing whatever allocation and cleanup is necessary. | 2632 // Call runtime, doing whatever allocation and cleanup is necessary. |
2629 if (tagged) { | 2633 if (tagged) { |
2630 __ bind(&runtime_call_clear_stack); | 2634 __ bind(&runtime_call_clear_stack); |
2631 __ fstp(0); | 2635 __ fstp(0); |
2632 __ bind(&runtime_call); | 2636 __ bind(&runtime_call); |
2633 ExternalReference runtime = | 2637 ExternalReference runtime = |
2634 ExternalReference(RuntimeFunction(), masm->isolate()); | 2638 ExternalReference(RuntimeFunction(), masm->isolate()); |
2635 __ TailCallExternalReference(runtime, 1, 1); | 2639 __ TailCallExternalReference(runtime, 1, 1); |
2636 } else { // UNTAGGED. | 2640 } else { // UNTAGGED. |
| 2641 CpuFeatures::Scope scope(SSE2); |
2637 __ bind(&runtime_call_clear_stack); | 2642 __ bind(&runtime_call_clear_stack); |
2638 __ bind(&runtime_call); | 2643 __ bind(&runtime_call); |
2639 __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache); | 2644 __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache); |
2640 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm1); | 2645 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm1); |
2641 { | 2646 { |
2642 FrameScope scope(masm, StackFrame::INTERNAL); | 2647 FrameScope scope(masm, StackFrame::INTERNAL); |
2643 __ push(eax); | 2648 __ push(eax); |
2644 __ CallRuntime(RuntimeFunction(), 1); | 2649 __ CallRuntime(RuntimeFunction(), 1); |
2645 } | 2650 } |
2646 __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset)); | 2651 __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset)); |
(...skipping 2215 matching lines...)
4862 | 4867 |
4863 void CodeStub::GenerateStubsAheadOfTime() { | 4868 void CodeStub::GenerateStubsAheadOfTime() { |
4864 CEntryStub::GenerateAheadOfTime(); | 4869 CEntryStub::GenerateAheadOfTime(); |
4865 StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(); | 4870 StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(); |
4866 // It is important that the store buffer overflow stubs are generated first. | 4871 // It is important that the store buffer overflow stubs are generated first. |
4867 RecordWriteStub::GenerateFixedRegStubsAheadOfTime(); | 4872 RecordWriteStub::GenerateFixedRegStubsAheadOfTime(); |
4868 } | 4873 } |
4869 | 4874 |
4870 | 4875 |
4871 void CodeStub::GenerateFPStubs() { | 4876 void CodeStub::GenerateFPStubs() { |
4872 CEntryStub save_doubles(1, kSaveFPRegs); | 4877 if (CpuFeatures::IsSupported(SSE2)) { |
4873 Handle<Code> code = save_doubles.GetCode(); | 4878 CEntryStub save_doubles(1, kSaveFPRegs); |
4874 code->set_is_pregenerated(true); | 4879 Handle<Code> code = save_doubles.GetCode(); |
4875 code->GetIsolate()->set_fp_stubs_generated(true); | 4880 code->set_is_pregenerated(true); |
| 4881 code->GetIsolate()->set_fp_stubs_generated(true); |
| 4882 } |
4876 } | 4883 } |
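
The GenerateFPStubs change is the companion to the scope additions above: the double-saving CEntry stub is itself SSE2 code, so on pre-SSE2 hardware it must not be pregenerated at all, and fp_stubs_generated stays false. A standalone sketch of the guard, using GCC/Clang's __builtin_cpu_supports as an illustrative stand-in for CpuFeatures::IsSupported(SSE2):

    void GenerateFPStubsSketch() {
      if (!__builtin_cpu_supports("sse2")) {
        return;  // no SSE2: skip FP stubs; callers must check fp_stubs_generated()
      }
      // ...generate and pregenerate the kSaveFPRegs CEntry stub...
    }
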
4877 | 4884 |
4878 | 4885 |
4879 void CEntryStub::GenerateAheadOfTime() { | 4886 void CEntryStub::GenerateAheadOfTime() { |
4880 CEntryStub stub(1, kDontSaveFPRegs); | 4887 CEntryStub stub(1, kDontSaveFPRegs); |
4881 Handle<Code> code = stub.GetCode(); | 4888 Handle<Code> code = stub.GetCode(); |
4882 code->set_is_pregenerated(true); | 4889 code->set_is_pregenerated(true); |
4883 } | 4890 } |
4884 | 4891 |
4885 | 4892 |
(...skipping 2641 matching lines...)
7527 // Restore ecx. | 7534 // Restore ecx. |
7528 __ pop(ecx); | 7535 __ pop(ecx); |
7529 __ ret(0); | 7536 __ ret(0); |
7530 } | 7537 } |
7531 | 7538 |
7532 #undef __ | 7539 #undef __ |
7533 | 7540 |
7534 } } // namespace v8::internal | 7541 } } // namespace v8::internal |
7535 | 7542 |
7536 #endif // V8_TARGET_ARCH_IA32 | 7543 #endif // V8_TARGET_ARCH_IA32 |