OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 509 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
520 // Double value, canonicalize NaN. | 520 // Double value, canonicalize NaN. |
521 uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32); | 521 uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32); |
522 cmp(FieldOperand(maybe_number, offset), | 522 cmp(FieldOperand(maybe_number, offset), |
523 Immediate(kNaNOrInfinityLowerBoundUpper32)); | 523 Immediate(kNaNOrInfinityLowerBoundUpper32)); |
524 j(greater_equal, &maybe_nan, Label::kNear); | 524 j(greater_equal, &maybe_nan, Label::kNear); |
525 | 525 |
526 bind(&not_nan); | 526 bind(&not_nan); |
527 ExternalReference canonical_nan_reference = | 527 ExternalReference canonical_nan_reference = |
528 ExternalReference::address_of_canonical_non_hole_nan(); | 528 ExternalReference::address_of_canonical_non_hole_nan(); |
529 if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) { | 529 if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) { |
530 CpuFeatures::Scope use_sse2(SSE2); | 530 CpuFeatureScope use_sse2(this, SSE2); |
531 movdbl(scratch2, FieldOperand(maybe_number, HeapNumber::kValueOffset)); | 531 movdbl(scratch2, FieldOperand(maybe_number, HeapNumber::kValueOffset)); |
532 bind(&have_double_value); | 532 bind(&have_double_value); |
533 movdbl(FieldOperand(elements, key, times_4, | 533 movdbl(FieldOperand(elements, key, times_4, |
534 FixedDoubleArray::kHeaderSize - elements_offset), | 534 FixedDoubleArray::kHeaderSize - elements_offset), |
535 scratch2); | 535 scratch2); |
536 } else { | 536 } else { |
537 fld_d(FieldOperand(maybe_number, HeapNumber::kValueOffset)); | 537 fld_d(FieldOperand(maybe_number, HeapNumber::kValueOffset)); |
538 bind(&have_double_value); | 538 bind(&have_double_value); |
539 fstp_d(FieldOperand(elements, key, times_4, | 539 fstp_d(FieldOperand(elements, key, times_4, |
540 FixedDoubleArray::kHeaderSize - elements_offset)); | 540 FixedDoubleArray::kHeaderSize - elements_offset)); |
541 } | 541 } |
542 jmp(&done); | 542 jmp(&done); |
543 | 543 |
544 bind(&maybe_nan); | 544 bind(&maybe_nan); |
545 // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise | 545 // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise |
546 // it's an Infinity, and the non-NaN code path applies. | 546 // it's an Infinity, and the non-NaN code path applies. |
547 j(greater, &is_nan, Label::kNear); | 547 j(greater, &is_nan, Label::kNear); |
548 cmp(FieldOperand(maybe_number, HeapNumber::kValueOffset), Immediate(0)); | 548 cmp(FieldOperand(maybe_number, HeapNumber::kValueOffset), Immediate(0)); |
549 j(zero, &not_nan); | 549 j(zero, &not_nan); |
550 bind(&is_nan); | 550 bind(&is_nan); |
551 if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) { | 551 if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) { |
552 CpuFeatures::Scope use_sse2(SSE2); | 552 CpuFeatureScope use_sse2(this, SSE2); |
553 movdbl(scratch2, Operand::StaticVariable(canonical_nan_reference)); | 553 movdbl(scratch2, Operand::StaticVariable(canonical_nan_reference)); |
554 } else { | 554 } else { |
555 fld_d(Operand::StaticVariable(canonical_nan_reference)); | 555 fld_d(Operand::StaticVariable(canonical_nan_reference)); |
556 } | 556 } |
557 jmp(&have_double_value, Label::kNear); | 557 jmp(&have_double_value, Label::kNear); |
558 | 558 |
559 bind(&smi_value); | 559 bind(&smi_value); |
560 // Value is a smi. Convert to a double and store. | 560 // Value is a smi. Convert to a double and store. |
561 // Preserve original value. | 561 // Preserve original value. |
562 mov(scratch1, maybe_number); | 562 mov(scratch1, maybe_number); |
563 SmiUntag(scratch1); | 563 SmiUntag(scratch1); |
564 if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) { | 564 if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) { |
565 CpuFeatures::Scope fscope(SSE2); | 565 CpuFeatureScope fscope(this, SSE2); |
566 cvtsi2sd(scratch2, scratch1); | 566 cvtsi2sd(scratch2, scratch1); |
567 movdbl(FieldOperand(elements, key, times_4, | 567 movdbl(FieldOperand(elements, key, times_4, |
568 FixedDoubleArray::kHeaderSize - elements_offset), | 568 FixedDoubleArray::kHeaderSize - elements_offset), |
569 scratch2); | 569 scratch2); |
570 } else { | 570 } else { |
571 push(scratch1); | 571 push(scratch1); |
572 fild_s(Operand(esp, 0)); | 572 fild_s(Operand(esp, 0)); |
573 pop(scratch1); | 573 pop(scratch1); |
574 fstp_d(FieldOperand(elements, key, times_4, | 574 fstp_d(FieldOperand(elements, key, times_4, |
575 FixedDoubleArray::kHeaderSize - elements_offset)); | 575 FixedDoubleArray::kHeaderSize - elements_offset)); |
(...skipping 207 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
783 ExternalReference context_address(Isolate::kContextAddress, | 783 ExternalReference context_address(Isolate::kContextAddress, |
784 isolate()); | 784 isolate()); |
785 mov(Operand::StaticVariable(c_entry_fp_address), ebp); | 785 mov(Operand::StaticVariable(c_entry_fp_address), ebp); |
786 mov(Operand::StaticVariable(context_address), esi); | 786 mov(Operand::StaticVariable(context_address), esi); |
787 } | 787 } |
788 | 788 |
789 | 789 |
790 void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) { | 790 void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) { |
791 // Optionally save all XMM registers. | 791 // Optionally save all XMM registers. |
792 if (save_doubles) { | 792 if (save_doubles) { |
793 CpuFeatures::Scope scope(SSE2); | 793 CpuFeatureScope scope(this, SSE2); |
794 int space = XMMRegister::kNumRegisters * kDoubleSize + argc * kPointerSize; | 794 int space = XMMRegister::kNumRegisters * kDoubleSize + argc * kPointerSize; |
795 sub(esp, Immediate(space)); | 795 sub(esp, Immediate(space)); |
796 const int offset = -2 * kPointerSize; | 796 const int offset = -2 * kPointerSize; |
797 for (int i = 0; i < XMMRegister::kNumRegisters; i++) { | 797 for (int i = 0; i < XMMRegister::kNumRegisters; i++) { |
798 XMMRegister reg = XMMRegister::from_code(i); | 798 XMMRegister reg = XMMRegister::from_code(i); |
799 movdbl(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg); | 799 movdbl(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg); |
800 } | 800 } |
801 } else { | 801 } else { |
802 sub(esp, Immediate(argc * kPointerSize)); | 802 sub(esp, Immediate(argc * kPointerSize)); |
803 } | 803 } |
(...skipping 25 matching lines...) Expand all Loading... |
829 | 829 |
830 void MacroAssembler::EnterApiExitFrame(int argc) { | 830 void MacroAssembler::EnterApiExitFrame(int argc) { |
831 EnterExitFramePrologue(); | 831 EnterExitFramePrologue(); |
832 EnterExitFrameEpilogue(argc, false); | 832 EnterExitFrameEpilogue(argc, false); |
833 } | 833 } |
834 | 834 |
835 | 835 |
836 void MacroAssembler::LeaveExitFrame(bool save_doubles) { | 836 void MacroAssembler::LeaveExitFrame(bool save_doubles) { |
837 // Optionally restore all XMM registers. | 837 // Optionally restore all XMM registers. |
838 if (save_doubles) { | 838 if (save_doubles) { |
839 CpuFeatures::Scope scope(SSE2); | 839 CpuFeatureScope scope(this, SSE2); |
840 const int offset = -2 * kPointerSize; | 840 const int offset = -2 * kPointerSize; |
841 for (int i = 0; i < XMMRegister::kNumRegisters; i++) { | 841 for (int i = 0; i < XMMRegister::kNumRegisters; i++) { |
842 XMMRegister reg = XMMRegister::from_code(i); | 842 XMMRegister reg = XMMRegister::from_code(i); |
843 movdbl(reg, Operand(ebp, offset - ((i + 1) * kDoubleSize))); | 843 movdbl(reg, Operand(ebp, offset - ((i + 1) * kDoubleSize))); |
844 } | 844 } |
845 } | 845 } |
846 | 846 |
847 // Get the return address from the stack and restore the frame pointer. | 847 // Get the return address from the stack and restore the frame pointer. |
848 mov(ecx, Operand(ebp, 1 * kPointerSize)); | 848 mov(ecx, Operand(ebp, 1 * kPointerSize)); |
849 mov(ebp, Operand(ebp, 0 * kPointerSize)); | 849 mov(ebp, Operand(ebp, 0 * kPointerSize)); |
(...skipping 2265 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3115 j(greater, &no_info_available); | 3115 j(greater, &no_info_available); |
3116 cmp(MemOperand(scratch_reg, -AllocationSiteInfo::kSize), | 3116 cmp(MemOperand(scratch_reg, -AllocationSiteInfo::kSize), |
3117 Immediate(Handle<Map>(isolate()->heap()->allocation_site_info_map()))); | 3117 Immediate(Handle<Map>(isolate()->heap()->allocation_site_info_map()))); |
3118 bind(&no_info_available); | 3118 bind(&no_info_available); |
3119 } | 3119 } |
3120 | 3120 |
3121 | 3121 |
3122 } } // namespace v8::internal | 3122 } } // namespace v8::internal |
3123 | 3123 |
3124 #endif // V8_TARGET_ARCH_IA32 | 3124 #endif // V8_TARGET_ARCH_IA32 |
OLD | NEW |