| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 509 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 520 // Double value, canonicalize NaN. | 520 // Double value, canonicalize NaN. |
| 521 uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32); | 521 uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32); |
| 522 cmp(FieldOperand(maybe_number, offset), | 522 cmp(FieldOperand(maybe_number, offset), |
| 523 Immediate(kNaNOrInfinityLowerBoundUpper32)); | 523 Immediate(kNaNOrInfinityLowerBoundUpper32)); |
| 524 j(greater_equal, &maybe_nan, Label::kNear); | 524 j(greater_equal, &maybe_nan, Label::kNear); |
| 525 | 525 |
| 526 bind(&not_nan); | 526 bind(&not_nan); |
| 527 ExternalReference canonical_nan_reference = | 527 ExternalReference canonical_nan_reference = |
| 528 ExternalReference::address_of_canonical_non_hole_nan(); | 528 ExternalReference::address_of_canonical_non_hole_nan(); |
| 529 if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) { | 529 if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) { |
| 530 CpuFeatures::Scope use_sse2(SSE2); | 530 CpuFeatureScope use_sse2(this, SSE2); |
| 531 movdbl(scratch2, FieldOperand(maybe_number, HeapNumber::kValueOffset)); | 531 movdbl(scratch2, FieldOperand(maybe_number, HeapNumber::kValueOffset)); |
| 532 bind(&have_double_value); | 532 bind(&have_double_value); |
| 533 movdbl(FieldOperand(elements, key, times_4, | 533 movdbl(FieldOperand(elements, key, times_4, |
| 534 FixedDoubleArray::kHeaderSize - elements_offset), | 534 FixedDoubleArray::kHeaderSize - elements_offset), |
| 535 scratch2); | 535 scratch2); |
| 536 } else { | 536 } else { |
| 537 fld_d(FieldOperand(maybe_number, HeapNumber::kValueOffset)); | 537 fld_d(FieldOperand(maybe_number, HeapNumber::kValueOffset)); |
| 538 bind(&have_double_value); | 538 bind(&have_double_value); |
| 539 fstp_d(FieldOperand(elements, key, times_4, | 539 fstp_d(FieldOperand(elements, key, times_4, |
| 540 FixedDoubleArray::kHeaderSize - elements_offset)); | 540 FixedDoubleArray::kHeaderSize - elements_offset)); |
| 541 } | 541 } |
| 542 jmp(&done); | 542 jmp(&done); |
| 543 | 543 |
| 544 bind(&maybe_nan); | 544 bind(&maybe_nan); |
| 545 // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise | 545 // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise |
| 546 // it's an Infinity, and the non-NaN code path applies. | 546 // it's an Infinity, and the non-NaN code path applies. |
| 547 j(greater, &is_nan, Label::kNear); | 547 j(greater, &is_nan, Label::kNear); |
| 548 cmp(FieldOperand(maybe_number, HeapNumber::kValueOffset), Immediate(0)); | 548 cmp(FieldOperand(maybe_number, HeapNumber::kValueOffset), Immediate(0)); |
| 549 j(zero, &not_nan); | 549 j(zero, &not_nan); |
| 550 bind(&is_nan); | 550 bind(&is_nan); |
| 551 if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) { | 551 if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) { |
| 552 CpuFeatures::Scope use_sse2(SSE2); | 552 CpuFeatureScope use_sse2(this, SSE2); |
| 553 movdbl(scratch2, Operand::StaticVariable(canonical_nan_reference)); | 553 movdbl(scratch2, Operand::StaticVariable(canonical_nan_reference)); |
| 554 } else { | 554 } else { |
| 555 fld_d(Operand::StaticVariable(canonical_nan_reference)); | 555 fld_d(Operand::StaticVariable(canonical_nan_reference)); |
| 556 } | 556 } |
| 557 jmp(&have_double_value, Label::kNear); | 557 jmp(&have_double_value, Label::kNear); |
| 558 | 558 |
| 559 bind(&smi_value); | 559 bind(&smi_value); |
| 560 // Value is a smi. Convert to a double and store. | 560 // Value is a smi. Convert to a double and store. |
| 561 // Preserve original value. | 561 // Preserve original value. |
| 562 mov(scratch1, maybe_number); | 562 mov(scratch1, maybe_number); |
| 563 SmiUntag(scratch1); | 563 SmiUntag(scratch1); |
| 564 if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) { | 564 if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) { |
| 565 CpuFeatures::Scope fscope(SSE2); | 565 CpuFeatureScope fscope(this, SSE2); |
| 566 cvtsi2sd(scratch2, scratch1); | 566 cvtsi2sd(scratch2, scratch1); |
| 567 movdbl(FieldOperand(elements, key, times_4, | 567 movdbl(FieldOperand(elements, key, times_4, |
| 568 FixedDoubleArray::kHeaderSize - elements_offset), | 568 FixedDoubleArray::kHeaderSize - elements_offset), |
| 569 scratch2); | 569 scratch2); |
| 570 } else { | 570 } else { |
| 571 push(scratch1); | 571 push(scratch1); |
| 572 fild_s(Operand(esp, 0)); | 572 fild_s(Operand(esp, 0)); |
| 573 pop(scratch1); | 573 pop(scratch1); |
| 574 fstp_d(FieldOperand(elements, key, times_4, | 574 fstp_d(FieldOperand(elements, key, times_4, |
| 575 FixedDoubleArray::kHeaderSize - elements_offset)); | 575 FixedDoubleArray::kHeaderSize - elements_offset)); |
| (...skipping 184 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 760 ExternalReference context_address(Isolate::kContextAddress, | 760 ExternalReference context_address(Isolate::kContextAddress, |
| 761 isolate()); | 761 isolate()); |
| 762 mov(Operand::StaticVariable(c_entry_fp_address), ebp); | 762 mov(Operand::StaticVariable(c_entry_fp_address), ebp); |
| 763 mov(Operand::StaticVariable(context_address), esi); | 763 mov(Operand::StaticVariable(context_address), esi); |
| 764 } | 764 } |
| 765 | 765 |
| 766 | 766 |
| 767 void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) { | 767 void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) { |
| 768 // Optionally save all XMM registers. | 768 // Optionally save all XMM registers. |
| 769 if (save_doubles) { | 769 if (save_doubles) { |
| 770 CpuFeatures::Scope scope(SSE2); | 770 CpuFeatureScope scope(this, SSE2); |
| 771 int space = XMMRegister::kNumRegisters * kDoubleSize + argc * kPointerSize; | 771 int space = XMMRegister::kNumRegisters * kDoubleSize + argc * kPointerSize; |
| 772 sub(esp, Immediate(space)); | 772 sub(esp, Immediate(space)); |
| 773 const int offset = -2 * kPointerSize; | 773 const int offset = -2 * kPointerSize; |
| 774 for (int i = 0; i < XMMRegister::kNumRegisters; i++) { | 774 for (int i = 0; i < XMMRegister::kNumRegisters; i++) { |
| 775 XMMRegister reg = XMMRegister::from_code(i); | 775 XMMRegister reg = XMMRegister::from_code(i); |
| 776 movdbl(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg); | 776 movdbl(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg); |
| 777 } | 777 } |
| 778 } else { | 778 } else { |
| 779 sub(esp, Immediate(argc * kPointerSize)); | 779 sub(esp, Immediate(argc * kPointerSize)); |
| 780 } | 780 } |
| (...skipping 25 matching lines...) Expand all Loading... |
| 806 | 806 |
| 807 void MacroAssembler::EnterApiExitFrame(int argc) { | 807 void MacroAssembler::EnterApiExitFrame(int argc) { |
| 808 EnterExitFramePrologue(); | 808 EnterExitFramePrologue(); |
| 809 EnterExitFrameEpilogue(argc, false); | 809 EnterExitFrameEpilogue(argc, false); |
| 810 } | 810 } |
| 811 | 811 |
| 812 | 812 |
| 813 void MacroAssembler::LeaveExitFrame(bool save_doubles) { | 813 void MacroAssembler::LeaveExitFrame(bool save_doubles) { |
| 814 // Optionally restore all XMM registers. | 814 // Optionally restore all XMM registers. |
| 815 if (save_doubles) { | 815 if (save_doubles) { |
| 816 CpuFeatures::Scope scope(SSE2); | 816 CpuFeatureScope scope(this, SSE2); |
| 817 const int offset = -2 * kPointerSize; | 817 const int offset = -2 * kPointerSize; |
| 818 for (int i = 0; i < XMMRegister::kNumRegisters; i++) { | 818 for (int i = 0; i < XMMRegister::kNumRegisters; i++) { |
| 819 XMMRegister reg = XMMRegister::from_code(i); | 819 XMMRegister reg = XMMRegister::from_code(i); |
| 820 movdbl(reg, Operand(ebp, offset - ((i + 1) * kDoubleSize))); | 820 movdbl(reg, Operand(ebp, offset - ((i + 1) * kDoubleSize))); |
| 821 } | 821 } |
| 822 } | 822 } |
| 823 | 823 |
| 824 // Get the return address from the stack and restore the frame pointer. | 824 // Get the return address from the stack and restore the frame pointer. |
| 825 mov(ecx, Operand(ebp, 1 * kPointerSize)); | 825 mov(ecx, Operand(ebp, 1 * kPointerSize)); |
| 826 mov(ebp, Operand(ebp, 0 * kPointerSize)); | 826 mov(ebp, Operand(ebp, 0 * kPointerSize)); |
| (...skipping 2265 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 3092 j(greater, &no_info_available); | 3092 j(greater, &no_info_available); |
| 3093 cmp(MemOperand(scratch_reg, -AllocationSiteInfo::kSize), | 3093 cmp(MemOperand(scratch_reg, -AllocationSiteInfo::kSize), |
| 3094 Immediate(Handle<Map>(isolate()->heap()->allocation_site_info_map()))); | 3094 Immediate(Handle<Map>(isolate()->heap()->allocation_site_info_map()))); |
| 3095 bind(&no_info_available); | 3095 bind(&no_info_available); |
| 3096 } | 3096 } |
| 3097 | 3097 |
| 3098 | 3098 |
| 3099 } } // namespace v8::internal | 3099 } } // namespace v8::internal |
| 3100 | 3100 |
| 3101 #endif // V8_TARGET_ARCH_IA32 | 3101 #endif // V8_TARGET_ARCH_IA32 |
| OLD | NEW |