| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 2242 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 2253 } | 2253 } |
| 2254 | 2254 |
| 2255 | 2255 |
| 2256 void MacroAssembler::LeaveExitFrame(bool save_doubles) { | 2256 void MacroAssembler::LeaveExitFrame(bool save_doubles) { |
| 2257 // Registers: | 2257 // Registers: |
| 2258 // r15 : argv | 2258 // r15 : argv |
| 2259 if (save_doubles) { | 2259 if (save_doubles) { |
| 2260 int offset = -2 * kPointerSize; | 2260 int offset = -2 * kPointerSize; |
| 2261 for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; i++) { | 2261 for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; i++) { |
| 2262 XMMRegister reg = XMMRegister::FromAllocationIndex(i); | 2262 XMMRegister reg = XMMRegister::FromAllocationIndex(i); |
| 2263 movsd(reg, Operand(rbp, offset - ((i + 1) * kDoubleSize))); | 2263 LoadDbl(reg, Operand(rbp, offset - ((i + 1) * kDoubleSize))); |
| 2264 } | 2264 } |
| 2265 } | 2265 } |
| 2266 // Get the return address from the stack and restore the frame pointer. | 2266 // Get the return address from the stack and restore the frame pointer. |
| 2267 movq(rcx, Operand(rbp, 1 * kPointerSize)); | 2267 movq(rcx, Operand(rbp, 1 * kPointerSize)); |
| 2268 movq(rbp, Operand(rbp, 0 * kPointerSize)); | 2268 movq(rbp, Operand(rbp, 0 * kPointerSize)); |
| 2269 | 2269 |
| 2270 // Drop everything up to and including the arguments and the receiver | 2270 // Drop everything up to and including the arguments and the receiver |
| 2271 // from the caller stack. | 2271 // from the caller stack. |
| 2272 lea(rsp, Operand(r15, 1 * kPointerSize)); | 2272 lea(rsp, Operand(r15, 1 * kPointerSize)); |
| 2273 | 2273 |
| (...skipping 613 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 2887 | 2887 |
| 2888 call(function); | 2888 call(function); |
| 2889 ASSERT(OS::ActivationFrameAlignment() != 0); | 2889 ASSERT(OS::ActivationFrameAlignment() != 0); |
| 2890 ASSERT(num_arguments >= 0); | 2890 ASSERT(num_arguments >= 0); |
| 2891 int argument_slots_on_stack = | 2891 int argument_slots_on_stack = |
| 2892 ArgumentStackSlotsForCFunctionCall(num_arguments); | 2892 ArgumentStackSlotsForCFunctionCall(num_arguments); |
| 2893 movq(rsp, Operand(rsp, argument_slots_on_stack * kPointerSize)); | 2893 movq(rsp, Operand(rsp, argument_slots_on_stack * kPointerSize)); |
| 2894 } | 2894 } |
| 2895 | 2895 |
| 2896 | 2896 |
| 2897 void MacroAssembler::LoadDbl(XMMRegister dst, const Operand& src) { |
| 2898 if (CpuFeatures::IsSupported(SSE3)) { |
| 2899 CpuFeatures::Scope enable(SSE3); |
| 2900 movddup(dst, src); |
| 2901 } else { |
| 2902 movsd(dst, src); |
| 2903 } |
| 2904 } |
| 2905 |
| 2906 |
| 2897 CodePatcher::CodePatcher(byte* address, int size) | 2907 CodePatcher::CodePatcher(byte* address, int size) |
| 2898 : address_(address), | 2908 : address_(address), |
| 2899 size_(size), | 2909 size_(size), |
| 2900 masm_(Isolate::Current(), address, size + Assembler::kGap) { | 2910 masm_(Isolate::Current(), address, size + Assembler::kGap) { |
| 2901 // Create a new macro assembler pointing to the address of the code to patch. | 2911 // Create a new macro assembler pointing to the address of the code to patch. |
| 2902 // The size is adjusted with kGap in order for the assembler to generate size | 2912 // The size is adjusted with kGap in order for the assembler to generate size |
| 2903 // bytes of instructions without failing with buffer size constraints. | 2913 // bytes of instructions without failing with buffer size constraints. |
| 2904 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); | 2914 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); |
| 2905 } | 2915 } |
| 2906 | 2916 |
| 2907 | 2917 |
| 2908 CodePatcher::~CodePatcher() { | 2918 CodePatcher::~CodePatcher() { |
| 2909 // Indicate that code has changed. | 2919 // Indicate that code has changed. |
| 2910 CPU::FlushICache(address_, size_); | 2920 CPU::FlushICache(address_, size_); |
| 2911 | 2921 |
| 2912 // Check that the code was patched as expected. | 2922 // Check that the code was patched as expected. |
| 2913 ASSERT(masm_.pc_ == address_ + size_); | 2923 ASSERT(masm_.pc_ == address_ + size_); |
| 2914 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); | 2924 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); |
| 2915 } | 2925 } |
| 2916 | 2926 |
| 2917 } } // namespace v8::internal | 2927 } } // namespace v8::internal |
| 2918 | 2928 |
| 2919 #endif // V8_TARGET_ARCH_X64 | 2929 #endif // V8_TARGET_ARCH_X64 |
| OLD | NEW |