Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #if V8_TARGET_ARCH_X64 | 5 #if V8_TARGET_ARCH_X64 |
| 6 | 6 |
| 7 #include "src/base/bits.h" | 7 #include "src/base/bits.h" |
| 8 #include "src/base/division-by-constant.h" | 8 #include "src/base/division-by-constant.h" |
| 9 #include "src/bootstrapper.h" | 9 #include "src/bootstrapper.h" |
| 10 #include "src/codegen.h" | 10 #include "src/codegen.h" |
| (...skipping 2956 matching lines...) | |
| 2967 void MacroAssembler::Ucomisd(XMMRegister src1, const Operand& src2) { | 2967 void MacroAssembler::Ucomisd(XMMRegister src1, const Operand& src2) { |
| 2968 if (CpuFeatures::IsSupported(AVX)) { | 2968 if (CpuFeatures::IsSupported(AVX)) { |
| 2969 CpuFeatureScope scope(this, AVX); | 2969 CpuFeatureScope scope(this, AVX); |
| 2970 vucomisd(src1, src2); | 2970 vucomisd(src1, src2); |
| 2971 } else { | 2971 } else { |
| 2972 ucomisd(src1, src2); | 2972 ucomisd(src1, src2); |
| 2973 } | 2973 } |
| 2974 } | 2974 } |
| 2975 | 2975 |
| 2976 | 2976 |
| | 2977 // ---------------------------------------------------------------------------- |
| | 2978 |
| | 2979 void MacroAssembler::Absps(XMMRegister dst) { |
| | 2980 static const struct V8_ALIGNED(16) { |
| | 2981 uint32_t a; |
| | 2982 uint32_t b; |
| | 2983 uint32_t c; |
| | 2984 uint32_t d; |
| | 2985 } float_absolute_constant = {0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF}; |
| | 2986 Set(kScratchRegister, reinterpret_cast<intptr_t>(&float_absolute_constant)); |

Benedikt Meurer (2016/07/18 04:47:52):
Can we somehow put this somewhere near the roots a…

| | 2987 Andps(dst, Operand(kScratchRegister, 0)); |
| | 2988 } |
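The review comment above suggests keeping the mask somewhere the generated code can already reach (near the roots / isolate data) rather than pointing kScratchRegister at function-local static storage. A minimal sketch of that direction, assuming a hypothetical `ExternalReference::address_of_float_abs_constant()` accessor for a runtime-owned, 16-byte-aligned `{0x7FFFFFFF x4}` mask, plus the existing `LoadAddress` helper:

```cpp
// Sketch only: address_of_float_abs_constant() is assumed here, not an
// accessor this patch defines. The mask is the same {0x7FFFFFFF x4}
// as above, just owned by the runtime instead of function-local
// static data, so no raw C++ pointer is baked into generated code.
void MacroAssembler::Absps(XMMRegister dst) {
  LoadAddress(kScratchRegister,
              ExternalReference::address_of_float_abs_constant());
  Andps(dst, Operand(kScratchRegister, 0));  // clear each lane's sign bit
}
```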
| | 2989 |
| | 2990 |
| | 2991 void MacroAssembler::Negps(XMMRegister dst) { |
| | 2992 static const struct V8_ALIGNED(16) { |
| | 2993 uint32_t a; |
| | 2994 uint32_t b; |
| | 2995 uint32_t c; |
| | 2996 uint32_t d; |
| | 2997 } float_negate_constant = {0x80000000, 0x80000000, 0x80000000, 0x80000000}; |
| | 2998 Set(kScratchRegister, reinterpret_cast<intptr_t>(&float_negate_constant)); |
| | 2999 Xorps(dst, Operand(kScratchRegister, 0)); |

bbudge (2016/07/18 14:08:09):
It seems like we could avoid the memory access wit…

| | 3000 } |
| | 3001 |
| | 3002 |
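The (truncated) comment above likely alludes to the classic register-only construction of these masks: `pcmpeqd` of a register with itself yields all ones, and an immediate shift then isolates (or clears) the sign bit in every lane, so neither the 16-byte constant nor the load is needed. A sketch, under the assumption that the assembler exposes `pcmpeqd`, `pslld`, and `psrld` on XMM registers and that a scratch XMM register may be clobbered:

```cpp
// Sketch only: builds {0x80000000 x4} entirely in registers.
void MacroAssembler::Negps(XMMRegister dst, XMMRegister scratch) {
  pcmpeqd(scratch, scratch);  // scratch = 0xFFFFFFFF in every lane
  pslld(scratch, 31);         // keep only the sign bit: 0x80000000 per lane
  xorps(dst, scratch);        // flip each float's sign
}

// Absps would use a right shift instead, leaving 0x7FFFFFFF per lane:
//   pcmpeqd(scratch, scratch);
//   psrld(scratch, 1);
//   andps(dst, scratch);
```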
| 2977 void MacroAssembler::Cmp(Register dst, Handle<Object> source) { | 3003 void MacroAssembler::Cmp(Register dst, Handle<Object> source) { |
| 2978 AllowDeferredHandleDereference smi_check; | 3004 AllowDeferredHandleDereference smi_check; |
| 2979 if (source->IsSmi()) { | 3005 if (source->IsSmi()) { |
| 2980 Cmp(dst, Smi::cast(*source)); | 3006 Cmp(dst, Smi::cast(*source)); |
| 2981 } else { | 3007 } else { |
| 2982 MoveHeapObject(kScratchRegister, source); | 3008 MoveHeapObject(kScratchRegister, source); |
| 2983 cmpp(dst, kScratchRegister); | 3009 cmpp(dst, kScratchRegister); |
| 2984 } | 3010 } |
| 2985 } | 3011 } |
| 2986 | 3012 |
| (...skipping 2789 matching lines...) | |
| 5776 movl(rax, dividend); | 5802 movl(rax, dividend); |
| 5777 shrl(rax, Immediate(31)); | 5803 shrl(rax, Immediate(31)); |
| 5778 addl(rdx, rax); | 5804 addl(rdx, rax); |
| 5779 } | 5805 } |
| 5780 | 5806 |
| 5781 | 5807 |
| 5782 } // namespace internal | 5808 } // namespace internal |
| 5783 } // namespace v8 | 5809 } // namespace v8 |
| 5784 | 5810 |
| 5785 #endif // V8_TARGET_ARCH_X64 | 5811 #endif // V8_TARGET_ARCH_X64 |