Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(262)

Side by Side Diff: src/compiler/wasm-compiler.cc

Issue 2122853002: Implement UnalignedLoad and UnalignedStore turbofan operators. (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Add UnalignedLoad and UnalignedStore tests Created 4 years, 5 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 // Copyright 2015 the V8 project authors. All rights reserved. 1 // Copyright 2015 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "src/compiler/wasm-compiler.h" 5 #include "src/compiler/wasm-compiler.h"
6 6
7 #include "src/isolate-inl.h" 7 #include "src/isolate-inl.h"
8 8
9 #include "src/base/platform/elapsed-timer.h" 9 #include "src/base/platform/elapsed-timer.h"
10 #include "src/base/platform/platform.h" 10 #include "src/base/platform/platform.h"
(...skipping 2737 matching lines...) Expand 10 before | Expand all | Expand 10 after
2748 } 2748 }
2749 } 2749 }
2750 2750
2751 Node* cond = graph()->NewNode(jsgraph()->machine()->Uint32LessThan(), index, 2751 Node* cond = graph()->NewNode(jsgraph()->machine()->Uint32LessThan(), index,
2752 jsgraph()->RelocatableInt32Constant( 2752 jsgraph()->RelocatableInt32Constant(
2753 static_cast<uint32_t>(effective_size), 2753 static_cast<uint32_t>(effective_size),
2754 RelocInfo::WASM_MEMORY_SIZE_REFERENCE)); 2754 RelocInfo::WASM_MEMORY_SIZE_REFERENCE));
2755 trap_->AddTrapIfFalse(wasm::kTrapMemOutOfBounds, cond, position); 2755 trap_->AddTrapIfFalse(wasm::kTrapMemOutOfBounds, cond, position);
2756 } 2756 }
2757 2757
2758 MachineType WasmGraphBuilder::GetTypeForUnalignedAccess(uint32_t alignment,
2759 bool signExtend) {
2760 switch (alignment) {
2761 case 0:
2762 return signExtend ? MachineType::Int8() : MachineType::Uint8();
2763 case 1:
2764 return signExtend ? MachineType::Int16() : MachineType::Uint16();
2765 case 2:
2766 return signExtend ? MachineType::Int32() : MachineType::Uint32();
2767 default:
2768 UNREACHABLE();
2769 return MachineType::None();
2770 }
2771 }
2772
2773 Node* WasmGraphBuilder::GetUnalignedLoadOffsetNode(Node* baseOffset,
2774 int numberOfBytes,
2775 int stride, int current) {
2776 int offset;
2777 wasm::WasmOpcode addOpcode;
2778
2779 #if defined(V8_TARGET_LITTLE_ENDIAN)
2780 offset = numberOfBytes - stride - current;
2781 #elif defined(V8_TARGET_BIG_ENDIAN)
2782 offset = current;
2783 #else
2784 #error Unsupported endianness
2785 #endif
2786
2787 #if WASM_64
2788 addOpcode = wasm::kExprI64Add;
2789 #else
2790 addOpcode = wasm::kExprI32Add;
2791 #endif
2792
2793 if (offset == 0) {
2794 return baseOffset;
2795 } else {
2796 return Binop(addOpcode, baseOffset, jsgraph()->Int32Constant(offset));
2797 }
2798 }
2799
// Builds a software-emulated unaligned load: the |memtype| value is read as
// a sequence of smaller accesses of the alignment-guaranteed width (see
// GetTypeForUnalignedAccess), each piece OR-ed into an accumulator that is
// shifted left by one piece-width per iteration. Floating-point results are
// produced by reinterpreting the assembled integer bits. Each partial load
// is threaded through the effect chain (*effect_), so statement order here
// is significant.
2800 Node* WasmGraphBuilder::BuildUnalignedLoad(wasm::LocalType type,
2801                                            MachineType memtype, Node* index,
2802                                            uint32_t offset,
2803                                            uint32_t alignment) {
2804   Node* result;
2805   Node* load;
2806   bool extendTo64Bit = false;
2807
2808   wasm::WasmOpcode shiftOpcode;
2809   wasm::WasmOpcode orOpcode;
2810   Node* shiftConst;
2811
2812   bool signExtend = memtype.IsSigned();
2813
2814   bool isFloat = IsFloatingPoint(memtype.representation());
     // stride: bytes per partial access the alignment guarantees;
     // numberOfBytes: total width of the requested access.
2815   int stride =
2816       1 << ElementSizeLog2Of(
2817           GetTypeForUnalignedAccess(alignment, false).representation());
2818   int numberOfBytes = 1 << ElementSizeLog2Of(memtype.representation());
2819   DCHECK(numberOfBytes % stride == 0);
2820
     // Select 32- or 64-bit shift/or opcodes and a zero accumulator based on
     // the result type; 64-bit results require widening each 32-bit piece.
2821   switch (type) {
2822     case wasm::kAstI64:
2823     case wasm::kAstF64:
2824       shiftOpcode = wasm::kExprI64Shl;
2825       orOpcode = wasm::kExprI64Ior;
2826       result = jsgraph()->Int64Constant(0);
2827       shiftConst = jsgraph()->Int64Constant(8 * stride);
2828       extendTo64Bit = true;
2829       break;
2830     case wasm::kAstI32:
2831     case wasm::kAstF32:
2832       shiftOpcode = wasm::kExprI32Shl;
2833       orOpcode = wasm::kExprI32Ior;
2834       result = jsgraph()->Int32Constant(0);
2835       shiftConst = jsgraph()->Int32Constant(8 * stride);
2836       break;
2837     default:
2838       UNREACHABLE();
2839   }
2840
2841   Node* baseOffset = MemBuffer(offset);
2842
     // Assemble the value piece by piece, most significant piece first
     // (GetUnalignedLoadOffsetNode handles the endianness mirroring).
2843   for (int i = 0; i < numberOfBytes; i += stride) {
2844     result = Binop(shiftOpcode, result, shiftConst);
2845     load = graph()->NewNode(
2846         jsgraph()->machine()->Load(
2847             GetTypeForUnalignedAccess(alignment, signExtend)),
2848         GetUnalignedLoadOffsetNode(baseOffset, numberOfBytes, stride, i), index,
2849         *effect_, *control_);
       // Chain the partial load into the effect chain.
2850     *effect_ = load;
2851     if (extendTo64Bit) {
2852       if (signExtend) {
2853         load =
2854             graph()->NewNode(jsgraph()->machine()->ChangeInt32ToInt64(), load);
2855       } else {
2856         load = graph()->NewNode(jsgraph()->machine()->ChangeUint32ToUint64(),
2857                                 load);
2858       }
2859     }
       // Only the first (most significant) piece may sign-extend; all
       // following pieces are OR-ed in as unsigned.
2860     signExtend = false;
2861     result = Binop(orOpcode, result, load);
2862   }
2863
2864   // Convert to float
2865   if (isFloat) {
2866     switch (type) {
2867       case wasm::kAstF32:
2868         result = Unop(wasm::kExprF32ReinterpretI32, result);
2869         break;
2870       case wasm::kAstF64:
2871         result = Unop(wasm::kExprF64ReinterpretI64, result);
2872         break;
2873       default:
2874         UNREACHABLE();
2875     }
2876   }
2877
2878   return result;
2879 }
2880 2758
// NOTE(review): the lines below are a side-by-side diff rendering — old
// revision in the left column, new revision in the right; each physical line
// carries both columns and is not directly compilable.
// New-revision behavior visible here: LoadMem bounds-checks via
// BoundsCheckMem, emits a plain Load when the access is aligned or
// UnalignedLoadSupported() reports support, and otherwise emits the new
// UnalignedLoad machine operator (replacing the old BuildUnalignedLoad
// software emulation). The load is chained into *effect_, optionally
// byte-swapped on big-endian targets, and subword i64 results are sign- or
// zero-extended according to memtype.IsSigned().
2881 Node* WasmGraphBuilder::LoadMem(wasm::LocalType type, MachineType memtype, 2759 Node* WasmGraphBuilder::LoadMem(wasm::LocalType type, MachineType memtype,
2882 Node* index, uint32_t offset, 2760 Node* index, uint32_t offset,
2883 uint32_t alignment, 2761 uint32_t alignment,
2884 wasm::WasmCodePosition position) { 2762 wasm::WasmCodePosition position) {
2885 Node* load; 2763 Node* load;
2886 2764
2887 // WASM semantics throw on OOB. Introduce explicit bounds check. 2765 // WASM semantics throw on OOB. Introduce explicit bounds check.
2888 BoundsCheckMem(memtype, index, offset, position); 2766 BoundsCheckMem(memtype, index, offset, position);
2889 bool aligned = static_cast<int>(alignment) >= 2767 bool aligned = static_cast<int>(alignment) >=
2890 ElementSizeLog2Of(memtype.representation()); 2768 ElementSizeLog2Of(memtype.representation());
2891 2769
2892 if (aligned || 2770 if (aligned ||
2893 jsgraph()->machine()->UnalignedLoadSupported(memtype, alignment)) { 2771 jsgraph()->machine()->UnalignedLoadSupported(memtype, alignment)) {
2894 load = graph()->NewNode(jsgraph()->machine()->Load(memtype), 2772 load = graph()->NewNode(jsgraph()->machine()->Load(memtype),
2895 MemBuffer(offset), index, *effect_, *control_); 2773 MemBuffer(offset), index, *effect_, *control_);
2896 *effect_ = load;
2897 } else { 2774 } else {
2898 load = BuildUnalignedLoad(type, memtype, index, offset, alignment); 2775 load = graph()->NewNode(jsgraph()->machine()->UnalignedLoad(memtype),
2776 MemBuffer(offset), index, *effect_, *control_);
2899 } 2777 }
2778
2779 *effect_ = load;
2780
2900 #if defined(V8_TARGET_BIG_ENDIAN) 2781 #if defined(V8_TARGET_BIG_ENDIAN)
2901 // TODO(john.yan) Implement byte swap turbofan operator 2782 // TODO(john.yan) Implement byte swap turbofan operator
2902 // and use it if available for better performance 2783 // and use it if available for better performance
2903 load = BuildChangeEndianness(load, memtype, type); 2784 load = BuildChangeEndianness(load, memtype, type);
2904 #endif 2785 #endif
2905 2786
2906 if (type == wasm::kAstI64 && 2787 if (type == wasm::kAstI64 &&
2907 ElementSizeLog2Of(memtype.representation()) < 3) { 2788 ElementSizeLog2Of(memtype.representation()) < 3) {
2908 // TODO(titzer): TF zeroes the upper bits of 64-bit loads for subword sizes. 2789 // TODO(titzer): TF zeroes the upper bits of 64-bit loads for subword sizes.
2909 if (memtype.IsSigned()) { 2790 if (memtype.IsSigned()) {
2910 // sign extend 2791 // sign extend
2911 load = graph()->NewNode(jsgraph()->machine()->ChangeInt32ToInt64(), load); 2792 load = graph()->NewNode(jsgraph()->machine()->ChangeInt32ToInt64(), load);
2912 } else { 2793 } else {
2913 // zero extend 2794 // zero extend
2914 load = 2795 load =
2915 graph()->NewNode(jsgraph()->machine()->ChangeUint32ToUint64(), load); 2796 graph()->NewNode(jsgraph()->machine()->ChangeUint32ToUint64(), load);
2916 } 2797 }
2917 } 2798 }
2918 2799
2919 return load; 2800 return load;
2920 } 2801 }
2921 2802
2922 Node* WasmGraphBuilder::GetUnalignedStoreOffsetNode(Node* baseOffset,
2923 int numberOfBytes,
2924 int stride, int current) {
2925 int offset;
2926 wasm::WasmOpcode addOpcode;
2927
2928 #if defined(V8_TARGET_LITTLE_ENDIAN)
2929 offset = current;
2930 #elif defined(V8_TARGET_BIG_ENDIAN)
2931 offset = numberOfBytes - stride - current;
2932 #else
2933 #error Unsupported endianness
2934 #endif
2935
2936 #if WASM_64
2937 addOpcode = wasm::kExprI64Add;
2938 #else
2939 addOpcode = wasm::kExprI32Add;
2940 #endif
2941
2942 if (offset == 0) {
2943 return baseOffset;
2944 } else {
2945 return Binop(addOpcode, baseOffset, jsgraph()->Int32Constant(offset));
2946 }
2947 }
2948
// Builds a software-emulated unaligned store: the value is written as a
// sequence of smaller stores of the alignment-guaranteed width (see
// GetTypeForUnalignedAccess), shifting the integer representation right by
// one piece-width between stores. Floating-point values are first
// reinterpreted as integers. Each partial store is threaded through the
// effect chain (*effect_), so statement order here is significant.
// Returns |val| (the stored value), matching StoreMem's contract.
2949 Node* WasmGraphBuilder::BuildUnalignedStore(MachineType memtype, Node* index,
2950                                             uint32_t offset, uint32_t alignment,
2951                                             Node* val) {
2952   Node* store;
2953   Node* newValue;
2954
2955   wasm::WasmOpcode shiftOpcode;
2956
2957   Node* shiftConst;
2958   bool extendTo64Bit = false;
2959   bool isFloat = IsFloatingPoint(memtype.representation());
     // stride: bytes per partial store the alignment guarantees;
     // numberOfBytes: total width of the requested access.
2960   int stride = 1 << ElementSizeLog2Of(
2961                    GetTypeForUnalignedAccess(alignment).representation());
2962   int numberOfBytes = 1 << ElementSizeLog2Of(memtype.representation());
2963   DCHECK(numberOfBytes % stride == 0);
2964
     // All partial stores write the alignment-guaranteed width; WASM memory
     // needs no write barrier.
2965   StoreRepresentation rep(GetTypeForUnalignedAccess(alignment).representation(),
2966                           kNoWriteBarrier);
2967
     // <= 2 means the value fits in 32 bits (1/2/4 bytes); otherwise shift
     // in 64-bit and truncate each piece to 32 bits before storing.
2968   if (ElementSizeLog2Of(memtype.representation()) <= 2) {
2969     shiftOpcode = wasm::kExprI32ShrU;
2970     shiftConst = jsgraph()->Int32Constant(8 * stride);
2971   } else {
2972     shiftOpcode = wasm::kExprI64ShrU;
2973     shiftConst = jsgraph()->Int64Constant(8 * stride);
2974     extendTo64Bit = true;
2975   }
2976
     // Work on the integer bit pattern of float values.
2977   newValue = val;
2978   if (isFloat) {
2979     switch (memtype.representation()) {
2980       case MachineRepresentation::kFloat64:
2981         newValue = Unop(wasm::kExprI64ReinterpretF64, val);
2982         break;
2983       case MachineRepresentation::kFloat32:
2984         newValue = Unop(wasm::kExprI32ReinterpretF32, val);
2985         break;
2986       default:
2987         UNREACHABLE();
2988     }
2989   }
2990
2991   Node* baseOffset = MemBuffer(offset);
2992
     // Store all pieces but the last, shifting the remaining bits down after
     // each one (GetUnalignedStoreOffsetNode handles endianness mirroring).
2993   for (int i = 0; i < numberOfBytes - stride; i += stride) {
2994     store = graph()->NewNode(
2995         jsgraph()->machine()->Store(rep),
2996         GetUnalignedStoreOffsetNode(baseOffset, numberOfBytes, stride, i),
2997         index,
2998         extendTo64Bit ? Unop(wasm::kExprI32ConvertI64, newValue) : newValue,
2999         *effect_, *control_);
3000     newValue = Binop(shiftOpcode, newValue, shiftConst);
       // Chain the partial store into the effect chain.
3001     *effect_ = store;
3002   }
     // Final piece: no shift needed afterwards, hence outside the loop.
3003   store = graph()->NewNode(
3004       jsgraph()->machine()->Store(rep),
3005       GetUnalignedStoreOffsetNode(baseOffset, numberOfBytes, stride,
3006                                   numberOfBytes - stride),
3007       index,
3008       extendTo64Bit ? Unop(wasm::kExprI32ConvertI64, newValue) : newValue,
3009       *effect_, *control_);
3010   *effect_ = store;
3011   return val;
3012 }
3013 2803
// NOTE(review): the lines below are a side-by-side diff rendering — old
// revision in the left column, new revision in the right; each physical line
// carries both columns and is not directly compilable.
// New-revision behavior visible here: StoreMem bounds-checks via
// BoundsCheckMem, byte-swaps the value on big-endian targets, emits a plain
// Store when the access is aligned or UnalignedStoreSupported() reports
// support, and otherwise emits the new UnalignedStore machine operator
// (replacing the old BuildUnalignedStore software emulation). The store is
// chained into *effect_ and returned.
3014 Node* WasmGraphBuilder::StoreMem(MachineType memtype, Node* index, 2804 Node* WasmGraphBuilder::StoreMem(MachineType memtype, Node* index,
3015 uint32_t offset, uint32_t alignment, Node* val, 2805 uint32_t offset, uint32_t alignment, Node* val,
3016 wasm::WasmCodePosition position) { 2806 wasm::WasmCodePosition position) {
3017 Node* store; 2807 Node* store;
3018 2808
3019 // WASM semantics throw on OOB. Introduce explicit bounds check. 2809 // WASM semantics throw on OOB. Introduce explicit bounds check.
3020 BoundsCheckMem(memtype, index, offset, position); 2810 BoundsCheckMem(memtype, index, offset, position);
3021 StoreRepresentation rep(memtype.representation(), kNoWriteBarrier); 2811 StoreRepresentation rep(memtype.representation(), kNoWriteBarrier);
3022 bool aligned = static_cast<int>(alignment) >= 2812 bool aligned = static_cast<int>(alignment) >=
3023 ElementSizeLog2Of(memtype.representation()); 2813 ElementSizeLog2Of(memtype.representation());
3024 2814
3025 #if defined(V8_TARGET_BIG_ENDIAN) 2815 #if defined(V8_TARGET_BIG_ENDIAN)
3026 // TODO(john.yan) Implement byte swap turbofan operator 2816 // TODO(john.yan) Implement byte swap turbofan operator
3027 // and use it if available for better performance 2817 // and use it if available for better performance
3028 val = BuildChangeEndianness(val, memtype); 2818 val = BuildChangeEndianness(val, memtype);
3029 #endif 2819 #endif
3030 2820
3031 if (aligned || 2821 if (aligned ||
3032 jsgraph()->machine()->UnalignedStoreSupported(memtype, alignment)) { 2822 jsgraph()->machine()->UnalignedStoreSupported(memtype, alignment)) {
3033 StoreRepresentation rep(memtype.representation(), kNoWriteBarrier); 2823 StoreRepresentation rep(memtype.representation(), kNoWriteBarrier);
3034 store = 2824 store =
3035 graph()->NewNode(jsgraph()->machine()->Store(rep), MemBuffer(offset), 2825 graph()->NewNode(jsgraph()->machine()->Store(rep), MemBuffer(offset),
3036 index, val, *effect_, *control_); 2826 index, val, *effect_, *control_);
3037 *effect_ = store;
3038 } else { 2827 } else {
3039 store = BuildUnalignedStore(memtype, index, offset, alignment, val); 2828 UnalignedStoreRepresentation rep(memtype.representation());
2829 store =
2830 graph()->NewNode(jsgraph()->machine()->UnalignedStore(rep),
2831 MemBuffer(offset), index, val, *effect_, *control_);
3040 } 2832 }
3041 2833
2834 *effect_ = store;
2835
3042 return store; 2836 return store;
3043 } 2837 }
3044 2838
3045 Node* WasmGraphBuilder::BuildAsmjsLoadMem(MachineType type, Node* index) { 2839 Node* WasmGraphBuilder::BuildAsmjsLoadMem(MachineType type, Node* index) {
3046 // TODO(turbofan): fold bounds checks for constant asm.js loads. 2840 // TODO(turbofan): fold bounds checks for constant asm.js loads.
3047 // asm.js semantics use CheckedLoad (i.e. OOB reads return 0ish). 2841 // asm.js semantics use CheckedLoad (i.e. OOB reads return 0ish).
3048 const Operator* op = jsgraph()->machine()->CheckedLoad(type); 2842 const Operator* op = jsgraph()->machine()->CheckedLoad(type);
3049 Node* load = graph()->NewNode(op, MemBuffer(0), index, MemSize(0), *effect_, 2843 Node* load = graph()->NewNode(op, MemBuffer(0), index, MemSize(0), *effect_,
3050 *control_); 2844 *control_);
3051 *effect_ = load; 2845 *effect_ = load;
(...skipping 412 matching lines...) Expand 10 before | Expand all | Expand 10 after
3464 function_->code_start_offset), 3258 function_->code_start_offset),
3465 compile_ms); 3259 compile_ms);
3466 } 3260 }
3467 3261
3468 return code; 3262 return code;
3469 } 3263 }
3470 3264
3471 } // namespace compiler 3265 } // namespace compiler
3472 } // namespace internal 3266 } // namespace internal
3473 } // namespace v8 3267 } // namespace v8
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698