OLD | NEW |
(Empty) | |
| 1 ; RUN: opt -nacl-rewrite-atomics -S < %s | FileCheck %s |
| 2 ; |
| 3 ; Validate that volatile loads/stores get rewritten into NaCl atomic builtins. |
| 4 ; The memory ordering for volatile loads/stores could in principle be left |
| 5 ; relaxed; this test verifies the pass promotes them to sequential consistency (enum value 6). |
| 6 |
| 7 target datalayout = "p:32:32:32" |
| 8 |
| 9 ; CHECK-LABEL: @test_volatile_load_i8 |
| ; Expect: volatile i8 load rewritten to @llvm.nacl.atomic.load.i8 with seq_cst ordering (6). |
| 10 define zeroext i8 @test_volatile_load_i8(i8* %ptr) { |
| 11 ; CHECK-NEXT: %res = call i8 @llvm.nacl.atomic.load.i8(i8* %ptr, i32 6) |
| 12 %res = load volatile i8* %ptr, align 1 |
| 13 ret i8 %res ; CHECK-NEXT: ret i8 %res |
| 14 } |
| 15 |
| 16 ; CHECK-LABEL: @test_volatile_store_i8 |
| ; Expect: volatile i8 store rewritten to @llvm.nacl.atomic.store.i8 with seq_cst ordering (6). |
| 17 define void @test_volatile_store_i8(i8* %ptr, i8 zeroext %value) { |
| 18 ; CHECK-NEXT: call void @llvm.nacl.atomic.store.i8(i8 %value, i8* %ptr, i32 6) |
| 19 store volatile i8 %value, i8* %ptr, align 1 |
| 20 ret void ; CHECK-NEXT: ret void |
| 21 } |
| 22 |
| 23 ; CHECK-LABEL: @test_volatile_load_i16 |
| ; Expect: volatile i16 load rewritten to @llvm.nacl.atomic.load.i16 with seq_cst ordering (6). |
| 24 define zeroext i16 @test_volatile_load_i16(i16* %ptr) { |
| 25 ; CHECK-NEXT: %res = call i16 @llvm.nacl.atomic.load.i16(i16* %ptr, i32 6) |
| 26 %res = load volatile i16* %ptr, align 2 |
| 27 ret i16 %res ; CHECK-NEXT: ret i16 %res |
| 28 } |
| 29 |
| 30 ; CHECK-LABEL: @test_volatile_store_i16 |
| ; Expect: volatile i16 store rewritten to @llvm.nacl.atomic.store.i16 with seq_cst ordering (6). |
| 31 define void @test_volatile_store_i16(i16* %ptr, i16 zeroext %value) { |
| 32 ; CHECK-NEXT: call void @llvm.nacl.atomic.store.i16(i16 %value, i16* %ptr, i32
6) |
| 33 store volatile i16 %value, i16* %ptr, align 2 |
| 34 ret void ; CHECK-NEXT: ret void |
| 35 } |
| 36 |
| 37 ; CHECK-LABEL: @test_volatile_load_i32 |
| ; Expect: volatile i32 load rewritten to @llvm.nacl.atomic.load.i32 with seq_cst ordering (6). |
| 38 define i32 @test_volatile_load_i32(i32* %ptr) { |
| 39 ; CHECK-NEXT: %res = call i32 @llvm.nacl.atomic.load.i32(i32* %ptr, i32 6) |
| 40 %res = load volatile i32* %ptr, align 4 |
| 41 ret i32 %res ; CHECK-NEXT: ret i32 %res |
| 42 } |
| 43 |
| 44 ; CHECK-LABEL: @test_volatile_store_i32 |
| ; Expect: volatile i32 store rewritten to @llvm.nacl.atomic.store.i32 with seq_cst ordering (6). |
| 45 define void @test_volatile_store_i32(i32* %ptr, i32 %value) { |
| 46 ; CHECK-NEXT: call void @llvm.nacl.atomic.store.i32(i32 %value, i32* %ptr, i32
6) |
| 47 store volatile i32 %value, i32* %ptr, align 4 |
| 48 ret void ; CHECK-NEXT: ret void |
| 49 } |
| 50 |
| 51 ; CHECK-LABEL: @test_volatile_load_i64 |
| ; Expect: volatile i64 load rewritten to @llvm.nacl.atomic.load.i64 with seq_cst ordering (6). |
| 52 define i64 @test_volatile_load_i64(i64* %ptr) { |
| 53 ; CHECK-NEXT: %res = call i64 @llvm.nacl.atomic.load.i64(i64* %ptr, i32 6) |
| 54 %res = load volatile i64* %ptr, align 8 |
| 55 ret i64 %res ; CHECK-NEXT: ret i64 %res |
| 56 } |
| 57 |
| 58 ; CHECK-LABEL: @test_volatile_store_i64 |
| ; Expect: volatile i64 store rewritten to @llvm.nacl.atomic.store.i64 with seq_cst ordering (6). |
| 59 define void @test_volatile_store_i64(i64* %ptr, i64 %value) { |
| 60 ; CHECK-NEXT: call void @llvm.nacl.atomic.store.i64(i64 %value, i64* %ptr, i32
6) |
| 61 store volatile i64 %value, i64* %ptr, align 8 |
| 62 ret void ; CHECK-NEXT: ret void |
| 63 } |
| 64 |
| 65 ; CHECK-LABEL: @test_volatile_load_float |
| ; Expect: float has no atomic intrinsic of its own, so the pointer is bitcast to i32*, |
| ; loaded via @llvm.nacl.atomic.load.i32, and the i32 result bitcast back to float. |
| 66 define float @test_volatile_load_float(float* %ptr) { |
| 67 ; CHECK-NEXT: %ptr.cast = bitcast float* %ptr to i32* |
| 68 ; CHECK-NEXT: %res = call i32 @llvm.nacl.atomic.load.i32(i32* %ptr.cast, i32 6
) |
| 69 ; CHECK-NEXT: %res.cast = bitcast i32 %res to float |
| 70 %res = load volatile float* %ptr, align 4 |
| 71 ret float %res ; CHECK-NEXT: ret float %res.cast |
| 72 } |
| 73 |
| 74 ; CHECK-LABEL: @test_volatile_store_float |
| ; Expect: float value bitcast to i32 and stored through @llvm.nacl.atomic.store.i32 |
| ; via an i32*-cast pointer, with seq_cst ordering (6). |
| 75 define void @test_volatile_store_float(float* %ptr, float %value) { |
| 76 ; CHECK-NEXT: %ptr.cast = bitcast float* %ptr to i32* |
| 77 ; CHECK-NEXT: %value.cast = bitcast float %value to i32 |
| 78 ; CHECK-NEXT: call void @llvm.nacl.atomic.store.i32(i32 %value.cast, i32* %ptr
.cast, i32 6) |
| 79 store volatile float %value, float* %ptr, align 4 |
| 80 ret void ; CHECK-NEXT: ret void |
| 81 } |
| 82 |
| 83 ; CHECK-LABEL: @test_volatile_load_double |
| ; Expect: double handled like float but through the 64-bit intrinsic: pointer bitcast |
| ; to i64*, load via @llvm.nacl.atomic.load.i64, result bitcast back to double. |
| 84 define double @test_volatile_load_double(double* %ptr) { |
| 85 ; CHECK-NEXT: %ptr.cast = bitcast double* %ptr to i64* |
| 86 ; CHECK-NEXT: %res = call i64 @llvm.nacl.atomic.load.i64(i64* %ptr.cast, i32 6
) |
| 87 ; CHECK-NEXT: %res.cast = bitcast i64 %res to double |
| 88 %res = load volatile double* %ptr, align 8 |
| 89 ret double %res ; CHECK-NEXT: ret double %res.cast |
| 90 } |
| 91 |
| 92 ; CHECK-LABEL: @test_volatile_store_double |
| ; Expect: double value bitcast to i64 and stored through @llvm.nacl.atomic.store.i64 |
| ; via an i64*-cast pointer, with seq_cst ordering (6). |
| 93 define void @test_volatile_store_double(double* %ptr, double %value) { |
| 94 ; CHECK-NEXT: %ptr.cast = bitcast double* %ptr to i64* |
| 95 ; CHECK-NEXT: %value.cast = bitcast double %value to i64 |
| 96 ; CHECK-NEXT: call void @llvm.nacl.atomic.store.i64(i64 %value.cast, i64* %ptr
.cast, i32 6) |
| 97 store volatile double %value, double* %ptr, align 8 |
| 98 ret void ; CHECK-NEXT: ret void |
| 99 } |
| 100 |
| 101 ; CHECK-LABEL: @test_volatile_load_i32_pointer |
| ; Expect: pointer-typed load lowered via @llvm.nacl.atomic.load.i32, with inttoptr |
| ; (not bitcast) on the result; pointers are 32 bits per the datalayout above. |
| 102 define i32* @test_volatile_load_i32_pointer(i32** %ptr) { |
| 103 ; CHECK-NEXT: %ptr.cast = bitcast i32** %ptr to i32* |
| 104 ; CHECK-NEXT: %res = call i32 @llvm.nacl.atomic.load.i32(i32* %ptr.cast, i32 6
) |
| 105 ; CHECK-NEXT: %res.cast = inttoptr i32 %res to i32* |
| 106 %res = load volatile i32** %ptr, align 4 |
| 107 ret i32* %res ; CHECK-NEXT: ret i32* %res.cast |
| 108 } |
| 109 |
| 110 ; CHECK-LABEL: @test_volatile_store_i32_pointer |
| ; Expect: pointer value converted with ptrtoint to i32, then stored through |
| ; @llvm.nacl.atomic.store.i32 with seq_cst ordering (6). |
| 111 define void @test_volatile_store_i32_pointer(i32** %ptr, i32* %value) { |
| 112 ; CHECK-NEXT: %ptr.cast = bitcast i32** %ptr to i32* |
| 113 ; CHECK-NEXT: %value.cast = ptrtoint i32* %value to i32 |
| 114 ; CHECK-NEXT: call void @llvm.nacl.atomic.store.i32(i32 %value.cast, i32* %ptr
.cast, i32 6) |
| 115 store volatile i32* %value, i32** %ptr, align 4 |
| 116 ret void ; CHECK-NEXT: ret void |
| 117 } |
| 118 |
| 119 ; CHECK-LABEL: @test_volatile_load_double_pointer |
| ; Expect: same lowering as the i32* case — the pointee type is irrelevant, only the |
| ; 32-bit pointer width matters, so the i32 intrinsic plus inttoptr is used. |
| 120 define double* @test_volatile_load_double_pointer(double** %ptr) { |
| 121 ; CHECK-NEXT: %ptr.cast = bitcast double** %ptr to i32* |
| 122 ; CHECK-NEXT: %res = call i32 @llvm.nacl.atomic.load.i32(i32* %ptr.cast, i32 6
) |
| 123 ; CHECK-NEXT: %res.cast = inttoptr i32 %res to double* |
| 124 %res = load volatile double** %ptr, align 4 |
| 125 ret double* %res ; CHECK-NEXT: ret double* %res.cast |
| 126 } |
| 127 |
| 128 ; CHECK-LABEL: @test_volatile_store_double_pointer |
| ; Expect: double* value converted with ptrtoint to i32, then stored through |
| ; @llvm.nacl.atomic.store.i32 with seq_cst ordering (6). |
| 129 define void @test_volatile_store_double_pointer(double** %ptr, double* %value) { |
| 130 ; CHECK-NEXT: %ptr.cast = bitcast double** %ptr to i32* |
| 131 ; CHECK-NEXT: %value.cast = ptrtoint double* %value to i32 |
| 132 ; CHECK-NEXT: call void @llvm.nacl.atomic.store.i32(i32 %value.cast, i32* %ptr
.cast, i32 6) |
| 133 store volatile double* %value, double** %ptr, align 4 |
| 134 ret void ; CHECK-NEXT: ret void |
| 135 } |
| 136 |
| 137 ; CHECK-LABEL: @test_volatile_load_v4i8 |
| ; Expect: a 32-bit vector (<4 x i8>) is loaded through @llvm.nacl.atomic.load.i32 |
| ; with bitcasts on the pointer and the result. |
| 138 define <4 x i8> @test_volatile_load_v4i8(<4 x i8>* %ptr) { |
| 139 ; CHECK-NEXT: %ptr.cast = bitcast <4 x i8>* %ptr to i32* |
| 140 ; CHECK-NEXT: %res = call i32 @llvm.nacl.atomic.load.i32(i32* %ptr.cast, i32 6
) |
| 141 ; CHECK-NEXT: %res.cast = bitcast i32 %res to <4 x i8> |
| 142 %res = load volatile <4 x i8>* %ptr, align 8 |
| 143 ret <4 x i8> %res ; CHECK-NEXT: ret <4 x i8> %res.cast |
| 144 } |
| 145 |
| 146 ; CHECK-LABEL: @test_volatile_store_v4i8 |
| ; Expect: <4 x i8> value bitcast to i32 and stored through @llvm.nacl.atomic.store.i32 |
| ; with seq_cst ordering (6). |
| 147 define void @test_volatile_store_v4i8(<4 x i8>* %ptr, <4 x i8> %value) { |
| 148 ; CHECK-NEXT: %ptr.cast = bitcast <4 x i8>* %ptr to i32* |
| 149 ; CHECK-NEXT: %value.cast = bitcast <4 x i8> %value to i32 |
| 150 ; CHECK-NEXT: call void @llvm.nacl.atomic.store.i32(i32 %value.cast, i32* %ptr
.cast, i32 6) |
| 151 store volatile <4 x i8> %value, <4 x i8>* %ptr, align 8 |
| 152 ret void ; CHECK-NEXT: ret void |
| 153 } |
| 154 |
| 155 ; CHECK-LABEL: @test_volatile_load_v4i16 |
| ; Expect: a 64-bit vector (<4 x i16>) is loaded through @llvm.nacl.atomic.load.i64 |
| ; with bitcasts on the pointer and the result. |
| 156 define <4 x i16> @test_volatile_load_v4i16(<4 x i16>* %ptr) { |
| 157 ; CHECK-NEXT: %ptr.cast = bitcast <4 x i16>* %ptr to i64* |
| 158 ; CHECK-NEXT: %res = call i64 @llvm.nacl.atomic.load.i64(i64* %ptr.cast, i32 6
) |
| 159 ; CHECK-NEXT: %res.cast = bitcast i64 %res to <4 x i16> |
| 160 %res = load volatile <4 x i16>* %ptr, align 8 |
| 161 ret <4 x i16> %res ; CHECK-NEXT: ret <4 x i16> %res.cast |
| 162 } |
| 163 |
| 164 ; CHECK-LABEL: @test_volatile_store_v4i16 |
| ; Expect: <4 x i16> value bitcast to i64 and stored through @llvm.nacl.atomic.store.i64 |
| ; with seq_cst ordering (6). |
| 165 define void @test_volatile_store_v4i16(<4 x i16>* %ptr, <4 x i16> %value) { |
| 166 ; CHECK-NEXT: %ptr.cast = bitcast <4 x i16>* %ptr to i64* |
| 167 ; CHECK-NEXT: %value.cast = bitcast <4 x i16> %value to i64 |
| 168 ; CHECK-NEXT: call void @llvm.nacl.atomic.store.i64(i64 %value.cast, i64* %ptr
.cast, i32 6) |
| 169 store volatile <4 x i16> %value, <4 x i16>* %ptr, align 8 |
| 170 ret void ; CHECK-NEXT: ret void |
| 171 } |
OLD | NEW |