| OLD | NEW |
| 1 ; RUN: pnacl-abicheck < %s | FileCheck %s | 1 ; RUN: pnacl-abicheck < %s | FileCheck %s |
| 2 | 2 |
| 3 ; Test the "align" attributes that are allowed on load and store | 3 ; Test the "align" attributes that are allowed on load and store |
| 4 ; instructions. Note that "cmpxchg" and "atomicrmw" do not take | 4 ; instructions. |
| 5 ; "align" attributes, so are not tested here. | |
| 6 | 5 |
| 7 | 6 |
| 8 declare void @llvm.memcpy.p0i8.p0i8.i32(i8*, i8*, i32, i32, i1) | 7 declare void @llvm.memcpy.p0i8.p0i8.i32(i8*, i8*, i32, i32, i1) |
| 9 declare void @llvm.memmove.p0i8.p0i8.i32(i8*, i8*, i32, i32, i1) | 8 declare void @llvm.memmove.p0i8.p0i8.i32(i8*, i8*, i32, i32, i1) |
| 10 declare void @llvm.memset.p0i8.i32(i8*, i8, i32, i32, i1) | 9 declare void @llvm.memset.p0i8.i32(i8*, i8, i32, i32, i1) |
| 11 | 10 |
| 12 | 11 |
| 13 define internal void @allowed_cases(i32 %ptr, float %f, double %d) { | 12 define internal void @allowed_cases(i32 %ptr, float %f, double %d) { |
| 14 %ptr.i32 = inttoptr i32 %ptr to i32* | 13 %ptr.i32 = inttoptr i32 %ptr to i32* |
| 15 load i32* %ptr.i32, align 1 | 14 load i32* %ptr.i32, align 1 |
| 16 store i32 123, i32* %ptr.i32, align 1 | 15 store i32 123, i32* %ptr.i32, align 1 |
| 17 | 16 |
| 18 %ptr.float = inttoptr i32 %ptr to float* | 17 %ptr.float = inttoptr i32 %ptr to float* |
| 19 load float* %ptr.float, align 1 | 18 load float* %ptr.float, align 1 |
| 20 load float* %ptr.float, align 4 | 19 load float* %ptr.float, align 4 |
| 21 store float %f, float* %ptr.float, align 1 | 20 store float %f, float* %ptr.float, align 1 |
| 22 store float %f, float* %ptr.float, align 4 | 21 store float %f, float* %ptr.float, align 4 |
| 23 | 22 |
| 24 %ptr.double = inttoptr i32 %ptr to double* | 23 %ptr.double = inttoptr i32 %ptr to double* |
| 25 load double* %ptr.double, align 1 | 24 load double* %ptr.double, align 1 |
| 26 load double* %ptr.double, align 8 | 25 load double* %ptr.double, align 8 |
| 27 store double %d, double* %ptr.double, align 1 | 26 store double %d, double* %ptr.double, align 1 |
| 28 store double %d, double* %ptr.double, align 8 | 27 store double %d, double* %ptr.double, align 8 |
| 29 | 28 |
| 30 ; Stricter alignments are required for atomics. | |
| 31 load atomic i32* %ptr.i32 seq_cst, align 4 | |
| 32 store atomic i32 123, i32* %ptr.i32 seq_cst, align 4 | |
| 33 load atomic float* %ptr.float seq_cst, align 4 | |
| 34 store atomic float %f, float* %ptr.float seq_cst, align 4 | |
| 35 load atomic double* %ptr.double seq_cst, align 8 | |
| 36 store atomic double %d, double* %ptr.double seq_cst, align 8 | |
| 37 | |
| 38 ; memcpy() et al. take an alignment parameter, which is allowed to be 1. | 29 ; memcpy() et al. take an alignment parameter, which is allowed to be 1. |
| 39 %ptr.p = inttoptr i32 %ptr to i8* | 30 %ptr.p = inttoptr i32 %ptr to i8* |
| 40 call void @llvm.memcpy.p0i8.p0i8.i32(i8* %ptr.p, i8* %ptr.p, | 31 call void @llvm.memcpy.p0i8.p0i8.i32(i8* %ptr.p, i8* %ptr.p, |
| 41 i32 10, i32 1, i1 false) | 32 i32 10, i32 1, i1 false) |
| 42 call void @llvm.memmove.p0i8.p0i8.i32(i8* %ptr.p, i8* %ptr.p, | 33 call void @llvm.memmove.p0i8.p0i8.i32(i8* %ptr.p, i8* %ptr.p, |
| 43 i32 10, i32 1, i1 false) | 34 i32 10, i32 1, i1 false) |
| 44 call void @llvm.memset.p0i8.i32(i8* %ptr.p, i8 99, | 35 call void @llvm.memset.p0i8.i32(i8* %ptr.p, i8 99, |
| 45 i32 10, i32 1, i1 false) | 36 i32 10, i32 1, i1 false) |
| 46 | 37 |
| 47 ret void | 38 ret void |
| (...skipping 22 matching lines...) |
| 70 %ptr.double = inttoptr i32 %ptr to double* | 61 %ptr.double = inttoptr i32 %ptr to double* |
| 71 load double* %ptr.double, align 2 | 62 load double* %ptr.double, align 2 |
| 72 load double* %ptr.double, align 4 | 63 load double* %ptr.double, align 4 |
| 73 store double %d, double* %ptr.double, align 2 | 64 store double %d, double* %ptr.double, align 2 |
| 74 store double %d, double* %ptr.double, align 4 | 65 store double %d, double* %ptr.double, align 4 |
| 75 ; CHECK-NEXT: disallowed: bad alignment: {{.*}} load double{{.*}} align 2 | 66 ; CHECK-NEXT: disallowed: bad alignment: {{.*}} load double{{.*}} align 2 |
| 76 ; CHECK-NEXT: disallowed: bad alignment: {{.*}} load double{{.*}} align 4 | 67 ; CHECK-NEXT: disallowed: bad alignment: {{.*}} load double{{.*}} align 4 |
| 77 ; CHECK-NEXT: disallowed: bad alignment: store double{{.*}} align 2 | 68 ; CHECK-NEXT: disallowed: bad alignment: store double{{.*}} align 2 |
| 78 ; CHECK-NEXT: disallowed: bad alignment: store double{{.*}} align 4 | 69 ; CHECK-NEXT: disallowed: bad alignment: store double{{.*}} align 4 |
| 79 | 70 |
| 80 ; Too-small alignments for atomics are rejected. | |
| 81 load atomic i32* %ptr.i32 seq_cst, align 2 | |
| 82 load atomic float* %ptr.float seq_cst, align 2 | |
| 83 load atomic double* %ptr.double seq_cst, align 4 | |
| 84 ; CHECK-NEXT: disallowed: bad alignment: {{.*}} load atomic i32{{.*}} align 2 | |
| 85 ; CHECK-NEXT: disallowed: bad alignment: {{.*}} load atomic float{{.*}} align 2 | |
| 86 ; CHECK-NEXT: disallowed: bad alignment: {{.*}} load atomic double{{.*}} align 4 | |
| 87 | |
| 88 ; Too-large alignments for atomics are also rejected. | |
| 89 load atomic i32* %ptr.i32 seq_cst, align 8 | |
| 90 load atomic float* %ptr.float seq_cst, align 8 | |
| 91 load atomic double* %ptr.double seq_cst, align 16 | |
| 92 ; CHECK-NEXT: disallowed: bad alignment: {{.*}} load atomic i32{{.*}} align 8 | |
| 93 ; CHECK-NEXT: disallowed: bad alignment: {{.*}} load atomic float{{.*}} align 8 | |
| 94 ; CHECK-NEXT: disallowed: bad alignment: {{.*}} load atomic double{{.*}} align 16 | |
| 95 | |
| 96 ; Non-pessimistic alignments for memcpy() et al are rejected. | 71 ; Non-pessimistic alignments for memcpy() et al are rejected. |
| 97 %ptr.p = inttoptr i32 %ptr to i8* | 72 %ptr.p = inttoptr i32 %ptr to i8* |
| 98 call void @llvm.memcpy.p0i8.p0i8.i32(i8* %ptr.p, i8* %ptr.p, | 73 call void @llvm.memcpy.p0i8.p0i8.i32(i8* %ptr.p, i8* %ptr.p, |
| 99 i32 10, i32 4, i1 false) | 74 i32 10, i32 4, i1 false) |
| 100 call void @llvm.memmove.p0i8.p0i8.i32(i8* %ptr.p, i8* %ptr.p, | 75 call void @llvm.memmove.p0i8.p0i8.i32(i8* %ptr.p, i8* %ptr.p, |
| 101 i32 10, i32 4, i1 false) | 76 i32 10, i32 4, i1 false) |
| 102 call void @llvm.memset.p0i8.i32(i8* %ptr.p, i8 99, | 77 call void @llvm.memset.p0i8.i32(i8* %ptr.p, i8 99, |
| 103 i32 10, i32 4, i1 false) | 78 i32 10, i32 4, i1 false) |
| 104 ; CHECK-NEXT: bad alignment: call void @llvm.memcpy | 79 ; CHECK-NEXT: bad alignment: call void @llvm.memcpy |
| 105 ; CHECK-NEXT: bad alignment: call void @llvm.memmove | 80 ; CHECK-NEXT: bad alignment: call void @llvm.memmove |
| 106 ; CHECK-NEXT: bad alignment: call void @llvm.memset | 81 ; CHECK-NEXT: bad alignment: call void @llvm.memset |
| 107 | 82 |
| 108 ; Check that the verifier does not crash if the alignment argument | 83 ; Check that the verifier does not crash if the alignment argument |
| 109 ; is not a constant. | 84 ; is not a constant. |
| 110 call void @llvm.memcpy.p0i8.p0i8.i32(i8* %ptr.p, i8* %ptr.p, | 85 call void @llvm.memcpy.p0i8.p0i8.i32(i8* %ptr.p, i8* %ptr.p, |
| 111 i32 10, i32 %align, i1 false) | 86 i32 10, i32 %align, i1 false) |
| 112 call void @llvm.memmove.p0i8.p0i8.i32(i8* %ptr.p, i8* %ptr.p, | 87 call void @llvm.memmove.p0i8.p0i8.i32(i8* %ptr.p, i8* %ptr.p, |
| 113 i32 10, i32 %align, i1 false) | 88 i32 10, i32 %align, i1 false) |
| 114 call void @llvm.memset.p0i8.i32(i8* %ptr.p, i8 99, | 89 call void @llvm.memset.p0i8.i32(i8* %ptr.p, i8 99, |
| 115 i32 10, i32 %align, i1 false) | 90 i32 10, i32 %align, i1 false) |
| 116 ; CHECK-NEXT: bad alignment: call void @llvm.memcpy | 91 ; CHECK-NEXT: bad alignment: call void @llvm.memcpy |
| 117 ; CHECK-NEXT: bad alignment: call void @llvm.memmove | 92 ; CHECK-NEXT: bad alignment: call void @llvm.memmove |
| 118 ; CHECK-NEXT: bad alignment: call void @llvm.memset | 93 ; CHECK-NEXT: bad alignment: call void @llvm.memset |
| 119 | 94 |
| 120 ret void | 95 ret void |
| 121 } | 96 } |
| 122 ; CHECK-NOT: disallowed | 97 ; CHECK-NOT: disallowed |
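For context, the RUN line above shows the harness convention these tests rely on: pnacl-abicheck reads the module on stdin and FileCheck matches its diagnostics against the ; CHECK comments. Below is a minimal standalone sketch in the same style; it is a hypothetical file (the function name is invented here), assuming float accesses may only use "align 1" or "align 4", consistent with the allowed cases shown in the diff.

; RUN: pnacl-abicheck < %s | FileCheck %s

define internal void @float_align_sketch(i32 %ptr) {
  %ptr.float = inttoptr i32 %ptr to float*
  ; "align 2" is neither byte alignment (1) nor float's natural
  ; alignment (4), so the checker should report it as disallowed.
  load float* %ptr.float, align 2
  ret void
}
; CHECK: disallowed: bad alignment: {{.*}} load float{{.*}} align 2
; CHECK-NOT: disallowed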