OLD | NEW |
1 ; Test for a call to __asan_check() preceding stores | 1 ; Test for a call to __asan_check_store() preceding stores |
2 | 2 |
3 ; REQUIRES: allow_dump | 3 ; REQUIRES: allow_dump |
4 | 4 |
5 ; RUN: %p2i -i %s --args -verbose=inst -threads=0 -fsanitize-address \ | 5 ; RUN: %p2i -i %s --args -verbose=inst -threads=0 -fsanitize-address \ |
6 ; RUN: | FileCheck --check-prefix=DUMP %s | 6 ; RUN: | FileCheck --check-prefix=DUMP %s |
7 | 7 |
8 ; A global to store data to | 8 ; Globals to store data to |
9 @destGlobal8 = internal global [1 x i8] zeroinitializer | 9 @destGlobal8 = internal global [1 x i8] zeroinitializer |
10 @destGlobal16 = internal global [2 x i8] zeroinitializer | 10 @destGlobal16 = internal global [2 x i8] zeroinitializer |
11 @destGlobal32 = internal global [4 x i8] zeroinitializer | 11 @destGlobal32 = internal global [4 x i8] zeroinitializer |
12 @destGlobal64 = internal global [8 x i8] zeroinitializer | 12 @destGlobal64 = internal global [8 x i8] zeroinitializer |
13 @destGlobal128 = internal global [16 x i8] zeroinitializer | 13 @destGlobal128 = internal global [16 x i8] zeroinitializer |
14 | 14 |
15 ; A function with a local variable that does the stores | 15 ; A function that stores through pointers passed in as arguments |
16 define internal void @doStores(<4 x i32> %vecSrc) { | 16 define internal void @doStores(<4 x i32> %vecSrc, i32 %arg8, i32 %arg16, |
17 %destLocal8 = alloca i8, i32 1, align 4 | 17 i32 %arg32, i32 %arg64, i32 %arg128) { |
18 %destLocal16 = alloca i8, i32 2, align 4 | 18 %destLocal8 = inttoptr i32 %arg8 to i8* |
19 %destLocal32 = alloca i8, i32 4, align 4 | 19 %destLocal16 = inttoptr i32 %arg16 to i16* |
20 %destLocal64 = alloca i8, i32 8, align 4 | 20 %destLocal32 = inttoptr i32 %arg32 to i32* |
21 %destLocal128 = alloca i8, i32 16, align 4 | 21 %destLocal64 = inttoptr i32 %arg64 to i64* |
| 22 %destLocal128 = inttoptr i32 %arg128 to <4 x i32>* |
22 | 23 |
23 %ptrGlobal8 = bitcast [1 x i8]* @destGlobal8 to i8* | 24 %ptrGlobal8 = bitcast [1 x i8]* @destGlobal8 to i8* |
24 %ptrGlobal16 = bitcast [2 x i8]* @destGlobal16 to i16* | 25 %ptrGlobal16 = bitcast [2 x i8]* @destGlobal16 to i16* |
25 %ptrGlobal32 = bitcast [4 x i8]* @destGlobal32 to i32* | 26 %ptrGlobal32 = bitcast [4 x i8]* @destGlobal32 to i32* |
26 %ptrGlobal64 = bitcast [8 x i8]* @destGlobal64 to i64* | 27 %ptrGlobal64 = bitcast [8 x i8]* @destGlobal64 to i64* |
27 %ptrGlobal128 = bitcast [16 x i8]* @destGlobal128 to <4 x i32>* | 28 %ptrGlobal128 = bitcast [16 x i8]* @destGlobal128 to <4 x i32>* |
28 | 29 |
29 %ptrLocal8 = bitcast i8* %destLocal8 to i8* | |
30 %ptrLocal16 = bitcast i8* %destLocal16 to i16* | |
31 %ptrLocal32 = bitcast i8* %destLocal32 to i32* | |
32 %ptrLocal64 = bitcast i8* %destLocal64 to i64* | |
33 %ptrLocal128 = bitcast i8* %destLocal128 to <4 x i32>* | |
34 | |
35 store i8 42, i8* %ptrGlobal8, align 1 | 30 store i8 42, i8* %ptrGlobal8, align 1 |
36 store i16 42, i16* %ptrGlobal16, align 1 | 31 store i16 42, i16* %ptrGlobal16, align 1 |
37 store i32 42, i32* %ptrGlobal32, align 1 | 32 store i32 42, i32* %ptrGlobal32, align 1 |
38 store i64 42, i64* %ptrGlobal64, align 1 | 33 store i64 42, i64* %ptrGlobal64, align 1 |
39 store <4 x i32> %vecSrc, <4 x i32>* %ptrGlobal128, align 4 | 34 store <4 x i32> %vecSrc, <4 x i32>* %ptrGlobal128, align 4 |
40 | 35 |
41 store i8 42, i8* %ptrLocal8, align 1 | 36 store i8 42, i8* %destLocal8, align 1 |
42 store i16 42, i16* %ptrLocal16, align 1 | 37 store i16 42, i16* %destLocal16, align 1 |
43 store i32 42, i32* %ptrLocal32, align 1 | 38 store i32 42, i32* %destLocal32, align 1 |
44 store i64 42, i64* %ptrLocal64, align 1 | 39 store i64 42, i64* %destLocal64, align 1 |
45 store <4 x i32> %vecSrc, <4 x i32>* %ptrLocal128, align 4 | 40 store <4 x i32> %vecSrc, <4 x i32>* %destLocal128, align 4 |
46 | 41 |
47 ret void | 42 ret void |
48 } | 43 } |
49 | 44 |
50 ; DUMP-LABEL: ================ Instrumented CFG ================ | 45 ; DUMP-LABEL: ================ Instrumented CFG ================ |
51 ; DUMP-NEXT: define internal void @doStores(<4 x i32> %vecSrc) { | 46 ; DUMP-NEXT: define internal void @doStores( |
52 ; DUMP-NEXT: __0: | 47 ; DUMP-NEXT: __0: |
53 ; DUMP: call void @__asan_check_store(i32 @destGlobal8, i32 1) | 48 ; DUMP-NEXT: call void @__asan_check_store(i32 @destGlobal8, i32 1) |
54 ; DUMP-NEXT: store i8 42, i8* @destGlobal8, align 1 | 49 ; DUMP-NEXT: store i8 42, i8* @destGlobal8, align 1 |
55 ; DUMP-NEXT: call void @__asan_check_store(i32 @destGlobal16, i32 2) | 50 ; DUMP-NEXT: call void @__asan_check_store(i32 @destGlobal16, i32 2) |
56 ; DUMP-NEXT: store i16 42, i16* @destGlobal16, align 1 | 51 ; DUMP-NEXT: store i16 42, i16* @destGlobal16, align 1 |
57 ; DUMP-NEXT: call void @__asan_check_store(i32 @destGlobal32, i32 4) | 52 ; DUMP-NEXT: call void @__asan_check_store(i32 @destGlobal32, i32 4) |
58 ; DUMP-NEXT: store i32 42, i32* @destGlobal32, align 1 | 53 ; DUMP-NEXT: store i32 42, i32* @destGlobal32, align 1 |
59 ; DUMP-NEXT: call void @__asan_check_store(i32 @destGlobal64, i32 8) | 54 ; DUMP-NEXT: call void @__asan_check_store(i32 @destGlobal64, i32 8) |
60 ; DUMP-NEXT: store i64 42, i64* @destGlobal64, align 1 | 55 ; DUMP-NEXT: store i64 42, i64* @destGlobal64, align 1 |
61 ; DUMP-NEXT: call void @__asan_check_store(i32 @destGlobal128, i32 16) | 56 ; DUMP-NEXT: call void @__asan_check_store(i32 @destGlobal128, i32 16) |
62 ; DUMP-NEXT: store <4 x i32> %vecSrc, <4 x i32>* @destGlobal128, align 4 | 57 ; DUMP-NEXT: store <4 x i32> %vecSrc, <4 x i32>* @destGlobal128, align 4 |
63 ; DUMP-NEXT: call void @__asan_check_store(i32 %destLocal8, i32 1) | 58 ; DUMP-NEXT: call void @__asan_check_store(i32 %arg8, i32 1) |
64 ; DUMP-NEXT: store i8 42, i8* %destLocal8, align 1 | 59 ; DUMP-NEXT: store i8 42, i8* %arg8, align 1 |
65 ; DUMP-NEXT: call void @__asan_check_store(i32 %destLocal16, i32 2) | 60 ; DUMP-NEXT: call void @__asan_check_store(i32 %arg16, i32 2) |
66 ; DUMP-NEXT: store i16 42, i16* %destLocal16, align 1 | 61 ; DUMP-NEXT: store i16 42, i16* %arg16, align 1 |
67 ; DUMP-NEXT: call void @__asan_check_store(i32 %destLocal32, i32 4) | 62 ; DUMP-NEXT: call void @__asan_check_store(i32 %arg32, i32 4) |
68 ; DUMP-NEXT: store i32 42, i32* %destLocal32, align 1 | 63 ; DUMP-NEXT: store i32 42, i32* %arg32, align 1 |
69 ; DUMP-NEXT: call void @__asan_check_store(i32 %destLocal64, i32 8) | 64 ; DUMP-NEXT: call void @__asan_check_store(i32 %arg64, i32 8) |
70 ; DUMP-NEXT: store i64 42, i64* %destLocal64, align 1 | 65 ; DUMP-NEXT: store i64 42, i64* %arg64, align 1 |
71 ; DUMP-NEXT: call void @__asan_check_store(i32 %destLocal128, i32 16) | 66 ; DUMP-NEXT: call void @__asan_check_store(i32 %arg128, i32 16) |
72 ; DUMP-NEXT: store <4 x i32> %vecSrc, <4 x i32>* %destLocal128, align 4 | 67 ; DUMP-NEXT: store <4 x i32> %vecSrc, <4 x i32>* %arg128, align 4 |
73 ; DUMP: ret void | 68 ; DUMP: ret void |
74 ; DUMP-NEXT: } | 69 ; DUMP-NEXT: } |
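
For reference, a minimal sketch (not part of the test) of the instrumentation pattern the DUMP checks above verify: every store of N bytes through a pointer is preceded by a call to __asan_check_store carrying the address (an i32 in this 32-bit configuration) and the access size in bytes. The declaration and the function name @sketchStore below are illustrative assumptions, not taken from the test file.

declare void @__asan_check_store(i32, i32)

define internal void @sketchStore(i32 %p) {
  ; inserted by the instrumentation pass: check 4 bytes at %p before the store
  call void @__asan_check_store(i32 %p, i32 4)
  %ptr = inttoptr i32 %p to i32*
  store i32 42, i32* %ptr, align 1
  ret void
}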