OLD | NEW |
1 ; Test for a call to __asan_check_load() preceding loads | 1 ; Test for a call to __asan_check_load() preceding loads |
2 | 2 |
3 ; REQUIRES: allow_dump | 3 ; REQUIRES: allow_dump |
4 | 4 |
5 ; RUN: %p2i -i %s --args -verbose=inst -threads=0 -fsanitize-address \ | 5 ; RUN: %p2i -i %s --args -verbose=inst -threads=0 -fsanitize-address \ |
6 ; RUN: | FileCheck --check-prefix=DUMP %s | 6 ; RUN: | FileCheck --check-prefix=DUMP %s |
7 | 7 |
8 ; Constants to load data from | 8 ; Constants to load data from |
9 @srcConst8 = internal constant [1 x i8] c"D" | 9 @srcConst8 = internal constant [1 x i8] c"D" |
10 @srcConst16 = internal constant [2 x i8] c"DA" | 10 @srcConst16 = internal constant [2 x i8] c"DA" |
11 @srcConst32 = internal constant [4 x i8] c"DATA" | 11 @srcConst32 = internal constant [4 x i8] c"DATA" |
12 @srcConst64 = internal constant [8 x i8] c"DATADATA" | 12 @srcConst64 = internal constant [8 x i8] c"DATADATA" |
13 @srcConst128 = internal constant [16 x i8] c"DATADATADATADATA" | 13 @srcConst128 = internal constant [16 x i8] c"DATADATADATADATA" |
14 | 14 |
15 ; A global to load data from | 15 ; A global to load data from |
16 @srcGlobal8 = internal global [1 x i8] c"D" | 16 @srcGlobal8 = internal global [1 x i8] c"D" |
17 @srcGlobal16 = internal global [2 x i8] c"DA" | 17 @srcGlobal16 = internal global [2 x i8] c"DA" |
18 @srcGlobal32 = internal global [4 x i8] c"DATA" | 18 @srcGlobal32 = internal global [4 x i8] c"DATA" |
19 @srcGlobal64 = internal global [8 x i8] c"DATADATA" | 19 @srcGlobal64 = internal global [8 x i8] c"DATADATA" |
20 @srcGlobal128 = internal global [16 x i8] c"DATADATADATADATA" | 20 @srcGlobal128 = internal global [16 x i8] c"DATADATADATADATA" |
21 | 21 |
22 ; A function with a local variable that does the loads | 22 ; A function with a local variable that does the loads |
23 define internal void @doLoads() { | 23 define internal void @doLoads(i32 %arg8, i32 %arg16, i32 %arg32, i32 %arg64, |
24 %srcLocal8 = alloca i8, i32 1, align 4 | 24 i32 %arg128) { |
25 %srcLocal16 = alloca i8, i32 2, align 4 | 25 %srcLocal8 = inttoptr i32 %arg8 to i8* |
26 %srcLocal32 = alloca i8, i32 4, align 4 | 26 %srcLocal16 = inttoptr i32 %arg16 to i16* |
27 %srcLocal64 = alloca i8, i32 8, align 4 | 27 %srcLocal32 = inttoptr i32 %arg32 to i32* |
28 %srcLocal128 = alloca i8, i32 16, align 4 | 28 %srcLocal64 = inttoptr i32 %arg64 to i64* |
| 29 %srcLocal128 = inttoptr i32 %arg128 to <4 x i32>* |
29 | 30 |
30 %ptrConst8 = bitcast [1 x i8]* @srcConst8 to i8* | 31 %ptrConst8 = bitcast [1 x i8]* @srcConst8 to i8* |
31 %ptrConst16 = bitcast [2 x i8]* @srcConst16 to i16* | 32 %ptrConst16 = bitcast [2 x i8]* @srcConst16 to i16* |
32 %ptrConst32 = bitcast [4 x i8]* @srcConst32 to i32* | 33 %ptrConst32 = bitcast [4 x i8]* @srcConst32 to i32* |
33 %ptrConst64 = bitcast [8 x i8]* @srcConst64 to i64* | 34 %ptrConst64 = bitcast [8 x i8]* @srcConst64 to i64* |
34 %ptrConst128 = bitcast [16 x i8]* @srcConst128 to <4 x i32>* | 35 %ptrConst128 = bitcast [16 x i8]* @srcConst128 to <4 x i32>* |
35 | 36 |
36 %ptrGlobal8 = bitcast [1 x i8]* @srcGlobal8 to i8* | 37 %ptrGlobal8 = bitcast [1 x i8]* @srcGlobal8 to i8* |
37 %ptrGlobal16 = bitcast [2 x i8]* @srcGlobal16 to i16* | 38 %ptrGlobal16 = bitcast [2 x i8]* @srcGlobal16 to i16* |
38 %ptrGlobal32 = bitcast [4 x i8]* @srcGlobal32 to i32* | 39 %ptrGlobal32 = bitcast [4 x i8]* @srcGlobal32 to i32* |
39 %ptrGlobal64 = bitcast [8 x i8]* @srcGlobal64 to i64* | 40 %ptrGlobal64 = bitcast [8 x i8]* @srcGlobal64 to i64* |
40 %ptrGlobal128 = bitcast [16 x i8]* @srcGlobal128 to <4 x i32>* | 41 %ptrGlobal128 = bitcast [16 x i8]* @srcGlobal128 to <4 x i32>* |
41 | 42 |
42 %ptrLocal8 = bitcast i8* %srcLocal8 to i8* | |
43 %ptrLocal16 = bitcast i8* %srcLocal16 to i16* | |
44 %ptrLocal32 = bitcast i8* %srcLocal32 to i32* | |
45 %ptrLocal64 = bitcast i8* %srcLocal64 to i64* | |
46 %ptrLocal128 = bitcast i8* %srcLocal128 to <4 x i32>* | |
47 | |
48 %dest1 = load i8, i8* %ptrConst8, align 1 | 43 %dest1 = load i8, i8* %ptrConst8, align 1 |
49 %dest2 = load i16, i16* %ptrConst16, align 1 | 44 %dest2 = load i16, i16* %ptrConst16, align 1 |
50 %dest3 = load i32, i32* %ptrConst32, align 1 | 45 %dest3 = load i32, i32* %ptrConst32, align 1 |
51 %dest4 = load i64, i64* %ptrConst64, align 1 | 46 %dest4 = load i64, i64* %ptrConst64, align 1 |
52 %dest5 = load <4 x i32>, <4 x i32>* %ptrConst128, align 4 | 47 %dest5 = load <4 x i32>, <4 x i32>* %ptrConst128, align 4 |
53 | 48 |
54 %dest6 = load i8, i8* %ptrGlobal8, align 1 | 49 %dest6 = load i8, i8* %ptrGlobal8, align 1 |
55 %dest7 = load i16, i16* %ptrGlobal16, align 1 | 50 %dest7 = load i16, i16* %ptrGlobal16, align 1 |
56 %dest8 = load i32, i32* %ptrGlobal32, align 1 | 51 %dest8 = load i32, i32* %ptrGlobal32, align 1 |
57 %dest9 = load i64, i64* %ptrGlobal64, align 1 | 52 %dest9 = load i64, i64* %ptrGlobal64, align 1 |
58 %dest10 = load <4 x i32>, <4 x i32>* %ptrGlobal128, align 4 | 53 %dest10 = load <4 x i32>, <4 x i32>* %ptrGlobal128, align 4 |
59 | 54 |
60 %dest11 = load i8, i8* %ptrLocal8, align 1 | 55 %dest11 = load i8, i8* %srcLocal8, align 1 |
61 %dest12 = load i16, i16* %ptrLocal16, align 1 | 56 %dest12 = load i16, i16* %srcLocal16, align 1 |
62 %dest13 = load i32, i32* %ptrLocal32, align 1 | 57 %dest13 = load i32, i32* %srcLocal32, align 1 |
63 %dest14 = load i64, i64* %ptrLocal64, align 1 | 58 %dest14 = load i64, i64* %srcLocal64, align 1 |
64 %dest15 = load <4 x i32>, <4 x i32>* %ptrLocal128, align 4 | 59 %dest15 = load <4 x i32>, <4 x i32>* %srcLocal128, align 4 |
65 | 60 |
66 ret void | 61 ret void |
67 } | 62 } |
68 | 63 |
69 ; DUMP-LABEL: ================ Instrumented CFG ================ | 64 ; DUMP-LABEL: ================ Instrumented CFG ================ |
70 ; DUMP-NEXT: define internal void @doLoads() { | 65 ; DUMP-NEXT: define internal void @doLoads( |
71 ; DUMP-NEXT: __0: | 66 ; DUMP-NEXT: __0: |
72 ; DUMP: call void @__asan_check_load(i32 @srcConst8, i32 1) | 67 ; DUMP-NEXT: call void @__asan_check_load(i32 @srcConst8, i32 1) |
73 ; DUMP-NEXT: %dest1 = load i8, i8* @srcConst8, align 1 | 68 ; DUMP-NEXT: %dest1 = load i8, i8* @srcConst8, align 1 |
74 ; DUMP-NEXT: call void @__asan_check_load(i32 @srcConst16, i32 2) | 69 ; DUMP-NEXT: call void @__asan_check_load(i32 @srcConst16, i32 2) |
75 ; DUMP-NEXT: %dest2 = load i16, i16* @srcConst16, align 1 | 70 ; DUMP-NEXT: %dest2 = load i16, i16* @srcConst16, align 1 |
76 ; DUMP-NEXT: call void @__asan_check_load(i32 @srcConst32, i32 4) | 71 ; DUMP-NEXT: call void @__asan_check_load(i32 @srcConst32, i32 4) |
77 ; DUMP-NEXT: %dest3 = load i32, i32* @srcConst32, align 1 | 72 ; DUMP-NEXT: %dest3 = load i32, i32* @srcConst32, align 1 |
78 ; DUMP-NEXT: call void @__asan_check_load(i32 @srcConst64, i32 8) | 73 ; DUMP-NEXT: call void @__asan_check_load(i32 @srcConst64, i32 8) |
79 ; DUMP-NEXT: %dest4 = load i64, i64* @srcConst64, align 1 | 74 ; DUMP-NEXT: %dest4 = load i64, i64* @srcConst64, align 1 |
80 ; DUMP-NEXT: call void @__asan_check_load(i32 @srcConst128, i32 16) | 75 ; DUMP-NEXT: call void @__asan_check_load(i32 @srcConst128, i32 16) |
81 ; DUMP-NEXT: %dest5 = load <4 x i32>, <4 x i32>* @srcConst128, align 4 | 76 ; DUMP-NEXT: %dest5 = load <4 x i32>, <4 x i32>* @srcConst128, align 4 |
82 ; DUMP-NEXT: call void @__asan_check_load(i32 @srcGlobal8, i32 1) | 77 ; DUMP-NEXT: call void @__asan_check_load(i32 @srcGlobal8, i32 1) |
83 ; DUMP-NEXT: %dest6 = load i8, i8* @srcGlobal8, align 1 | 78 ; DUMP-NEXT: %dest6 = load i8, i8* @srcGlobal8, align 1 |
84 ; DUMP-NEXT: call void @__asan_check_load(i32 @srcGlobal16, i32 2) | 79 ; DUMP-NEXT: call void @__asan_check_load(i32 @srcGlobal16, i32 2) |
85 ; DUMP-NEXT: %dest7 = load i16, i16* @srcGlobal16, align 1 | 80 ; DUMP-NEXT: %dest7 = load i16, i16* @srcGlobal16, align 1 |
86 ; DUMP-NEXT: call void @__asan_check_load(i32 @srcGlobal32, i32 4) | 81 ; DUMP-NEXT: call void @__asan_check_load(i32 @srcGlobal32, i32 4) |
87 ; DUMP-NEXT: %dest8 = load i32, i32* @srcGlobal32, align 1 | 82 ; DUMP-NEXT: %dest8 = load i32, i32* @srcGlobal32, align 1 |
88 ; DUMP-NEXT: call void @__asan_check_load(i32 @srcGlobal64, i32 8) | 83 ; DUMP-NEXT: call void @__asan_check_load(i32 @srcGlobal64, i32 8) |
89 ; DUMP-NEXT: %dest9 = load i64, i64* @srcGlobal64, align 1 | 84 ; DUMP-NEXT: %dest9 = load i64, i64* @srcGlobal64, align 1 |
90 ; DUMP-NEXT: call void @__asan_check_load(i32 @srcGlobal128, i32 16) | 85 ; DUMP-NEXT: call void @__asan_check_load(i32 @srcGlobal128, i32 16) |
91 ; DUMP-NEXT: %dest10 = load <4 x i32>, <4 x i32>* @srcGlobal128, align 4 | 86 ; DUMP-NEXT: %dest10 = load <4 x i32>, <4 x i32>* @srcGlobal128, align 4 |
92 ; DUMP-NEXT: call void @__asan_check_load(i32 %srcLocal8, i32 1) | 87 ; DUMP-NEXT: call void @__asan_check_load(i32 %arg8, i32 1) |
93 ; DUMP-NEXT: %dest11 = load i8, i8* %srcLocal8, align 1 | 88 ; DUMP-NEXT: %dest11 = load i8, i8* %arg8, align 1 |
94 ; DUMP-NEXT: call void @__asan_check_load(i32 %srcLocal16, i32 2) | 89 ; DUMP-NEXT: call void @__asan_check_load(i32 %arg16, i32 2) |
95 ; DUMP-NEXT: %dest12 = load i16, i16* %srcLocal16, align 1 | 90 ; DUMP-NEXT: %dest12 = load i16, i16* %arg16, align 1 |
96 ; DUMP-NEXT: call void @__asan_check_load(i32 %srcLocal32, i32 4) | 91 ; DUMP-NEXT: call void @__asan_check_load(i32 %arg32, i32 4) |
97 ; DUMP-NEXT: %dest13 = load i32, i32* %srcLocal32, align 1 | 92 ; DUMP-NEXT: %dest13 = load i32, i32* %arg32, align 1 |
98 ; DUMP-NEXT: call void @__asan_check_load(i32 %srcLocal64, i32 8) | 93 ; DUMP-NEXT: call void @__asan_check_load(i32 %arg64, i32 8) |
99 ; DUMP-NEXT: %dest14 = load i64, i64* %srcLocal64, align 1 | 94 ; DUMP-NEXT: %dest14 = load i64, i64* %arg64, align 1 |
100 ; DUMP-NEXT: call void @__asan_check_load(i32 %srcLocal128, i32 16) | 95 ; DUMP-NEXT: call void @__asan_check_load(i32 %arg128, i32 16) |
101 ; DUMP-NEXT: %dest15 = load <4 x i32>, <4 x i32>* %srcLocal128, align 4 | 96 ; DUMP-NEXT: %dest15 = load <4 x i32>, <4 x i32>* %arg128, align 4 |
102 ; DUMP: ret void | 97 ; DUMP: ret void |
103 ; DUMP-NEXT: } | 98 ; DUMP-NEXT: } |
OLD | NEW |