; Test that direct loads and stores of local variables are not checked.

; REQUIRES: allow_dump

; RUN: %p2i -i %s --args -verbose=inst -threads=0 -fsanitize-address \
; RUN:   | FileCheck --check-prefix=DUMP %s
define internal void @foo() {
  %ptr8 = alloca i8, i32 1, align 4
  %ptr16 = alloca i8, i32 2, align 4
  %ptr32 = alloca i8, i32 4, align 4
  %ptr64 = alloca i8, i32 8, align 4
  %ptr128 = alloca i8, i32 16, align 4

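  ; The bitcasts give each buffer a pointer type matching its access width.
  ; Subzero models all pointers as i32, so these are no-ops and do not appear
  ; in the instrumented dump below (the accesses use %ptr* directly).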
  %target8 = bitcast i8* %ptr8 to i8*
  %target16 = bitcast i8* %ptr16 to i16*
  %target32 = bitcast i8* %ptr32 to i32*
  %target64 = bitcast i8* %ptr64 to i64*
  %target128 = bitcast i8* %ptr128 to <4 x i32>*

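  ; The addresses below are the direct results of allocas, so the
  ; instrumentation pass can tell the accesses stay within the local
  ; variables and leaves them unchecked.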
  ; unchecked loads
  %loaded8 = load i8, i8* %target8, align 1
  %loaded16 = load i16, i16* %target16, align 1
  %loaded32 = load i32, i32* %target32, align 1
  %loaded64 = load i64, i64* %target64, align 1
  %loaded128 = load <4 x i32>, <4 x i32>* %target128, align 4

  ; unchecked stores
  store i8 %loaded8, i8* %target8, align 1
  store i16 %loaded16, i16* %target16, align 1
  store i32 %loaded32, i32* %target32, align 1
  store i64 %loaded64, i64* %target64, align 1
  store <4 x i32> %loaded128, <4 x i32>* %target128, align 4

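  ; Launder each address through ptrtoint/add/inttoptr. The -1 offset points
  ; one byte before its buffer, and the integer round trip hides the alloca
  ; provenance, so the accesses further down can no longer be proven safe.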
  %addr8 = ptrtoint i8* %ptr8 to i32
  %addr16 = ptrtoint i8* %ptr16 to i32
  %addr32 = ptrtoint i8* %ptr32 to i32
  %addr64 = ptrtoint i8* %ptr64 to i32
  %addr128 = ptrtoint i8* %ptr128 to i32

  %off8 = add i32 %addr8, -1
  %off16 = add i32 %addr16, -1
  %off32 = add i32 %addr32, -1
  %off64 = add i32 %addr64, -1
  %off128 = add i32 %addr128, -1

  %offtarget8 = inttoptr i32 %off8 to i8*
  %offtarget16 = inttoptr i32 %off16 to i16*
  %offtarget32 = inttoptr i32 %off32 to i32*
  %offtarget64 = inttoptr i32 %off64 to i64*
  %offtarget128 = inttoptr i32 %off128 to <4 x i32>*

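  ; Because the instrumenter can no longer tie these addresses to an alloca,
  ; it must emit a runtime check (__asan_check_load / __asan_check_store,
  ; taking the address and the access size in bytes) before each access.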
  ; checked loads
  %offloaded8 = load i8, i8* %offtarget8, align 1
  %offloaded16 = load i16, i16* %offtarget16, align 1
  %offloaded32 = load i32, i32* %offtarget32, align 1
  %offloaded64 = load i64, i64* %offtarget64, align 1
  %offloaded128 = load <4 x i32>, <4 x i32>* %offtarget128, align 4

  ; checked stores
  store i8 %offloaded8, i8* %offtarget8, align 1
  store i16 %offloaded16, i16* %offtarget16, align 1
  store i32 %offloaded32, i32* %offtarget32, align 1
  store i64 %offloaded64, i64* %offtarget64, align 1
  store <4 x i32> %offloaded128, <4 x i32>* %offtarget128, align 4

  ret void
}

; DUMP-LABEL: ================ Instrumented CFG ================
; DUMP-NEXT: define internal void @foo() {

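; The bitcasts have been folded away, so the accesses below use the alloca
; results directly, with no __asan_check_* call in front of any of them.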
; Unchecked loads and stores
; DUMP: %loaded8 = load i8, i8* %ptr8, align 1
; DUMP-NEXT: %loaded16 = load i16, i16* %ptr16, align 1
; DUMP-NEXT: %loaded32 = load i32, i32* %ptr32, align 1
; DUMP-NEXT: %loaded64 = load i64, i64* %ptr64, align 1
; DUMP-NEXT: %loaded128 = load <4 x i32>, <4 x i32>* %ptr128, align 4
; DUMP-NEXT: store i8 %loaded8, i8* %ptr8, align 1
; DUMP-NEXT: store i16 %loaded16, i16* %ptr16, align 1
; DUMP-NEXT: store i32 %loaded32, i32* %ptr32, align 1
; DUMP-NEXT: store i64 %loaded64, i64* %ptr64, align 1
; DUMP-NEXT: store <4 x i32> %loaded128, <4 x i32>* %ptr128, align 4

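; The inttoptr results likewise collapse to their i32 inputs (%off*), and
; each checked access is immediately preceded by a check call whose second
; argument is the access size in bytes: 1, 2, 4, 8, or 16.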
; Checked loads and stores
; DUMP: call void @__asan_check_load(i32 %off8, i32 1)
; DUMP-NEXT: %offloaded8 = load i8, i8* %off8, align 1
; DUMP-NEXT: call void @__asan_check_load(i32 %off16, i32 2)
; DUMP-NEXT: %offloaded16 = load i16, i16* %off16, align 1
; DUMP-NEXT: call void @__asan_check_load(i32 %off32, i32 4)
; DUMP-NEXT: %offloaded32 = load i32, i32* %off32, align 1
; DUMP-NEXT: call void @__asan_check_load(i32 %off64, i32 8)
; DUMP-NEXT: %offloaded64 = load i64, i64* %off64, align 1
; DUMP-NEXT: call void @__asan_check_load(i32 %off128, i32 16)
; DUMP-NEXT: %offloaded128 = load <4 x i32>, <4 x i32>* %off128, align 4
; DUMP-NEXT: call void @__asan_check_store(i32 %off8, i32 1)
; DUMP-NEXT: store i8 %offloaded8, i8* %off8, align 1
; DUMP-NEXT: call void @__asan_check_store(i32 %off16, i32 2)
; DUMP-NEXT: store i16 %offloaded16, i16* %off16, align 1
; DUMP-NEXT: call void @__asan_check_store(i32 %off32, i32 4)
; DUMP-NEXT: store i32 %offloaded32, i32* %off32, align 1
; DUMP-NEXT: call void @__asan_check_store(i32 %off64, i32 8)
; DUMP-NEXT: store i64 %offloaded64, i64* %off64, align 1
; DUMP-NEXT: call void @__asan_check_store(i32 %off128, i32 16)
; DUMP-NEXT: store <4 x i32> %offloaded128, <4 x i32>* %off128, align 4