Chromium Code Reviews
Unified Diff: tests_lit/asan_tests/elidelocalchecks.ll

Issue 2235023002: Subzero: Elide redundant access checks within basic blocks (Closed) Base URL: https://chromium.googlesource.com/native_client/pnacl-subzero.git@master
Patch Set: Created 4 years, 4 months ago
 ; Test that direct loads and stores of local variables are not checked.
+; Also test that redundant checks of the same variable are elided.
 
 ; REQUIRES: allow_dump
 
 ; RUN: %p2i -i %s --args -verbose=inst -threads=0 -fsanitize-address \
 ; RUN:   | FileCheck --check-prefix=DUMP %s
 
 define internal void @foo() {
   %ptr8 = alloca i8, i32 1, align 4
   %ptr16 = alloca i8, i32 2, align 4
   %ptr32 = alloca i8, i32 4, align 4
(...skipping 31 matching lines...)
   %off32 = add i32 %addr32, -1
   %off64 = add i32 %addr64, -1
   %off128 = add i32 %addr128, -1
 
   %offtarget8 = inttoptr i32 %off8 to i8*
   %offtarget16 = inttoptr i32 %off16 to i16*
   %offtarget32 = inttoptr i32 %off32 to i32*
   %offtarget64 = inttoptr i32 %off64 to i64*
   %offtarget128 = inttoptr i32 %off128 to <4 x i32>*
 
+  ; checked stores
+  store i8 42, i8* %offtarget8, align 1
+  store i16 42, i16* %offtarget16, align 1
+  store i32 42, i32* %offtarget32, align 1
+
   ; checked loads
+  %offloaded64 = load i64, i64* %offtarget64, align 1
+  %offloaded128 = load <4 x i32>, <4 x i32>* %offtarget128, align 4
+
+  ; loads and stores with elided redundant checks
   %offloaded8 = load i8, i8* %offtarget8, align 1
   %offloaded16 = load i16, i16* %offtarget16, align 1
   %offloaded32 = load i32, i32* %offtarget32, align 1
-  %offloaded64 = load i64, i64* %offtarget64, align 1
-  %offloaded128 = load <4 x i32>, <4 x i32>* %offtarget128, align 4
-
-  ; checked stores
-  store i8 %offloaded8, i8* %offtarget8, align 1
-  store i16 %offloaded16, i16* %offtarget16, align 1
-  store i32 %offloaded32, i32* %offtarget32, align 1
   store i64 %offloaded64, i64* %offtarget64, align 1
   store <4 x i32> %offloaded128, <4 x i32>* %offtarget128, align 4
 
   ret void
 }
 
 ; DUMP-LABEL: ================ Instrumented CFG ================
 ; DUMP-NEXT: define internal void @foo() {
 
-; Unchecked loads and stores
+; Direct unchecked loads and stores
 ; DUMP: %loaded8 = load i8, i8* %ptr8, align 1
 ; DUMP-NEXT: %loaded16 = load i16, i16* %ptr16, align 1
 ; DUMP-NEXT: %loaded32 = load i32, i32* %ptr32, align 1
 ; DUMP-NEXT: %loaded64 = load i64, i64* %ptr64, align 1
 ; DUMP-NEXT: %loaded128 = load <4 x i32>, <4 x i32>* %ptr128, align 4
 ; DUMP-NEXT: store i8 %loaded8, i8* %ptr8, align 1
 ; DUMP-NEXT: store i16 %loaded16, i16* %ptr16, align 1
 ; DUMP-NEXT: store i32 %loaded32, i32* %ptr32, align 1
 ; DUMP-NEXT: store i64 %loaded64, i64* %ptr64, align 1
 ; DUMP-NEXT: store <4 x i32> %loaded128, <4 x i32>* %ptr128, align 4
 
-; Checked loads and stores
-; DUMP: call void @__asan_check_load(i32 %off8, i32 1)
-; DUMP-NEXT: %offloaded8 = load i8, i8* %off8, align 1
-; DUMP-NEXT: call void @__asan_check_load(i32 %off16, i32 2)
-; DUMP-NEXT: %offloaded16 = load i16, i16* %off16, align 1
-; DUMP-NEXT: call void @__asan_check_load(i32 %off32, i32 4)
-; DUMP-NEXT: %offloaded32 = load i32, i32* %off32, align 1
+; Checked stores
+; DUMP: call void @__asan_check_store(i32 %off8, i32 1)
+; DUMP-NEXT: store i8 42, i8* %off8, align 1
+; DUMP-NEXT: call void @__asan_check_store(i32 %off16, i32 2)
+; DUMP-NEXT: store i16 42, i16* %off16, align 1
+; DUMP-NEXT: call void @__asan_check_store(i32 %off32, i32 4)
+; DUMP-NEXT: store i32 42, i32* %off32, align 1
+
+; Checked loads
 ; DUMP-NEXT: call void @__asan_check_load(i32 %off64, i32 8)
 ; DUMP-NEXT: %offloaded64 = load i64, i64* %off64, align 1
 ; DUMP-NEXT: call void @__asan_check_load(i32 %off128, i32 16)
 ; DUMP-NEXT: %offloaded128 = load <4 x i32>, <4 x i32>* %off128, align 4
-; DUMP-NEXT: call void @__asan_check_store(i32 %off8, i32 1)
-; DUMP-NEXT: store i8 %offloaded8, i8* %off8, align 1
-; DUMP-NEXT: call void @__asan_check_store(i32 %off16, i32 2)
-; DUMP-NEXT: store i16 %offloaded16, i16* %off16, align 1
-; DUMP-NEXT: call void @__asan_check_store(i32 %off32, i32 4)
-; DUMP-NEXT: store i32 %offloaded32, i32* %off32, align 1
-; DUMP-NEXT: call void @__asan_check_store(i32 %off64, i32 8)
-; DUMP-NEXT: store i64 %offloaded64, i64* %off64, align 1
-; DUMP-NEXT: call void @__asan_check_store(i32 %off128, i32 16)
-; DUMP-NEXT: store <4 x i32> %offloaded128, <4 x i32>* %off128, align 4
+
+; Loads and stores with elided redundant checks
+; DUMP-NEXT: %offloaded8 = load i8, i8* %off8, align 1
+; DUMP-NEXT: %offloaded16 = load i16, i16* %off16, align 1
+; DUMP-NEXT: %offloaded32 = load i32, i32* %off32, align 1
+; DUMP-NEXT: store i64 %offloaded64, i64* %off64, align 1, beacon %offloaded64
+; DUMP-NEXT: store <4 x i32> %offloaded128, <4 x i32>* %off128, align 4, beacon %offloaded128
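
For readers of this review, the elision the test exercises can be summarized as: within a single basic block, once an address has been checked by __asan_check_load or __asan_check_store, later accesses to the same address at the same or smaller width need no second check. Below is a minimal sketch of that bookkeeping, assuming checks are keyed on the address operand and that a check for either access kind covers both; the names (Access, accessesNeedingChecks, WidestChecked) are hypothetical illustrations, not the implementation in src/IceASanInstrumentation.cpp.

    #include <cstdint>
    #include <map>
    #include <string>
    #include <vector>

    // Hypothetical stand-in for a load or store whose address is a
    // single named operand.
    struct Access {
      std::string Addr;     // name of the address operand, e.g. "%off8"
      uint32_t SizeInBytes; // access width in bytes
    };

    // Walks the accesses of one basic block in order and returns those
    // that still need an explicit __asan_check_load/__asan_check_store
    // call. A later access is redundant when an earlier check in the
    // same block already covered at least as many bytes at that address.
    std::vector<Access> accessesNeedingChecks(const std::vector<Access> &Block) {
      std::map<std::string, uint32_t> WidestChecked; // per-block state
      std::vector<Access> NeedCheck;
      for (const Access &A : Block) {
        auto It = WidestChecked.find(A.Addr);
        if (It != WidestChecked.end() && It->second >= A.SizeInBytes)
          continue; // covered by an earlier check in this block: elide
        WidestChecked[A.Addr] = A.SizeInBytes;
        NeedCheck.push_back(A);
      }
      return NeedCheck;
    }

The expected dump above shows both directions of this symmetry: the checked stores to %off8, %off16, and %off32 let the later loads of those addresses skip their __asan_check_load calls, and the checked loads of %off64 and %off128 let the later stores skip their __asan_check_store calls. Resetting the per-block state at block boundaries keeps the elision sound, since control flow may enter a block without having passed the earlier check, which is why the CL is scoped to "within basic blocks".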