Index: tests_lit/llvm2ice_tests/nonsfi.ll |
diff --git a/tests_lit/llvm2ice_tests/nonsfi.ll b/tests_lit/llvm2ice_tests/nonsfi.ll |
new file mode 100644 |
index 0000000000000000000000000000000000000000..904f38709ac750c35d6be4e8819e34730735fb49 |
--- /dev/null |
+++ b/tests_lit/llvm2ice_tests/nonsfi.ll |
@@ -0,0 +1,92 @@ |
+; RUN: %p2i -i %s --filetype=obj --assemble --disassemble --args -O2 -nonsfi \ |
+; RUN: | FileCheck %s |
+ |
+@G1 = internal global [4 x i8] zeroinitializer, align 4 |
+ |
+define internal void @testCallRegular() {
+entry:
+  call void @testCallRegular()
+  ret void
+}
+; Expect a simple direct (recursive) call to testCallRegular, i.e. a PC-relative R_386_PC32 relocation, even in non-SFI mode.
+; CHECK-LABEL: testCallRegular
+; CHECK: call {{.*}} R_386_PC32 testCallRegular
+ |
+define internal double @testCallBuiltin(double %val) {
+entry:
+  %result = frem double %val, %val
+  ret double %result
+}
+; frem lowers to a call to the fmod library function; expect a simple direct call to fmod with a R_386_PC32 relocation.
+; CHECK-LABEL: testCallBuiltin
+; CHECK: call {{.*}} R_386_PC32 fmod
+ |
+define internal i32 @testLoadBasic() {
+entry:
+  %a = bitcast [4 x i8]* @G1 to i32*
+  %b = load i32, i32* %a, align 1
+  ret i32 %b
+}
+; Expect the load of G1 to be addressed with a R_386_GOTOFF (GOT-relative) relocation under -nonsfi.
+; CHECK-LABEL: testLoadBasic
+; CHECK: mov {{.*}} R_386_GOTOFF G1
+ |
+define internal i32 @testLoadFixedOffset() {
+entry:
+  %a = ptrtoint [4 x i8]* @G1 to i32
+  %a1 = add i32 %a, 4
+  %a2 = inttoptr i32 %a1 to i32*
+  %b = load i32, i32* %a2, align 1
+  ret i32 %b
+}
+; Expect the constant offset (+4) to be folded into the addressing mode: a load with a R_386_GOTOFF relocation against G1 plus an immediate 0x4 displacement.
+; CHECK-LABEL: testLoadFixedOffset
+; CHECK: mov {{.*}}+0x4] {{.*}} R_386_GOTOFF G1
+ |
+define internal i32 @testLoadIndexed(i32 %idx) {
+entry:
+  %a = ptrtoint [4 x i8]* @G1 to i32
+  %a0 = mul i32 %idx, 4
+  %a1 = add i32 %a0, 12
+  %a2 = add i32 %a1, %a
+  %a3 = inttoptr i32 %a2 to i32*
+  %b = load i32, i32* %a3, align 1
+  ret i32 %b
+}
+; CHECK-LABEL: testLoadIndexed
+; TODO(review): add a CHECK for the expected addressing mode (idx*4+12 folded with R_386_GOTOFF G1) -- the label alone verifies nothing about codegen.
+define internal i32 @testLoadIndexedBase(i32 %base, i32 %idx) {
+entry:
+  %a = ptrtoint [4 x i8]* @G1 to i32
+  %a0 = mul i32 %idx, 4
+  %a1 = add i32 %a0, 12
+  %a2 = add i32 %a1, %a
+  %a3 = add i32 %a2, %base
+  %a4 = inttoptr i32 %a3 to i32*
+  %b = load i32, i32* %a4, align 1
+  ret i32 %b
+}
+; CHECK-LABEL: testLoadIndexedBase
+; TODO(review): add a CHECK for the expected base+index addressing mode with the R_386_GOTOFF relocation -- the label alone verifies nothing about codegen.
+define internal i32 @testLoadOpt() {
+entry:
+  %a = bitcast [4 x i8]* @G1 to i32*
+  %b = load i32, i32* %a, align 1
+  %c = bitcast [4 x i8]* @G1 to i32*
+  %d = load i32, i32* %c, align 1
+  %e = add i32 %b, %d
+  ret i32 %e
+}
+; Expect a load-folding optimization (the redundant second load of G1, via the duplicate bitcast %c, folded into the add) with a R_386_GOTOFF relocation.
+; CHECK-LABEL: testLoadOpt
+ |
+define internal void @testRMW() {
+entry:
+  %a = bitcast [4 x i8]* @G1 to i32*
+  %b = load i32, i32* %a, align 1
+  %c = add i32 %b, 1
+  store i32 %c, i32* %a, align 1
+  ret void
+}
+; Expect the load/add/store of G1 to be combined into a read-modify-write (RMW) memory operation with a R_386_GOTOFF relocation.
+; CHECK-LABEL: testRMW