| Index: crosstest/test_arith.ll
|
| diff --git a/crosstest/test_arith.ll b/crosstest/test_arith.ll
|
| new file mode 100644
|
| index 0000000000000000000000000000000000000000..4cecde05fbd5e0a68b6e39b0756d68a75b7b4ad6
|
| --- /dev/null
|
| +++ b/crosstest/test_arith.ll
|
| @@ -0,0 +1,2165 @@
|
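| +; Arithmetic crosstest: each testOp<suffix> function applies a single operator
|
| +; to its arguments. The Itanium-mangled suffixes encode the operand types:
|
| +; b=bool, h=uchar, t=ushort, j=uint, y=ulonglong, a=schar, s=short, i=int,
|
| +; x=longlong, Dv<N>_<t> = <N x t> vector.
|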
| +define internal i32 @_Z7testAddbb(i32 %a, i32 %b) {
|
| +entry:
|
| + %b.arg_trunc = trunc i32 %b to i1
|
| + %a.arg_trunc = trunc i32 %a to i1
|
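| +; bool(a + b): with a, b in {0,1}, zext(a) equals sext(b) only when both are
|
| +; zero, so the icmp ne below computes (a + b) != 0 without an add.
|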
| + %conv = zext i1 %a.arg_trunc to i32
|
| + %add = sext i1 %b.arg_trunc to i32
|
| + %tobool4 = icmp ne i32 %conv, %add
|
| + %tobool4.ret_ext = zext i1 %tobool4 to i32
|
| + ret i32 %tobool4.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z7testAddhh(i32 %a, i32 %b) {
|
| +entry:
|
| + %b.arg_trunc = trunc i32 %b to i8
|
| + %a.arg_trunc = trunc i32 %a to i8
|
| + %conv = zext i8 %a.arg_trunc to i32
|
| + %conv1 = zext i8 %b.arg_trunc to i32
|
| + %add = add i32 %conv1, %conv
|
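| +; Throughout this file the trunc of the widened i32 result is commented out
|
| +; and replaced by the narrow-width op on the pre-truncated arguments (marked
|
| +; INSERTED), presumably so the operator is exercised at i8/i16 directly.
|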
| +; %conv2 = trunc i32 %add to i8
|
| + %conv2 = add i8 %a.arg_trunc, %b.arg_trunc ; INSERTED
|
| + %conv2.ret_ext = zext i8 %conv2 to i32
|
| + ret i32 %conv2.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z7testAddtt(i32 %a, i32 %b) {
|
| +entry:
|
| + %b.arg_trunc = trunc i32 %b to i16
|
| + %a.arg_trunc = trunc i32 %a to i16
|
| + %conv = zext i16 %a.arg_trunc to i32
|
| + %conv1 = zext i16 %b.arg_trunc to i32
|
| + %add = add i32 %conv1, %conv
|
| +; %conv2 = trunc i32 %add to i16
|
| + %conv2 = add i16 %a.arg_trunc, %b.arg_trunc ; INSERTED
|
| + %conv2.ret_ext = zext i16 %conv2 to i32
|
| + ret i32 %conv2.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z7testAddjj(i32 %a, i32 %b) {
|
| +entry:
|
| + %add = add i32 %b, %a
|
| + ret i32 %add
|
| +}
|
| +
|
| +define internal i64 @_Z7testAddyy(i64 %a, i64 %b) {
|
| +entry:
|
| + %add = add i64 %b, %a
|
| + ret i64 %add
|
| +}
|
| +
|
| +define internal <4 x i32> @_Z7testAddDv4_jS_(<4 x i32> %a, <4 x i32> %b) {
|
| +entry:
|
| + %add = add <4 x i32> %b, %a
|
| + ret <4 x i32> %add
|
| +}
|
| +
|
| +define internal <8 x i16> @_Z7testAddDv8_tS_(<8 x i16> %a, <8 x i16> %b) {
|
| +entry:
|
| + %add = add <8 x i16> %b, %a
|
| + ret <8 x i16> %add
|
| +}
|
| +
|
| +define internal <16 x i8> @_Z7testAddDv16_hS_(<16 x i8> %a, <16 x i8> %b) {
|
| +entry:
|
| + %add = add <16 x i8> %b, %a
|
| + ret <16 x i8> %add
|
| +}
|
| +
|
| +define internal i32 @_Z7testSubbb(i32 %a, i32 %b) {
|
| +entry:
|
| + %b.arg_trunc = trunc i32 %b to i1
|
| + %a.arg_trunc = trunc i32 %a to i1
|
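| +; bool(a - b): the difference is nonzero exactly when a != b, i.e. a xor b.
|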
| + %tobool4 = xor i1 %a.arg_trunc, %b.arg_trunc
|
| + %tobool4.ret_ext = zext i1 %tobool4 to i32
|
| + ret i32 %tobool4.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z7testSubhh(i32 %a, i32 %b) {
|
| +entry:
|
| + %b.arg_trunc = trunc i32 %b to i8
|
| + %a.arg_trunc = trunc i32 %a to i8
|
| + %conv = zext i8 %a.arg_trunc to i32
|
| + %conv1 = zext i8 %b.arg_trunc to i32
|
| + %sub = sub i32 %conv, %conv1
|
| +; %conv2 = trunc i32 %sub to i8
|
| + %conv2 = sub i8 %a.arg_trunc, %b.arg_trunc ; INSERTED
|
| + %conv2.ret_ext = zext i8 %conv2 to i32
|
| + ret i32 %conv2.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z7testSubtt(i32 %a, i32 %b) {
|
| +entry:
|
| + %b.arg_trunc = trunc i32 %b to i16
|
| + %a.arg_trunc = trunc i32 %a to i16
|
| + %conv = zext i16 %a.arg_trunc to i32
|
| + %conv1 = zext i16 %b.arg_trunc to i32
|
| + %sub = sub i32 %conv, %conv1
|
| +; %conv2 = trunc i32 %sub to i16
|
| + %conv2 = sub i16 %a.arg_trunc, %b.arg_trunc ; INSERTED
|
| + %conv2.ret_ext = zext i16 %conv2 to i32
|
| + ret i32 %conv2.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z7testSubjj(i32 %a, i32 %b) {
|
| +entry:
|
| + %sub = sub i32 %a, %b
|
| + ret i32 %sub
|
| +}
|
| +
|
| +define internal i64 @_Z7testSubyy(i64 %a, i64 %b) {
|
| +entry:
|
| + %sub = sub i64 %a, %b
|
| + ret i64 %sub
|
| +}
|
| +
|
| +define internal <4 x i32> @_Z7testSubDv4_jS_(<4 x i32> %a, <4 x i32> %b) {
|
| +entry:
|
| + %sub = sub <4 x i32> %a, %b
|
| + ret <4 x i32> %sub
|
| +}
|
| +
|
| +define internal <8 x i16> @_Z7testSubDv8_tS_(<8 x i16> %a, <8 x i16> %b) {
|
| +entry:
|
| + %sub = sub <8 x i16> %a, %b
|
| + ret <8 x i16> %sub
|
| +}
|
| +
|
| +define internal <16 x i8> @_Z7testSubDv16_hS_(<16 x i8> %a, <16 x i8> %b) {
|
| +entry:
|
| + %sub = sub <16 x i8> %a, %b
|
| + ret <16 x i8> %sub
|
| +}
|
| +
|
| +define internal i32 @_Z7testMulbb(i32 %a, i32 %b) {
|
| +entry:
|
| + %b.arg_trunc = trunc i32 %b to i1
|
| + %a.arg_trunc = trunc i32 %a to i1
|
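| +; bool(a * b): the product is nonzero only when both operands are, i.e. a and b.
|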
| + %tobool4 = and i1 %a.arg_trunc, %b.arg_trunc
|
| + %tobool4.ret_ext = zext i1 %tobool4 to i32
|
| + ret i32 %tobool4.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z7testMulhh(i32 %a, i32 %b) {
|
| +entry:
|
| + %b.arg_trunc = trunc i32 %b to i8
|
| + %a.arg_trunc = trunc i32 %a to i8
|
| + %conv = zext i8 %a.arg_trunc to i32
|
| + %conv1 = zext i8 %b.arg_trunc to i32
|
| + %mul = mul i32 %conv1, %conv
|
| +; %conv2 = trunc i32 %mul to i8
|
| + %conv2 = mul i8 %a.arg_trunc, %b.arg_trunc ; INSERTED
|
| + %conv2.ret_ext = zext i8 %conv2 to i32
|
| + ret i32 %conv2.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z7testMultt(i32 %a, i32 %b) {
|
| +entry:
|
| + %b.arg_trunc = trunc i32 %b to i16
|
| + %a.arg_trunc = trunc i32 %a to i16
|
| + %conv = zext i16 %a.arg_trunc to i32
|
| + %conv1 = zext i16 %b.arg_trunc to i32
|
| + %mul = mul i32 %conv1, %conv
|
| +; %conv2 = trunc i32 %mul to i16
|
| + %conv2 = mul i16 %a.arg_trunc, %b.arg_trunc ; INSERTED
|
| + %conv2.ret_ext = zext i16 %conv2 to i32
|
| + ret i32 %conv2.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z7testMuljj(i32 %a, i32 %b) {
|
| +entry:
|
| + %mul = mul i32 %b, %a
|
| + ret i32 %mul
|
| +}
|
| +
|
| +define internal i64 @_Z7testMulyy(i64 %a, i64 %b) {
|
| +entry:
|
| + %mul = mul i64 %b, %a
|
| + ret i64 %mul
|
| +}
|
| +
|
| +define internal <4 x i32> @_Z7testMulDv4_jS_(<4 x i32> %a, <4 x i32> %b) {
|
| +entry:
|
| + %mul = mul <4 x i32> %b, %a
|
| + ret <4 x i32> %mul
|
| +}
|
| +
|
| +define internal <8 x i16> @_Z7testMulDv8_tS_(<8 x i16> %a, <8 x i16> %b) {
|
| +entry:
|
| + %mul = mul <8 x i16> %b, %a
|
| + ret <8 x i16> %mul
|
| +}
|
| +
|
| +define internal <16 x i8> @_Z7testMulDv16_hS_(<16 x i8> %a, <16 x i8> %b) {
|
| +entry:
|
| + %mul = mul <16 x i8> %b, %a
|
| + ret <16 x i8> %mul
|
| +}
|
| +
|
| +define internal i32 @_Z8testUdivbb(i32 %a, i32 %b) {
|
| +entry:
|
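| +; bool a / bool b: b must be true (division by zero is undefined), so the
|
| +; quotient folds to a.
|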
| + %a.arg_trunc = trunc i32 %a to i1
|
| + %a.arg_trunc.ret_ext = zext i1 %a.arg_trunc to i32
|
| + ret i32 %a.arg_trunc.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z8testUdivhh(i32 %a, i32 %b) {
|
| +entry:
|
| + %b.arg_trunc = trunc i32 %b to i8
|
| + %a.arg_trunc = trunc i32 %a to i8
|
| + %div3 = udiv i8 %a.arg_trunc, %b.arg_trunc
|
| + %div3.ret_ext = zext i8 %div3 to i32
|
| + ret i32 %div3.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z8testUdivtt(i32 %a, i32 %b) {
|
| +entry:
|
| + %b.arg_trunc = trunc i32 %b to i16
|
| + %a.arg_trunc = trunc i32 %a to i16
|
| + %div3 = udiv i16 %a.arg_trunc, %b.arg_trunc
|
| + %div3.ret_ext = zext i16 %div3 to i32
|
| + ret i32 %div3.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z8testUdivjj(i32 %a, i32 %b) {
|
| +entry:
|
| + %div = udiv i32 %a, %b
|
| + ret i32 %div
|
| +}
|
| +
|
| +define internal i64 @_Z8testUdivyy(i64 %a, i64 %b) {
|
| +entry:
|
| + %div = udiv i64 %a, %b
|
| + ret i64 %div
|
| +}
|
| +
|
| +define internal <4 x i32> @_Z8testUdivDv4_jS_(<4 x i32> %a, <4 x i32> %b) {
|
| +entry:
|
| + %div = udiv <4 x i32> %a, %b
|
| + ret <4 x i32> %div
|
| +}
|
| +
|
| +define internal <8 x i16> @_Z8testUdivDv8_tS_(<8 x i16> %a, <8 x i16> %b) {
|
| +entry:
|
| + %div = udiv <8 x i16> %a, %b
|
| + ret <8 x i16> %div
|
| +}
|
| +
|
| +define internal <16 x i8> @_Z8testUdivDv16_hS_(<16 x i8> %a, <16 x i8> %b) {
|
| +entry:
|
| + %div = udiv <16 x i8> %a, %b
|
| + ret <16 x i8> %div
|
| +}
|
| +
|
| +define internal i32 @_Z8testUrembb(i32 %a, i32 %b) {
|
| +entry:
|
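| +; bool a % bool b: with b necessarily true, the remainder folds to 0.
|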
| + %.ret_ext = zext i1 false to i32
|
| + ret i32 %.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z8testUremhh(i32 %a, i32 %b) {
|
| +entry:
|
| + %b.arg_trunc = trunc i32 %b to i8
|
| + %a.arg_trunc = trunc i32 %a to i8
|
| + %0 = urem i8 %a.arg_trunc, %b.arg_trunc
|
| + %.ret_ext = zext i8 %0 to i32
|
| + ret i32 %.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z8testUremtt(i32 %a, i32 %b) {
|
| +entry:
|
| + %b.arg_trunc = trunc i32 %b to i16
|
| + %a.arg_trunc = trunc i32 %a to i16
|
| + %0 = urem i16 %a.arg_trunc, %b.arg_trunc
|
| + %.ret_ext = zext i16 %0 to i32
|
| + ret i32 %.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z8testUremjj(i32 %a, i32 %b) {
|
| +entry:
|
| + %rem = urem i32 %a, %b
|
| + ret i32 %rem
|
| +}
|
| +
|
| +define internal i64 @_Z8testUremyy(i64 %a, i64 %b) {
|
| +entry:
|
| + %rem = urem i64 %a, %b
|
| + ret i64 %rem
|
| +}
|
| +
|
| +define internal <4 x i32> @_Z8testUremDv4_jS_(<4 x i32> %a, <4 x i32> %b) {
|
| +entry:
|
| + %rem = urem <4 x i32> %a, %b
|
| + ret <4 x i32> %rem
|
| +}
|
| +
|
| +define internal <8 x i16> @_Z8testUremDv8_tS_(<8 x i16> %a, <8 x i16> %b) {
|
| +entry:
|
| + %rem = urem <8 x i16> %a, %b
|
| + ret <8 x i16> %rem
|
| +}
|
| +
|
| +define internal <16 x i8> @_Z8testUremDv16_hS_(<16 x i8> %a, <16 x i8> %b) {
|
| +entry:
|
| + %rem = urem <16 x i8> %a, %b
|
| + ret <16 x i8> %rem
|
| +}
|
| +
|
| +define internal i32 @_Z7testShlbb(i32 %a, i32 %b) {
|
| +entry:
|
| + %b.arg_trunc = trunc i32 %b to i1
|
| + %a.arg_trunc = trunc i32 %a to i1
|
| + %conv = zext i1 %a.arg_trunc to i32
|
| + %conv3 = zext i1 %b.arg_trunc to i32
|
| + %shl = shl i32 %conv, %conv3
|
| + %tobool4 = icmp ne i32 %shl, 0
|
| + %tobool4.ret_ext = zext i1 %tobool4 to i32
|
| + ret i32 %tobool4.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z7testShlhh(i32 %a, i32 %b) {
|
| +entry:
|
| + %b.arg_trunc = trunc i32 %b to i8
|
| + %a.arg_trunc = trunc i32 %a to i8
|
| + %conv = zext i8 %a.arg_trunc to i32
|
| + %conv1 = zext i8 %b.arg_trunc to i32
|
| + %shl = shl i32 %conv, %conv1
|
| +; %conv2 = trunc i32 %shl to i8
|
| + %conv2 = shl i8 %a.arg_trunc, %b.arg_trunc ; INSERTED
|
| + %conv2.ret_ext = zext i8 %conv2 to i32
|
| + ret i32 %conv2.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z7testShltt(i32 %a, i32 %b) {
|
| +entry:
|
| + %b.arg_trunc = trunc i32 %b to i16
|
| + %a.arg_trunc = trunc i32 %a to i16
|
| + %conv = zext i16 %a.arg_trunc to i32
|
| + %conv1 = zext i16 %b.arg_trunc to i32
|
| + %shl = shl i32 %conv, %conv1
|
| +; %conv2 = trunc i32 %shl to i16
|
| + %conv2 = shl i16 %a.arg_trunc, %b.arg_trunc ; INSERTED
|
| + %conv2.ret_ext = zext i16 %conv2 to i32
|
| + ret i32 %conv2.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z7testShljj(i32 %a, i32 %b) {
|
| +entry:
|
| + %shl = shl i32 %a, %b
|
| + ret i32 %shl
|
| +}
|
| +
|
| +define internal i64 @_Z7testShlyy(i64 %a, i64 %b) {
|
| +entry:
|
| + %shl = shl i64 %a, %b
|
| + ret i64 %shl
|
| +}
|
| +
|
| +define internal <4 x i32> @_Z7testShlDv4_jS_(<4 x i32> %a, <4 x i32> %b) {
|
| +entry:
|
| + %shl = shl <4 x i32> %a, %b
|
| + ret <4 x i32> %shl
|
| +}
|
| +
|
| +define internal <8 x i16> @_Z7testShlDv8_tS_(<8 x i16> %a, <8 x i16> %b) {
|
| +entry:
|
| + %shl = shl <8 x i16> %a, %b
|
| + ret <8 x i16> %shl
|
| +}
|
| +
|
| +define internal <16 x i8> @_Z7testShlDv16_hS_(<16 x i8> %a, <16 x i8> %b) {
|
| +entry:
|
| + %shl = shl <16 x i8> %a, %b
|
| + ret <16 x i8> %shl
|
| +}
|
| +
|
| +define internal i32 @_Z8testLshrbb(i32 %a, i32 %b) {
|
| +entry:
|
| + %b.arg_trunc = trunc i32 %b to i1
|
| + %a.arg_trunc = trunc i32 %a to i1
|
| + %conv = zext i1 %a.arg_trunc to i32
|
| + %conv3 = zext i1 %b.arg_trunc to i32
|
| + %shr = lshr i32 %conv, %conv3
|
| + %tobool4 = icmp ne i32 %shr, 0
|
| + %tobool4.ret_ext = zext i1 %tobool4 to i32
|
| + ret i32 %tobool4.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z8testLshrhh(i32 %a, i32 %b) {
|
| +entry:
|
| + %b.arg_trunc = trunc i32 %b to i8
|
| + %a.arg_trunc = trunc i32 %a to i8
|
| + %conv = zext i8 %a.arg_trunc to i32
|
| + %conv1 = zext i8 %b.arg_trunc to i32
|
| + %shr = lshr i32 %conv, %conv1
|
| +; %conv2 = trunc i32 %shr to i8
|
| + %conv2 = lshr i8 %a.arg_trunc, %b.arg_trunc ; INSERTED
|
| + %conv2.ret_ext = zext i8 %conv2 to i32
|
| + ret i32 %conv2.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z8testLshrtt(i32 %a, i32 %b) {
|
| +entry:
|
| + %b.arg_trunc = trunc i32 %b to i16
|
| + %a.arg_trunc = trunc i32 %a to i16
|
| + %conv = zext i16 %a.arg_trunc to i32
|
| + %conv1 = zext i16 %b.arg_trunc to i32
|
| + %shr = lshr i32 %conv, %conv1
|
| +; %conv2 = trunc i32 %shr to i16
|
| + %conv2 = lshr i16 %a.arg_trunc, %b.arg_trunc ; INSERTED
|
| + %conv2.ret_ext = zext i16 %conv2 to i32
|
| + ret i32 %conv2.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z8testLshrjj(i32 %a, i32 %b) {
|
| +entry:
|
| + %shr = lshr i32 %a, %b
|
| + ret i32 %shr
|
| +}
|
| +
|
| +define internal i64 @_Z8testLshryy(i64 %a, i64 %b) {
|
| +entry:
|
| + %shr = lshr i64 %a, %b
|
| + ret i64 %shr
|
| +}
|
| +
|
| +define internal <4 x i32> @_Z8testLshrDv4_jS_(<4 x i32> %a, <4 x i32> %b) {
|
| +entry:
|
| + %shr = lshr <4 x i32> %a, %b
|
| + ret <4 x i32> %shr
|
| +}
|
| +
|
| +define internal <8 x i16> @_Z8testLshrDv8_tS_(<8 x i16> %a, <8 x i16> %b) {
|
| +entry:
|
| + %shr = lshr <8 x i16> %a, %b
|
| + ret <8 x i16> %shr
|
| +}
|
| +
|
| +define internal <16 x i8> @_Z8testLshrDv16_hS_(<16 x i8> %a, <16 x i8> %b) {
|
| +entry:
|
| + %shr = lshr <16 x i8> %a, %b
|
| + ret <16 x i8> %shr
|
| +}
|
| +
|
| +define internal i32 @_Z7testAndbb(i32 %a, i32 %b) {
|
| +entry:
|
| + %b.arg_trunc = trunc i32 %b to i1
|
| + %a.arg_trunc = trunc i32 %a to i1
|
| + %and5 = and i1 %a.arg_trunc, %b.arg_trunc
|
| + %and5.ret_ext = zext i1 %and5 to i32
|
| + ret i32 %and5.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z7testAndhh(i32 %a, i32 %b) {
|
| +entry:
|
| + %b.arg_trunc = trunc i32 %b to i8
|
| + %a.arg_trunc = trunc i32 %a to i8
|
| + %and3 = and i8 %b.arg_trunc, %a.arg_trunc
|
| + %and3.ret_ext = zext i8 %and3 to i32
|
| + ret i32 %and3.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z7testAndtt(i32 %a, i32 %b) {
|
| +entry:
|
| + %b.arg_trunc = trunc i32 %b to i16
|
| + %a.arg_trunc = trunc i32 %a to i16
|
| + %and3 = and i16 %b.arg_trunc, %a.arg_trunc
|
| + %and3.ret_ext = zext i16 %and3 to i32
|
| + ret i32 %and3.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z7testAndjj(i32 %a, i32 %b) {
|
| +entry:
|
| + %and = and i32 %b, %a
|
| + ret i32 %and
|
| +}
|
| +
|
| +define internal i64 @_Z7testAndyy(i64 %a, i64 %b) {
|
| +entry:
|
| + %and = and i64 %b, %a
|
| + ret i64 %and
|
| +}
|
| +
|
| +define internal <4 x i32> @_Z7testAndDv4_jS_(<4 x i32> %a, <4 x i32> %b) {
|
| +entry:
|
| + %and = and <4 x i32> %b, %a
|
| + ret <4 x i32> %and
|
| +}
|
| +
|
| +define internal <8 x i16> @_Z7testAndDv8_tS_(<8 x i16> %a, <8 x i16> %b) {
|
| +entry:
|
| + %and = and <8 x i16> %b, %a
|
| + ret <8 x i16> %and
|
| +}
|
| +
|
| +define internal <16 x i8> @_Z7testAndDv16_hS_(<16 x i8> %a, <16 x i8> %b) {
|
| +entry:
|
| + %and = and <16 x i8> %b, %a
|
| + ret <16 x i8> %and
|
| +}
|
| +
|
| +define internal i32 @_Z6testOrbb(i32 %a, i32 %b) {
|
| +entry:
|
| + %b.arg_trunc = trunc i32 %b to i1
|
| + %a.arg_trunc = trunc i32 %a to i1
|
| + %or5 = or i1 %a.arg_trunc, %b.arg_trunc
|
| + %or5.ret_ext = zext i1 %or5 to i32
|
| + ret i32 %or5.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z6testOrhh(i32 %a, i32 %b) {
|
| +entry:
|
| + %b.arg_trunc = trunc i32 %b to i8
|
| + %a.arg_trunc = trunc i32 %a to i8
|
| + %or3 = or i8 %b.arg_trunc, %a.arg_trunc
|
| + %or3.ret_ext = zext i8 %or3 to i32
|
| + ret i32 %or3.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z6testOrtt(i32 %a, i32 %b) {
|
| +entry:
|
| + %b.arg_trunc = trunc i32 %b to i16
|
| + %a.arg_trunc = trunc i32 %a to i16
|
| + %or3 = or i16 %b.arg_trunc, %a.arg_trunc
|
| + %or3.ret_ext = zext i16 %or3 to i32
|
| + ret i32 %or3.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z6testOrjj(i32 %a, i32 %b) {
|
| +entry:
|
| + %or = or i32 %b, %a
|
| + ret i32 %or
|
| +}
|
| +
|
| +define internal i64 @_Z6testOryy(i64 %a, i64 %b) {
|
| +entry:
|
| + %or = or i64 %b, %a
|
| + ret i64 %or
|
| +}
|
| +
|
| +define internal <4 x i32> @_Z6testOrDv4_jS_(<4 x i32> %a, <4 x i32> %b) {
|
| +entry:
|
| + %or = or <4 x i32> %b, %a
|
| + ret <4 x i32> %or
|
| +}
|
| +
|
| +define internal <8 x i16> @_Z6testOrDv8_tS_(<8 x i16> %a, <8 x i16> %b) {
|
| +entry:
|
| + %or = or <8 x i16> %b, %a
|
| + ret <8 x i16> %or
|
| +}
|
| +
|
| +define internal <16 x i8> @_Z6testOrDv16_hS_(<16 x i8> %a, <16 x i8> %b) {
|
| +entry:
|
| + %or = or <16 x i8> %b, %a
|
| + ret <16 x i8> %or
|
| +}
|
| +
|
| +define internal i32 @_Z7testXorbb(i32 %a, i32 %b) {
|
| +entry:
|
| + %b.arg_trunc = trunc i32 %b to i1
|
| + %a.arg_trunc = trunc i32 %a to i1
|
| + %xor5 = xor i1 %a.arg_trunc, %b.arg_trunc
|
| + %xor5.ret_ext = zext i1 %xor5 to i32
|
| + ret i32 %xor5.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z7testXorhh(i32 %a, i32 %b) {
|
| +entry:
|
| + %b.arg_trunc = trunc i32 %b to i8
|
| + %a.arg_trunc = trunc i32 %a to i8
|
| + %xor3 = xor i8 %b.arg_trunc, %a.arg_trunc
|
| + %xor3.ret_ext = zext i8 %xor3 to i32
|
| + ret i32 %xor3.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z7testXortt(i32 %a, i32 %b) {
|
| +entry:
|
| + %b.arg_trunc = trunc i32 %b to i16
|
| + %a.arg_trunc = trunc i32 %a to i16
|
| + %xor3 = xor i16 %b.arg_trunc, %a.arg_trunc
|
| + %xor3.ret_ext = zext i16 %xor3 to i32
|
| + ret i32 %xor3.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z7testXorjj(i32 %a, i32 %b) {
|
| +entry:
|
| + %xor = xor i32 %b, %a
|
| + ret i32 %xor
|
| +}
|
| +
|
| +define internal i64 @_Z7testXoryy(i64 %a, i64 %b) {
|
| +entry:
|
| + %xor = xor i64 %b, %a
|
| + ret i64 %xor
|
| +}
|
| +
|
| +define internal <4 x i32> @_Z7testXorDv4_jS_(<4 x i32> %a, <4 x i32> %b) {
|
| +entry:
|
| + %xor = xor <4 x i32> %a, %b
|
| + ret <4 x i32> %xor
|
| +}
|
| +
|
| +define internal <8 x i16> @_Z7testXorDv8_tS_(<8 x i16> %a, <8 x i16> %b) {
|
| +entry:
|
| + %xor = xor <8 x i16> %a, %b
|
| + ret <8 x i16> %xor
|
| +}
|
| +
|
| +define internal <16 x i8> @_Z7testXorDv16_hS_(<16 x i8> %a, <16 x i8> %b) {
|
| +entry:
|
| + %xor = xor <16 x i8> %a, %b
|
| + ret <16 x i8> %xor
|
| +}
|
| +
|
| +define internal i32 @_Z8testSdivbb(i32 %a, i32 %b) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i1
|
| + %a.arg_trunc.ret_ext = zext i1 %a.arg_trunc to i32
|
| + ret i32 %a.arg_trunc.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z8testSdivaa(i32 %a, i32 %b) {
|
| +entry:
|
| + %b.arg_trunc = trunc i32 %b to i8
|
| + %a.arg_trunc = trunc i32 %a to i8
|
| + %conv = sext i8 %a.arg_trunc to i32
|
| + %conv1 = sext i8 %b.arg_trunc to i32
|
| + %div = sdiv i32 %conv, %conv1
|
| +; %conv2 = trunc i32 %div to i8
|
| + %conv2 = sdiv i8 %a.arg_trunc, %b.arg_trunc ; INSERTED
|
| + %conv2.ret_ext = sext i8 %conv2 to i32
|
| + ret i32 %conv2.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z8testSdivss(i32 %a, i32 %b) {
|
| +entry:
|
| + %b.arg_trunc = trunc i32 %b to i16
|
| + %a.arg_trunc = trunc i32 %a to i16
|
| + %conv = sext i16 %a.arg_trunc to i32
|
| + %conv1 = sext i16 %b.arg_trunc to i32
|
| + %div = sdiv i32 %conv, %conv1
|
| +; %conv2 = trunc i32 %div to i16
|
| + %conv2 = sdiv i16 %a.arg_trunc, %b.arg_trunc ; INSERTED
|
| + %conv2.ret_ext = sext i16 %conv2 to i32
|
| + ret i32 %conv2.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z8testSdivii(i32 %a, i32 %b) {
|
| +entry:
|
| + %div = sdiv i32 %a, %b
|
| + ret i32 %div
|
| +}
|
| +
|
| +define internal i64 @_Z8testSdivxx(i64 %a, i64 %b) {
|
| +entry:
|
| + %div = sdiv i64 %a, %b
|
| + ret i64 %div
|
| +}
|
| +
|
| +define internal <4 x i32> @_Z8testSdivDv4_iS_(<4 x i32> %a, <4 x i32> %b) {
|
| +entry:
|
| + %div = sdiv <4 x i32> %a, %b
|
| + ret <4 x i32> %div
|
| +}
|
| +
|
| +define internal <8 x i16> @_Z8testSdivDv8_sS_(<8 x i16> %a, <8 x i16> %b) {
|
| +entry:
|
| + %div = sdiv <8 x i16> %a, %b
|
| + ret <8 x i16> %div
|
| +}
|
| +
|
| +define internal <16 x i8> @_Z8testSdivDv16_aS_(<16 x i8> %a, <16 x i8> %b) {
|
| +entry:
|
| + %div = sdiv <16 x i8> %a, %b
|
| + ret <16 x i8> %div
|
| +}
|
| +
|
| +define internal i32 @_Z8testSrembb(i32 %a, i32 %b) {
|
| +entry:
|
| + %.ret_ext = zext i1 false to i32
|
| + ret i32 %.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z8testSremaa(i32 %a, i32 %b) {
|
| +entry:
|
| + %b.arg_trunc = trunc i32 %b to i8
|
| + %a.arg_trunc = trunc i32 %a to i8
|
| + %conv = sext i8 %a.arg_trunc to i32
|
| + %conv1 = sext i8 %b.arg_trunc to i32
|
| + %rem = srem i32 %conv, %conv1
|
| +; %conv2 = trunc i32 %rem to i8
|
| + %conv2 = srem i8 %a.arg_trunc, %b.arg_trunc ; INSERTED
|
| + %conv2.ret_ext = sext i8 %conv2 to i32
|
| + ret i32 %conv2.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z8testSremss(i32 %a, i32 %b) {
|
| +entry:
|
| + %b.arg_trunc = trunc i32 %b to i16
|
| + %a.arg_trunc = trunc i32 %a to i16
|
| + %conv = sext i16 %a.arg_trunc to i32
|
| + %conv1 = sext i16 %b.arg_trunc to i32
|
| + %rem = srem i32 %conv, %conv1
|
| +; %conv2 = trunc i32 %rem to i16
|
| + %conv2 = srem i16 %a.arg_trunc, %b.arg_trunc ; INSERTED
|
| + %conv2.ret_ext = sext i16 %conv2 to i32
|
| + ret i32 %conv2.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z8testSremii(i32 %a, i32 %b) {
|
| +entry:
|
| + %rem = srem i32 %a, %b
|
| + ret i32 %rem
|
| +}
|
| +
|
| +define internal i64 @_Z8testSremxx(i64 %a, i64 %b) {
|
| +entry:
|
| + %rem = srem i64 %a, %b
|
| + ret i64 %rem
|
| +}
|
| +
|
| +define internal <4 x i32> @_Z8testSremDv4_iS_(<4 x i32> %a, <4 x i32> %b) {
|
| +entry:
|
| + %rem = srem <4 x i32> %a, %b
|
| + ret <4 x i32> %rem
|
| +}
|
| +
|
| +define internal <8 x i16> @_Z8testSremDv8_sS_(<8 x i16> %a, <8 x i16> %b) {
|
| +entry:
|
| + %rem = srem <8 x i16> %a, %b
|
| + ret <8 x i16> %rem
|
| +}
|
| +
|
| +define internal <16 x i8> @_Z8testSremDv16_aS_(<16 x i8> %a, <16 x i8> %b) {
|
| +entry:
|
| + %rem = srem <16 x i8> %a, %b
|
| + ret <16 x i8> %rem
|
| +}
|
| +
|
| +define internal i32 @_Z8testAshrbb(i32 %a, i32 %b) {
|
| +entry:
|
| + %b.arg_trunc = trunc i32 %b to i1
|
| + %a.arg_trunc = trunc i32 %a to i1
|
| + %conv = zext i1 %a.arg_trunc to i32
|
| + %conv3 = zext i1 %b.arg_trunc to i32
|
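| +; bool promotes by zero extension, so arithmetic and logical shift right
|
| +; coincide and lshr is emitted here.
|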
| + %shr = lshr i32 %conv, %conv3
|
| + %tobool4 = icmp ne i32 %shr, 0
|
| + %tobool4.ret_ext = zext i1 %tobool4 to i32
|
| + ret i32 %tobool4.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z8testAshraa(i32 %a, i32 %b) {
|
| +entry:
|
| + %b.arg_trunc = trunc i32 %b to i8
|
| + %a.arg_trunc = trunc i32 %a to i8
|
| + %conv = sext i8 %a.arg_trunc to i32
|
| + %conv1 = sext i8 %b.arg_trunc to i32
|
| + %shr = ashr i32 %conv, %conv1
|
| +; %conv2 = trunc i32 %shr to i8
|
| + %conv2 = ashr i8 %a.arg_trunc, %b.arg_trunc ; INSERTED
|
| + %conv2.ret_ext = sext i8 %conv2 to i32
|
| + ret i32 %conv2.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z8testAshrss(i32 %a, i32 %b) {
|
| +entry:
|
| + %b.arg_trunc = trunc i32 %b to i16
|
| + %a.arg_trunc = trunc i32 %a to i16
|
| + %conv = sext i16 %a.arg_trunc to i32
|
| + %conv1 = sext i16 %b.arg_trunc to i32
|
| + %shr = ashr i32 %conv, %conv1
|
| +; %conv2 = trunc i32 %shr to i16
|
| + %conv2 = ashr i16 %a.arg_trunc, %b.arg_trunc ; INSERTED
|
| + %conv2.ret_ext = sext i16 %conv2 to i32
|
| + ret i32 %conv2.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z8testAshrii(i32 %a, i32 %b) {
|
| +entry:
|
| + %shr = ashr i32 %a, %b
|
| + ret i32 %shr
|
| +}
|
| +
|
| +define internal i64 @_Z8testAshrxx(i64 %a, i64 %b) {
|
| +entry:
|
| + %shr = ashr i64 %a, %b
|
| + ret i64 %shr
|
| +}
|
| +
|
| +define internal <4 x i32> @_Z8testAshrDv4_iS_(<4 x i32> %a, <4 x i32> %b) {
|
| +entry:
|
| + %shr = ashr <4 x i32> %a, %b
|
| + ret <4 x i32> %shr
|
| +}
|
| +
|
| +define internal <8 x i16> @_Z8testAshrDv8_sS_(<8 x i16> %a, <8 x i16> %b) {
|
| +entry:
|
| + %shr = ashr <8 x i16> %a, %b
|
| + ret <8 x i16> %shr
|
| +}
|
| +
|
| +define internal <16 x i8> @_Z8testAshrDv16_aS_(<16 x i8> %a, <16 x i8> %b) {
|
| +entry:
|
| + %shr = ashr <16 x i8> %a, %b
|
| + ret <16 x i8> %shr
|
| +}
|
| +
|
| +define internal float @_Z8testFaddff(float %a, float %b) {
|
| +entry:
|
| + %add = fadd float %a, %b
|
| + ret float %add
|
| +}
|
| +
|
| +define internal double @_Z8testFadddd(double %a, double %b) {
|
| +entry:
|
| + %add = fadd double %a, %b
|
| + ret double %add
|
| +}
|
| +
|
| +define internal <4 x float> @_Z8testFaddDv4_fS_(<4 x float> %a, <4 x float> %b) {
|
| +entry:
|
| + %add = fadd <4 x float> %a, %b
|
| + ret <4 x float> %add
|
| +}
|
| +
|
| +define internal float @_Z8testFsubff(float %a, float %b) {
|
| +entry:
|
| + %sub = fsub float %a, %b
|
| + ret float %sub
|
| +}
|
| +
|
| +define internal double @_Z8testFsubdd(double %a, double %b) {
|
| +entry:
|
| + %sub = fsub double %a, %b
|
| + ret double %sub
|
| +}
|
| +
|
| +define internal <4 x float> @_Z8testFsubDv4_fS_(<4 x float> %a, <4 x float> %b) {
|
| +entry:
|
| + %sub = fsub <4 x float> %a, %b
|
| + ret <4 x float> %sub
|
| +}
|
| +
|
| +define internal float @_Z8testFmulff(float %a, float %b) {
|
| +entry:
|
| + %mul = fmul float %a, %b
|
| + ret float %mul
|
| +}
|
| +
|
| +define internal double @_Z8testFmuldd(double %a, double %b) {
|
| +entry:
|
| + %mul = fmul double %a, %b
|
| + ret double %mul
|
| +}
|
| +
|
| +define internal <4 x float> @_Z8testFmulDv4_fS_(<4 x float> %a, <4 x float> %b) {
|
| +entry:
|
| + %mul = fmul <4 x float> %a, %b
|
| + ret <4 x float> %mul
|
| +}
|
| +
|
| +define internal float @_Z8testFdivff(float %a, float %b) {
|
| +entry:
|
| + %div = fdiv float %a, %b
|
| + ret float %div
|
| +}
|
| +
|
| +define internal double @_Z8testFdivdd(double %a, double %b) {
|
| +entry:
|
| + %div = fdiv double %a, %b
|
| + ret double %div
|
| +}
|
| +
|
| +define internal <4 x float> @_Z8testFdivDv4_fS_(<4 x float> %a, <4 x float> %b) {
|
| +entry:
|
| + %div = fdiv <4 x float> %a, %b
|
| + ret <4 x float> %div
|
| +}
|
| +
|
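| +; frem has no C++ operator form; the Frem tests call an external myFrem helper
|
| +; rather than emitting the frem instruction directly.
|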
| +define internal float @_Z8testFremff(float %a, float %b) {
|
| +entry:
|
| + %call = tail call float @_Z6myFremff(float %a, float %b)
|
| + ret float %call
|
| +}
|
| +
|
| +declare float @_Z6myFremff(float, float)
|
| +
|
| +define internal double @_Z8testFremdd(double %a, double %b) {
|
| +entry:
|
| + %call = tail call double @_Z6myFremdd(double %a, double %b)
|
| + ret double %call
|
| +}
|
| +
|
| +declare double @_Z6myFremdd(double, double)
|
| +
|
| +define internal <4 x float> @_Z8testFremDv4_fS_(<4 x float> %a, <4 x float> %b) {
|
| +entry:
|
| + %call = tail call <4 x float> @_Z6myFremDv4_fS_(<4 x float> %a, <4 x float> %b)
|
| + ret <4 x float> %call
|
| +}
|
| +
|
| +declare <4 x float> @_Z6myFremDv4_fS_(<4 x float>, <4 x float>)
|
| +
|
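| +; The testMultiplyBy<K>/testMultiplyByNeg<K> family multiplies by constants so
|
| +; a lowering can pattern-match strength reductions (shl for powers of two,
|
| +; identity/negation for +/-1, zero for 0). The bool variants fold to 0 for
|
| +; K == 0 and to a otherwise, since bool(K * a) == a for any nonzero K.
|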
| +define internal i32 @_Z15testMultiplyBy0bb(i32 %a, i32) {
|
| +entry:
|
| + %.ret_ext = zext i1 false to i32
|
| + ret i32 %.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z18testMultiplyByNeg0bb(i32 %a, i32) {
|
| +entry:
|
| + %.ret_ext = zext i1 false to i32
|
| + ret i32 %.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z15testMultiplyBy0hh(i32 %a, i32) {
|
| +entry:
|
| + %.ret_ext = zext i8 0 to i32
|
| + ret i32 %.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z18testMultiplyByNeg0hh(i32 %a, i32) {
|
| +entry:
|
| + %.ret_ext = zext i8 0 to i32
|
| + ret i32 %.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z15testMultiplyBy0tt(i32 %a, i32) {
|
| +entry:
|
| + %.ret_ext = zext i16 0 to i32
|
| + ret i32 %.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z18testMultiplyByNeg0tt(i32 %a, i32) {
|
| +entry:
|
| + %.ret_ext = zext i16 0 to i32
|
| + ret i32 %.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z15testMultiplyBy0jj(i32 %a, i32) {
|
| +entry:
|
| + ret i32 0
|
| +}
|
| +
|
| +define internal i32 @_Z18testMultiplyByNeg0jj(i32 %a, i32) {
|
| +entry:
|
| + ret i32 0
|
| +}
|
| +
|
| +define internal i64 @_Z15testMultiplyBy0yy(i64 %a, i64) {
|
| +entry:
|
| + ret i64 0
|
| +}
|
| +
|
| +define internal i64 @_Z18testMultiplyByNeg0yy(i64 %a, i64) {
|
| +entry:
|
| + ret i64 0
|
| +}
|
| +
|
| +define internal i32 @_Z15testMultiplyBy1bb(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i1
|
| + %a.arg_trunc.ret_ext = zext i1 %a.arg_trunc to i32
|
| + ret i32 %a.arg_trunc.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z18testMultiplyByNeg1bb(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i1
|
| + %a.arg_trunc.ret_ext = zext i1 %a.arg_trunc to i32
|
| + ret i32 %a.arg_trunc.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z15testMultiplyBy1hh(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i8
|
| + %a.arg_trunc.ret_ext = zext i8 %a.arg_trunc to i32
|
| + ret i32 %a.arg_trunc.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z18testMultiplyByNeg1hh(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i8
|
| + %conv = zext i8 %a.arg_trunc to i32
|
| + %mul = sub i32 0, %conv
|
| + %conv1 = trunc i32 %mul to i8
|
| + %conv1.ret_ext = zext i8 %conv1 to i32
|
| + ret i32 %conv1.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z15testMultiplyBy1tt(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i16
|
| + %a.arg_trunc.ret_ext = zext i16 %a.arg_trunc to i32
|
| + ret i32 %a.arg_trunc.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z18testMultiplyByNeg1tt(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i16
|
| + %conv = zext i16 %a.arg_trunc to i32
|
| + %mul = sub i32 0, %conv
|
| + %conv1 = trunc i32 %mul to i16
|
| + %conv1.ret_ext = zext i16 %conv1 to i32
|
| + ret i32 %conv1.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z15testMultiplyBy1jj(i32 %a, i32) {
|
| +entry:
|
| + ret i32 %a
|
| +}
|
| +
|
| +define internal i32 @_Z18testMultiplyByNeg1jj(i32 %a, i32) {
|
| +entry:
|
| + %mul = sub i32 0, %a
|
| + ret i32 %mul
|
| +}
|
| +
|
| +define internal i64 @_Z15testMultiplyBy1yy(i64 %a, i64) {
|
| +entry:
|
| + ret i64 %a
|
| +}
|
| +
|
| +define internal i64 @_Z18testMultiplyByNeg1yy(i64 %a, i64) {
|
| +entry:
|
| + %mul = sub i64 0, %a
|
| + ret i64 %mul
|
| +}
|
| +
|
| +define internal i32 @_Z15testMultiplyBy2bb(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i1
|
| + %a.arg_trunc.ret_ext = zext i1 %a.arg_trunc to i32
|
| + ret i32 %a.arg_trunc.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z18testMultiplyByNeg2bb(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i1
|
| + %a.arg_trunc.ret_ext = zext i1 %a.arg_trunc to i32
|
| + ret i32 %a.arg_trunc.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z15testMultiplyBy2hh(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i8
|
| + %conv = zext i8 %a.arg_trunc to i32
|
| + %mul = shl i32 %conv, 1
|
| + %conv1 = trunc i32 %mul to i8
|
| + %conv1.ret_ext = zext i8 %conv1 to i32
|
| + ret i32 %conv1.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z18testMultiplyByNeg2hh(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i8
|
| + %conv = zext i8 %a.arg_trunc to i32
|
| + %mul = mul i32 %conv, -2
|
| + %conv1 = trunc i32 %mul to i8
|
| + %conv1.ret_ext = zext i8 %conv1 to i32
|
| + ret i32 %conv1.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z15testMultiplyBy2tt(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i16
|
| + %conv = zext i16 %a.arg_trunc to i32
|
| + %mul = shl i32 %conv, 1
|
| + %conv1 = trunc i32 %mul to i16
|
| + %conv1.ret_ext = zext i16 %conv1 to i32
|
| + ret i32 %conv1.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z18testMultiplyByNeg2tt(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i16
|
| + %conv = zext i16 %a.arg_trunc to i32
|
| + %mul = mul i32 %conv, -2
|
| + %conv1 = trunc i32 %mul to i16
|
| + %conv1.ret_ext = zext i16 %conv1 to i32
|
| + ret i32 %conv1.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z15testMultiplyBy2jj(i32 %a, i32) {
|
| +entry:
|
| + %mul = shl i32 %a, 1
|
| + ret i32 %mul
|
| +}
|
| +
|
| +define internal i32 @_Z18testMultiplyByNeg2jj(i32 %a, i32) {
|
| +entry:
|
| + %mul = mul i32 %a, -2
|
| + ret i32 %mul
|
| +}
|
| +
|
| +define internal i64 @_Z15testMultiplyBy2yy(i64 %a, i64) {
|
| +entry:
|
| + %mul = shl i64 %a, 1
|
| + ret i64 %mul
|
| +}
|
| +
|
| +define internal i64 @_Z18testMultiplyByNeg2yy(i64 %a, i64) {
|
| +entry:
|
| + %mul = mul i64 %a, -2
|
| + ret i64 %mul
|
| +}
|
| +
|
| +define internal i32 @_Z15testMultiplyBy3bb(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i1
|
| + %a.arg_trunc.ret_ext = zext i1 %a.arg_trunc to i32
|
| + ret i32 %a.arg_trunc.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z18testMultiplyByNeg3bb(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i1
|
| + %a.arg_trunc.ret_ext = zext i1 %a.arg_trunc to i32
|
| + ret i32 %a.arg_trunc.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z15testMultiplyBy3hh(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i8
|
| + %conv = zext i8 %a.arg_trunc to i32
|
| + %mul = mul i32 %conv, 3
|
| + %conv1 = trunc i32 %mul to i8
|
| + %conv1.ret_ext = zext i8 %conv1 to i32
|
| + ret i32 %conv1.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z18testMultiplyByNeg3hh(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i8
|
| + %conv = zext i8 %a.arg_trunc to i32
|
| + %mul = mul i32 %conv, -3
|
| + %conv1 = trunc i32 %mul to i8
|
| + %conv1.ret_ext = zext i8 %conv1 to i32
|
| + ret i32 %conv1.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z15testMultiplyBy3tt(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i16
|
| + %conv = zext i16 %a.arg_trunc to i32
|
| + %mul = mul i32 %conv, 3
|
| + %conv1 = trunc i32 %mul to i16
|
| + %conv1.ret_ext = zext i16 %conv1 to i32
|
| + ret i32 %conv1.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z18testMultiplyByNeg3tt(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i16
|
| + %conv = zext i16 %a.arg_trunc to i32
|
| + %mul = mul i32 %conv, -3
|
| + %conv1 = trunc i32 %mul to i16
|
| + %conv1.ret_ext = zext i16 %conv1 to i32
|
| + ret i32 %conv1.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z15testMultiplyBy3jj(i32 %a, i32) {
|
| +entry:
|
| + %mul = mul i32 %a, 3
|
| + ret i32 %mul
|
| +}
|
| +
|
| +define internal i32 @_Z18testMultiplyByNeg3jj(i32 %a, i32) {
|
| +entry:
|
| + %mul = mul i32 %a, -3
|
| + ret i32 %mul
|
| +}
|
| +
|
| +define internal i64 @_Z15testMultiplyBy3yy(i64 %a, i64) {
|
| +entry:
|
| + %mul = mul i64 %a, 3
|
| + ret i64 %mul
|
| +}
|
| +
|
| +define internal i64 @_Z18testMultiplyByNeg3yy(i64 %a, i64) {
|
| +entry:
|
| + %mul = mul i64 %a, -3
|
| + ret i64 %mul
|
| +}
|
| +
|
| +define internal i32 @_Z15testMultiplyBy4bb(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i1
|
| + %a.arg_trunc.ret_ext = zext i1 %a.arg_trunc to i32
|
| + ret i32 %a.arg_trunc.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z18testMultiplyByNeg4bb(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i1
|
| + %a.arg_trunc.ret_ext = zext i1 %a.arg_trunc to i32
|
| + ret i32 %a.arg_trunc.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z15testMultiplyBy4hh(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i8
|
| + %conv = zext i8 %a.arg_trunc to i32
|
| + %mul = shl i32 %conv, 2
|
| + %conv1 = trunc i32 %mul to i8
|
| + %conv1.ret_ext = zext i8 %conv1 to i32
|
| + ret i32 %conv1.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z18testMultiplyByNeg4hh(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i8
|
| + %conv = zext i8 %a.arg_trunc to i32
|
| + %mul = mul i32 %conv, -4
|
| + %conv1 = trunc i32 %mul to i8
|
| + %conv1.ret_ext = zext i8 %conv1 to i32
|
| + ret i32 %conv1.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z15testMultiplyBy4tt(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i16
|
| + %conv = zext i16 %a.arg_trunc to i32
|
| + %mul = shl i32 %conv, 2
|
| + %conv1 = trunc i32 %mul to i16
|
| + %conv1.ret_ext = zext i16 %conv1 to i32
|
| + ret i32 %conv1.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z18testMultiplyByNeg4tt(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i16
|
| + %conv = zext i16 %a.arg_trunc to i32
|
| + %mul = mul i32 %conv, -4
|
| + %conv1 = trunc i32 %mul to i16
|
| + %conv1.ret_ext = zext i16 %conv1 to i32
|
| + ret i32 %conv1.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z15testMultiplyBy4jj(i32 %a, i32) {
|
| +entry:
|
| + %mul = shl i32 %a, 2
|
| + ret i32 %mul
|
| +}
|
| +
|
| +define internal i32 @_Z18testMultiplyByNeg4jj(i32 %a, i32) {
|
| +entry:
|
| + %mul = mul i32 %a, -4
|
| + ret i32 %mul
|
| +}
|
| +
|
| +define internal i64 @_Z15testMultiplyBy4yy(i64 %a, i64) {
|
| +entry:
|
| + %mul = shl i64 %a, 2
|
| + ret i64 %mul
|
| +}
|
| +
|
| +define internal i64 @_Z18testMultiplyByNeg4yy(i64 %a, i64) {
|
| +entry:
|
| + %mul = mul i64 %a, -4
|
| + ret i64 %mul
|
| +}
|
| +
|
| +define internal i32 @_Z15testMultiplyBy5bb(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i1
|
| + %a.arg_trunc.ret_ext = zext i1 %a.arg_trunc to i32
|
| + ret i32 %a.arg_trunc.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z18testMultiplyByNeg5bb(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i1
|
| + %a.arg_trunc.ret_ext = zext i1 %a.arg_trunc to i32
|
| + ret i32 %a.arg_trunc.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z15testMultiplyBy5hh(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i8
|
| + %conv = zext i8 %a.arg_trunc to i32
|
| + %mul = mul i32 %conv, 5
|
| + %conv1 = trunc i32 %mul to i8
|
| + %conv1.ret_ext = zext i8 %conv1 to i32
|
| + ret i32 %conv1.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z18testMultiplyByNeg5hh(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i8
|
| + %conv = zext i8 %a.arg_trunc to i32
|
| + %mul = mul i32 %conv, -5
|
| + %conv1 = trunc i32 %mul to i8
|
| + %conv1.ret_ext = zext i8 %conv1 to i32
|
| + ret i32 %conv1.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z15testMultiplyBy5tt(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i16
|
| + %conv = zext i16 %a.arg_trunc to i32
|
| + %mul = mul i32 %conv, 5
|
| + %conv1 = trunc i32 %mul to i16
|
| + %conv1.ret_ext = zext i16 %conv1 to i32
|
| + ret i32 %conv1.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z18testMultiplyByNeg5tt(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i16
|
| + %conv = zext i16 %a.arg_trunc to i32
|
| + %mul = mul i32 %conv, -5
|
| + %conv1 = trunc i32 %mul to i16
|
| + %conv1.ret_ext = zext i16 %conv1 to i32
|
| + ret i32 %conv1.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z15testMultiplyBy5jj(i32 %a, i32) {
|
| +entry:
|
| + %mul = mul i32 %a, 5
|
| + ret i32 %mul
|
| +}
|
| +
|
| +define internal i32 @_Z18testMultiplyByNeg5jj(i32 %a, i32) {
|
| +entry:
|
| + %mul = mul i32 %a, -5
|
| + ret i32 %mul
|
| +}
|
| +
|
| +define internal i64 @_Z15testMultiplyBy5yy(i64 %a, i64) {
|
| +entry:
|
| + %mul = mul i64 %a, 5
|
| + ret i64 %mul
|
| +}
|
| +
|
| +define internal i64 @_Z18testMultiplyByNeg5yy(i64 %a, i64) {
|
| +entry:
|
| + %mul = mul i64 %a, -5
|
| + ret i64 %mul
|
| +}
|
| +
|
| +define internal i32 @_Z15testMultiplyBy7bb(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i1
|
| + %a.arg_trunc.ret_ext = zext i1 %a.arg_trunc to i32
|
| + ret i32 %a.arg_trunc.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z18testMultiplyByNeg7bb(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i1
|
| + %a.arg_trunc.ret_ext = zext i1 %a.arg_trunc to i32
|
| + ret i32 %a.arg_trunc.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z15testMultiplyBy7hh(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i8
|
| + %conv = zext i8 %a.arg_trunc to i32
|
| + %mul = mul i32 %conv, 7
|
| + %conv1 = trunc i32 %mul to i8
|
| + %conv1.ret_ext = zext i8 %conv1 to i32
|
| + ret i32 %conv1.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z18testMultiplyByNeg7hh(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i8
|
| + %conv = zext i8 %a.arg_trunc to i32
|
| + %mul = mul i32 %conv, -7
|
| + %conv1 = trunc i32 %mul to i8
|
| + %conv1.ret_ext = zext i8 %conv1 to i32
|
| + ret i32 %conv1.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z15testMultiplyBy7tt(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i16
|
| + %conv = zext i16 %a.arg_trunc to i32
|
| + %mul = mul i32 %conv, 7
|
| + %conv1 = trunc i32 %mul to i16
|
| + %conv1.ret_ext = zext i16 %conv1 to i32
|
| + ret i32 %conv1.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z18testMultiplyByNeg7tt(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i16
|
| + %conv = zext i16 %a.arg_trunc to i32
|
| + %mul = mul i32 %conv, -7
|
| + %conv1 = trunc i32 %mul to i16
|
| + %conv1.ret_ext = zext i16 %conv1 to i32
|
| + ret i32 %conv1.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z15testMultiplyBy7jj(i32 %a, i32) {
|
| +entry:
|
| + %mul = mul i32 %a, 7
|
| + ret i32 %mul
|
| +}
|
| +
|
| +define internal i32 @_Z18testMultiplyByNeg7jj(i32 %a, i32) {
|
| +entry:
|
| + %mul = mul i32 %a, -7
|
| + ret i32 %mul
|
| +}
|
| +
|
| +define internal i64 @_Z15testMultiplyBy7yy(i64 %a, i64) {
|
| +entry:
|
| + %mul = mul i64 %a, 7
|
| + ret i64 %mul
|
| +}
|
| +
|
| +define internal i64 @_Z18testMultiplyByNeg7yy(i64 %a, i64) {
|
| +entry:
|
| + %mul = mul i64 %a, -7
|
| + ret i64 %mul
|
| +}
|
| +
|
| +define internal i32 @_Z15testMultiplyBy8bb(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i1
|
| + %a.arg_trunc.ret_ext = zext i1 %a.arg_trunc to i32
|
| + ret i32 %a.arg_trunc.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z18testMultiplyByNeg8bb(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i1
|
| + %a.arg_trunc.ret_ext = zext i1 %a.arg_trunc to i32
|
| + ret i32 %a.arg_trunc.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z15testMultiplyBy8hh(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i8
|
| + %conv = zext i8 %a.arg_trunc to i32
|
| + %mul = shl i32 %conv, 3
|
| + %conv1 = trunc i32 %mul to i8
|
| + %conv1.ret_ext = zext i8 %conv1 to i32
|
| + ret i32 %conv1.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z18testMultiplyByNeg8hh(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i8
|
| + %conv = zext i8 %a.arg_trunc to i32
|
| + %mul = mul i32 %conv, -8
|
| + %conv1 = trunc i32 %mul to i8
|
| + %conv1.ret_ext = zext i8 %conv1 to i32
|
| + ret i32 %conv1.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z15testMultiplyBy8tt(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i16
|
| + %conv = zext i16 %a.arg_trunc to i32
|
| + %mul = shl i32 %conv, 3
|
| + %conv1 = trunc i32 %mul to i16
|
| + %conv1.ret_ext = zext i16 %conv1 to i32
|
| + ret i32 %conv1.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z18testMultiplyByNeg8tt(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i16
|
| + %conv = zext i16 %a.arg_trunc to i32
|
| + %mul = mul i32 %conv, -8
|
| + %conv1 = trunc i32 %mul to i16
|
| + %conv1.ret_ext = zext i16 %conv1 to i32
|
| + ret i32 %conv1.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z15testMultiplyBy8jj(i32 %a, i32) {
|
| +entry:
|
| + %mul = shl i32 %a, 3
|
| + ret i32 %mul
|
| +}
|
| +
|
| +define internal i32 @_Z18testMultiplyByNeg8jj(i32 %a, i32) {
|
| +entry:
|
| + %mul = mul i32 %a, -8
|
| + ret i32 %mul
|
| +}
|
| +
|
| +define internal i64 @_Z15testMultiplyBy8yy(i64 %a, i64) {
|
| +entry:
|
| + %mul = shl i64 %a, 3
|
| + ret i64 %mul
|
| +}
|
| +
|
| +define internal i64 @_Z18testMultiplyByNeg8yy(i64 %a, i64) {
|
| +entry:
|
| + %mul = mul i64 %a, -8
|
| + ret i64 %mul
|
| +}
|
| +
|
| +define internal i32 @_Z15testMultiplyBy9bb(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i1
|
| + %a.arg_trunc.ret_ext = zext i1 %a.arg_trunc to i32
|
| + ret i32 %a.arg_trunc.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z18testMultiplyByNeg9bb(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i1
|
| + %a.arg_trunc.ret_ext = zext i1 %a.arg_trunc to i32
|
| + ret i32 %a.arg_trunc.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z15testMultiplyBy9hh(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i8
|
| + %conv = zext i8 %a.arg_trunc to i32
|
| + %mul = mul i32 %conv, 9
|
| + %conv1 = trunc i32 %mul to i8
|
| + %conv1.ret_ext = zext i8 %conv1 to i32
|
| + ret i32 %conv1.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z18testMultiplyByNeg9hh(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i8
|
| + %conv = zext i8 %a.arg_trunc to i32
|
| + %mul = mul i32 %conv, -9
|
| + %conv1 = trunc i32 %mul to i8
|
| + %conv1.ret_ext = zext i8 %conv1 to i32
|
| + ret i32 %conv1.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z15testMultiplyBy9tt(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i16
|
| + %conv = zext i16 %a.arg_trunc to i32
|
| + %mul = mul i32 %conv, 9
|
| + %conv1 = trunc i32 %mul to i16
|
| + %conv1.ret_ext = zext i16 %conv1 to i32
|
| + ret i32 %conv1.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z18testMultiplyByNeg9tt(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i16
|
| + %conv = zext i16 %a.arg_trunc to i32
|
| + %mul = mul i32 %conv, -9
|
| + %conv1 = trunc i32 %mul to i16
|
| + %conv1.ret_ext = zext i16 %conv1 to i32
|
| + ret i32 %conv1.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z15testMultiplyBy9jj(i32 %a, i32) {
|
| +entry:
|
| + %mul = mul i32 %a, 9
|
| + ret i32 %mul
|
| +}
|
| +
|
| +define internal i32 @_Z18testMultiplyByNeg9jj(i32 %a, i32) {
|
| +entry:
|
| + %mul = mul i32 %a, -9
|
| + ret i32 %mul
|
| +}
|
| +
|
| +define internal i64 @_Z15testMultiplyBy9yy(i64 %a, i64) {
|
| +entry:
|
| + %mul = mul i64 %a, 9
|
| + ret i64 %mul
|
| +}
|
| +
|
| +define internal i64 @_Z18testMultiplyByNeg9yy(i64 %a, i64) {
|
| +entry:
|
| + %mul = mul i64 %a, -9
|
| + ret i64 %mul
|
| +}
|
| +
|
| +define internal i32 @_Z16testMultiplyBy10bb(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i1
|
| + %a.arg_trunc.ret_ext = zext i1 %a.arg_trunc to i32
|
| + ret i32 %a.arg_trunc.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z19testMultiplyByNeg10bb(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i1
|
| + %a.arg_trunc.ret_ext = zext i1 %a.arg_trunc to i32
|
| + ret i32 %a.arg_trunc.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z16testMultiplyBy10hh(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i8
|
| + %conv = zext i8 %a.arg_trunc to i32
|
| + %mul = mul i32 %conv, 10
|
| + %conv1 = trunc i32 %mul to i8
|
| + %conv1.ret_ext = zext i8 %conv1 to i32
|
| + ret i32 %conv1.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z19testMultiplyByNeg10hh(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i8
|
| + %conv = zext i8 %a.arg_trunc to i32
|
| + %mul = mul i32 %conv, -10
|
| + %conv1 = trunc i32 %mul to i8
|
| + %conv1.ret_ext = zext i8 %conv1 to i32
|
| + ret i32 %conv1.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z16testMultiplyBy10tt(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i16
|
| + %conv = zext i16 %a.arg_trunc to i32
|
| + %mul = mul i32 %conv, 10
|
| + %conv1 = trunc i32 %mul to i16
|
| + %conv1.ret_ext = zext i16 %conv1 to i32
|
| + ret i32 %conv1.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z19testMultiplyByNeg10tt(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i16
|
| + %conv = zext i16 %a.arg_trunc to i32
|
| + %mul = mul i32 %conv, -10
|
| + %conv1 = trunc i32 %mul to i16
|
| + %conv1.ret_ext = zext i16 %conv1 to i32
|
| + ret i32 %conv1.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z16testMultiplyBy10jj(i32 %a, i32) {
|
| +entry:
|
| + %mul = mul i32 %a, 10
|
| + ret i32 %mul
|
| +}
|
| +
|
| +define internal i32 @_Z19testMultiplyByNeg10jj(i32 %a, i32) {
|
| +entry:
|
| + %mul = mul i32 %a, -10
|
| + ret i32 %mul
|
| +}
|
| +
|
| +define internal i64 @_Z16testMultiplyBy10yy(i64 %a, i64) {
|
| +entry:
|
| + %mul = mul i64 %a, 10
|
| + ret i64 %mul
|
| +}
|
| +
|
| +define internal i64 @_Z19testMultiplyByNeg10yy(i64 %a, i64) {
|
| +entry:
|
| + %mul = mul i64 %a, -10
|
| + ret i64 %mul
|
| +}
|
| +
|
| +define internal i32 @_Z16testMultiplyBy25bb(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i1
|
| + %a.arg_trunc.ret_ext = zext i1 %a.arg_trunc to i32
|
| + ret i32 %a.arg_trunc.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z19testMultiplyByNeg25bb(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i1
|
| + %a.arg_trunc.ret_ext = zext i1 %a.arg_trunc to i32
|
| + ret i32 %a.arg_trunc.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z16testMultiplyBy25hh(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i8
|
| + %conv = zext i8 %a.arg_trunc to i32
|
| + %mul = mul i32 %conv, 25
|
| + %conv1 = trunc i32 %mul to i8
|
| + %conv1.ret_ext = zext i8 %conv1 to i32
|
| + ret i32 %conv1.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z19testMultiplyByNeg25hh(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i8
|
| + %conv = zext i8 %a.arg_trunc to i32
|
| + %mul = mul i32 %conv, -25
|
| + %conv1 = trunc i32 %mul to i8
|
| + %conv1.ret_ext = zext i8 %conv1 to i32
|
| + ret i32 %conv1.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z16testMultiplyBy25tt(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i16
|
| + %conv = zext i16 %a.arg_trunc to i32
|
| + %mul = mul i32 %conv, 25
|
| + %conv1 = trunc i32 %mul to i16
|
| + %conv1.ret_ext = zext i16 %conv1 to i32
|
| + ret i32 %conv1.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z19testMultiplyByNeg25tt(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i16
|
| + %conv = zext i16 %a.arg_trunc to i32
|
| + %mul = mul i32 %conv, -25
|
| + %conv1 = trunc i32 %mul to i16
|
| + %conv1.ret_ext = zext i16 %conv1 to i32
|
| + ret i32 %conv1.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z16testMultiplyBy25jj(i32 %a, i32) {
|
| +entry:
|
| + %mul = mul i32 %a, 25
|
| + ret i32 %mul
|
| +}
|
| +
|
| +define internal i32 @_Z19testMultiplyByNeg25jj(i32 %a, i32) {
|
| +entry:
|
| + %mul = mul i32 %a, -25
|
| + ret i32 %mul
|
| +}
|
| +
|
| +define internal i64 @_Z16testMultiplyBy25yy(i64 %a, i64) {
|
| +entry:
|
| + %mul = mul i64 %a, 25
|
| + ret i64 %mul
|
| +}
|
| +
|
| +define internal i64 @_Z19testMultiplyByNeg25yy(i64 %a, i64) {
|
| +entry:
|
| + %mul = mul i64 %a, -25
|
| + ret i64 %mul
|
| +}
|
| +
|
| +define internal i32 @_Z17testMultiplyBy100bb(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i1
|
| + %a.arg_trunc.ret_ext = zext i1 %a.arg_trunc to i32
|
| + ret i32 %a.arg_trunc.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z20testMultiplyByNeg100bb(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i1
|
| + %a.arg_trunc.ret_ext = zext i1 %a.arg_trunc to i32
|
| + ret i32 %a.arg_trunc.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z17testMultiplyBy100hh(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i8
|
| + %conv = zext i8 %a.arg_trunc to i32
|
| + %mul = mul i32 %conv, 100
|
| + %conv1 = trunc i32 %mul to i8
|
| + %conv1.ret_ext = zext i8 %conv1 to i32
|
| + ret i32 %conv1.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z20testMultiplyByNeg100hh(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i8
|
| + %conv = zext i8 %a.arg_trunc to i32
|
| + %mul = mul i32 %conv, -100
|
| + %conv1 = trunc i32 %mul to i8
|
| + %conv1.ret_ext = zext i8 %conv1 to i32
|
| + ret i32 %conv1.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z17testMultiplyBy100tt(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i16
|
| + %conv = zext i16 %a.arg_trunc to i32
|
| + %mul = mul i32 %conv, 100
|
| + %conv1 = trunc i32 %mul to i16
|
| + %conv1.ret_ext = zext i16 %conv1 to i32
|
| + ret i32 %conv1.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z20testMultiplyByNeg100tt(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i16
|
| + %conv = zext i16 %a.arg_trunc to i32
|
| + %mul = mul i32 %conv, -100
|
| + %conv1 = trunc i32 %mul to i16
|
| + %conv1.ret_ext = zext i16 %conv1 to i32
|
| + ret i32 %conv1.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z17testMultiplyBy100jj(i32 %a, i32) {
|
| +entry:
|
| + %mul = mul i32 %a, 100
|
| + ret i32 %mul
|
| +}
|
| +
|
| +define internal i32 @_Z20testMultiplyByNeg100jj(i32 %a, i32) {
|
| +entry:
|
| + %mul = mul i32 %a, -100
|
| + ret i32 %mul
|
| +}
|
| +
|
| +define internal i64 @_Z17testMultiplyBy100yy(i64 %a, i64) {
|
| +entry:
|
| + %mul = mul i64 %a, 100
|
| + ret i64 %mul
|
| +}
|
| +
|
| +define internal i64 @_Z20testMultiplyByNeg100yy(i64 %a, i64) {
|
| +entry:
|
| + %mul = mul i64 %a, -100
|
| + ret i64 %mul
|
| +}
|
| +
|
| +define internal i32 @_Z17testMultiplyBy232bb(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i1
|
| + %a.arg_trunc.ret_ext = zext i1 %a.arg_trunc to i32
|
| + ret i32 %a.arg_trunc.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z20testMultiplyByNeg232bb(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i1
|
| + %a.arg_trunc.ret_ext = zext i1 %a.arg_trunc to i32
|
| + ret i32 %a.arg_trunc.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z17testMultiplyBy232hh(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i8
|
| + %conv = zext i8 %a.arg_trunc to i32
|
| + %mul = mul i32 %conv, 232
|
| + %conv1 = trunc i32 %mul to i8
|
| + %conv1.ret_ext = zext i8 %conv1 to i32
|
| + ret i32 %conv1.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z20testMultiplyByNeg232hh(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i8
|
| + %conv = zext i8 %a.arg_trunc to i32
|
| + %mul = mul i32 %conv, -232
|
| + %conv1 = trunc i32 %mul to i8
|
| + %conv1.ret_ext = zext i8 %conv1 to i32
|
| + ret i32 %conv1.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z17testMultiplyBy232tt(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i16
|
| + %conv = zext i16 %a.arg_trunc to i32
|
| + %mul = mul i32 %conv, 232
|
| + %conv1 = trunc i32 %mul to i16
|
| + %conv1.ret_ext = zext i16 %conv1 to i32
|
| + ret i32 %conv1.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z20testMultiplyByNeg232tt(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i16
|
| + %conv = zext i16 %a.arg_trunc to i32
|
| + %mul = mul i32 %conv, -232
|
| + %conv1 = trunc i32 %mul to i16
|
| + %conv1.ret_ext = zext i16 %conv1 to i32
|
| + ret i32 %conv1.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z17testMultiplyBy232jj(i32 %a, i32) {
|
| +entry:
|
| + %mul = mul i32 %a, 232
|
| + ret i32 %mul
|
| +}
|
| +
|
| +define internal i32 @_Z20testMultiplyByNeg232jj(i32 %a, i32) {
|
| +entry:
|
| + %mul = mul i32 %a, -232
|
| + ret i32 %mul
|
| +}
|
| +
|
| +define internal i64 @_Z17testMultiplyBy232yy(i64 %a, i64) {
|
| +entry:
|
| + %mul = mul i64 %a, 232
|
| + ret i64 %mul
|
| +}
|
| +
|
| +define internal i64 @_Z20testMultiplyByNeg232yy(i64 %a, i64) {
|
| +entry:
|
| + %mul = mul i64 %a, -232
|
| + ret i64 %mul
|
| +}
|
| +
|
| +define internal i32 @_Z24testMultiplyBy0x00FFF001bb(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i1
|
| + %a.arg_trunc.ret_ext = zext i1 %a.arg_trunc to i32
|
| + ret i32 %a.arg_trunc.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z27testMultiplyByNeg0x00FFF001bb(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i1
|
| + %a.arg_trunc.ret_ext = zext i1 %a.arg_trunc to i32
|
| + ret i32 %a.arg_trunc.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z24testMultiplyBy0x00FFF001hh(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i8
|
| + %conv = zext i8 %a.arg_trunc to i32
|
| + %mul = mul i32 %conv, 16773121
|
| + %conv1 = trunc i32 %mul to i8
|
| + %conv1.ret_ext = zext i8 %conv1 to i32
|
| + ret i32 %conv1.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z27testMultiplyByNeg0x00FFF001hh(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i8
|
| + %conv = zext i8 %a.arg_trunc to i32
|
| + %mul = mul i32 %conv, -16773121
|
| + %conv1 = trunc i32 %mul to i8
|
| + %conv1.ret_ext = zext i8 %conv1 to i32
|
| + ret i32 %conv1.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z24testMultiplyBy0x00FFF001tt(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i16
|
| + %conv = zext i16 %a.arg_trunc to i32
|
| + %mul = mul i32 %conv, 16773121
|
| + %conv1 = trunc i32 %mul to i16
|
| + %conv1.ret_ext = zext i16 %conv1 to i32
|
| + ret i32 %conv1.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z27testMultiplyByNeg0x00FFF001tt(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i16
|
| + %conv = zext i16 %a.arg_trunc to i32
|
| + %mul = mul i32 %conv, -16773121
|
| + %conv1 = trunc i32 %mul to i16
|
| + %conv1.ret_ext = zext i16 %conv1 to i32
|
| + ret i32 %conv1.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z24testMultiplyBy0x00FFF001jj(i32 %a, i32) {
|
| +entry:
|
| + %mul = mul i32 %a, 16773121
|
| + ret i32 %mul
|
| +}
|
| +
|
| +define internal i32 @_Z27testMultiplyByNeg0x00FFF001jj(i32 %a, i32) {
|
| +entry:
|
| + %mul = mul i32 %a, -16773121
|
| + ret i32 %mul
|
| +}
|
| +
|
| +define internal i64 @_Z24testMultiplyBy0x00FFF001yy(i64 %a, i64) {
|
| +entry:
|
| + %mul = mul i64 %a, 16773121
|
| + ret i64 %mul
|
| +}
|
| +
|
| +define internal i64 @_Z27testMultiplyByNeg0x00FFF001yy(i64 %a, i64) {
|
| +entry:
|
| + %mul = mul i64 %a, -16773121
|
| + ret i64 %mul
|
| +}
|
| +
|
| +define internal i32 @_Z24testMultiplyBy0x01000000bb(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i1
|
| + %a.arg_trunc.ret_ext = zext i1 %a.arg_trunc to i32
|
| + ret i32 %a.arg_trunc.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z27testMultiplyByNeg0x01000000bb(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i1
|
| + %a.arg_trunc.ret_ext = zext i1 %a.arg_trunc to i32
|
| + ret i32 %a.arg_trunc.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z24testMultiplyBy0x01000000hh(i32 %a, i32) {
|
| +entry:
|
| + %.ret_ext = zext i8 0 to i32
|
| + ret i32 %.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z27testMultiplyByNeg0x01000000hh(i32 %a, i32) {
|
| +entry:
|
| + %.ret_ext = zext i8 0 to i32
|
| + ret i32 %.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z24testMultiplyBy0x01000000tt(i32 %a, i32) {
|
| +entry:
|
| + %.ret_ext = zext i16 0 to i32
|
| + ret i32 %.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z27testMultiplyByNeg0x01000000tt(i32 %a, i32) {
|
| +entry:
|
| + %.ret_ext = zext i16 0 to i32
|
| + ret i32 %.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z24testMultiplyBy0x01000000jj(i32 %a, i32) {
|
| +entry:
|
| + %mul = shl i32 %a, 24
|
| + ret i32 %mul
|
| +}
|
| +
|
| +define internal i32 @_Z27testMultiplyByNeg0x01000000jj(i32 %a, i32) {
|
| +entry:
|
| + %mul = mul i32 %a, -16777216
|
| + ret i32 %mul
|
| +}
|
| +
|
| +define internal i64 @_Z24testMultiplyBy0x01000000yy(i64 %a, i64) {
|
| +entry:
|
| + %mul = shl i64 %a, 24
|
| + ret i64 %mul
|
| +}
|
| +
|
| +define internal i64 @_Z27testMultiplyByNeg0x01000000yy(i64 %a, i64) {
|
| +entry:
|
| + %mul = mul i64 %a, -16777216
|
| + ret i64 %mul
|
| +}
|
| +
|
| +define internal i32 @_Z24testMultiplyBy0x7FFFF07Fbb(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i1
|
| + %a.arg_trunc.ret_ext = zext i1 %a.arg_trunc to i32
|
| + ret i32 %a.arg_trunc.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z27testMultiplyByNeg0x7FFFF07Fbb(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i1
|
| + %a.arg_trunc.ret_ext = zext i1 %a.arg_trunc to i32
|
| + ret i32 %a.arg_trunc.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z24testMultiplyBy0x7FFFF07Fhh(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i8
|
| + %conv = zext i8 %a.arg_trunc to i32
|
| + %mul = mul i32 %conv, 2147479679
|
| + %conv1 = trunc i32 %mul to i8
|
| + %conv1.ret_ext = zext i8 %conv1 to i32
|
| + ret i32 %conv1.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z27testMultiplyByNeg0x7FFFF07Fhh(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i8
|
| + %conv = zext i8 %a.arg_trunc to i32
|
| + %mul = mul i32 %conv, -2147479679
|
| + %conv1 = trunc i32 %mul to i8
|
| + %conv1.ret_ext = zext i8 %conv1 to i32
|
| + ret i32 %conv1.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z24testMultiplyBy0x7FFFF07Ftt(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i16
|
| + %conv = zext i16 %a.arg_trunc to i32
|
| + %mul = mul i32 %conv, 2147479679
|
| + %conv1 = trunc i32 %mul to i16
|
| + %conv1.ret_ext = zext i16 %conv1 to i32
|
| + ret i32 %conv1.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z27testMultiplyByNeg0x7FFFF07Ftt(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i16
|
| + %conv = zext i16 %a.arg_trunc to i32
|
| + %mul = mul i32 %conv, -2147479679
|
| + %conv1 = trunc i32 %mul to i16
|
| + %conv1.ret_ext = zext i16 %conv1 to i32
|
| + ret i32 %conv1.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z24testMultiplyBy0x7FFFF07Fjj(i32 %a, i32) {
|
| +entry:
|
| + %mul = mul i32 %a, 2147479679
|
| + ret i32 %mul
|
| +}
|
| +
|
| +define internal i32 @_Z27testMultiplyByNeg0x7FFFF07Fjj(i32 %a, i32) {
|
| +entry:
|
| + %mul = mul i32 %a, -2147479679
|
| + ret i32 %mul
|
| +}
|
| +
|
| +define internal i64 @_Z24testMultiplyBy0x7FFFF07Fyy(i64 %a, i64) {
|
| +entry:
|
| + %mul = mul i64 %a, 2147479679
|
| + ret i64 %mul
|
| +}
|
| +
|
| +define internal i64 @_Z27testMultiplyByNeg0x7FFFF07Fyy(i64 %a, i64) {
|
| +entry:
|
| + %mul = mul i64 %a, -2147479679
|
| + ret i64 %mul
|
| +}
|
| +
|
| +define internal i32 @_Z24testMultiplyBy0x80000000bb(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i1
|
| + %a.arg_trunc.ret_ext = zext i1 %a.arg_trunc to i32
|
| + ret i32 %a.arg_trunc.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z27testMultiplyByNeg0x80000000bb(i32 %a, i32) {
|
| +entry:
|
| + %a.arg_trunc = trunc i32 %a to i1
|
| + %a.arg_trunc.ret_ext = zext i1 %a.arg_trunc to i32
|
| + ret i32 %a.arg_trunc.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z24testMultiplyBy0x80000000hh(i32 %a, i32) {
|
| +entry:
|
| + %.ret_ext = zext i8 0 to i32
|
| + ret i32 %.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z27testMultiplyByNeg0x80000000hh(i32 %a, i32) {
|
| +entry:
|
| + %.ret_ext = zext i8 0 to i32
|
| + ret i32 %.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z24testMultiplyBy0x80000000tt(i32 %a, i32) {
|
| +entry:
|
| + %.ret_ext = zext i16 0 to i32
|
| + ret i32 %.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z27testMultiplyByNeg0x80000000tt(i32 %a, i32) {
|
| +entry:
|
| + %.ret_ext = zext i16 0 to i32
|
| + ret i32 %.ret_ext
|
| +}
|
| +
|
| +define internal i32 @_Z24testMultiplyBy0x80000000jj(i32 %a, i32) {
|
| +entry:
|
| + %mul = shl i32 %a, 31
|
| + ret i32 %mul
|
| +}
|
| +
|
| +define internal i32 @_Z27testMultiplyByNeg0x80000000jj(i32 %a, i32) {
|
| +entry:
|
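| +; -0x80000000 presumably wraps to 0x80000000 as a 32-bit unsigned literal, so
|
| +; the negative variant lowers to the same shl as the positive one.
|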
| + %mul = shl i32 %a, 31
|
| + ret i32 %mul
|
| +}
|
| +
|
| +define internal i64 @_Z24testMultiplyBy0x80000000yy(i64 %a, i64) {
|
| +entry:
|
| + %mul = shl i64 %a, 31
|
| + ret i64 %mul
|
| +}
|
| +
|
| +define internal i64 @_Z27testMultiplyByNeg0x80000000yy(i64 %a, i64) {
|
| +entry:
|
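| +; The wrapped 32-bit literal zero-extends to 64 bits, so even the 64-bit
|
| +; negative variant is shl by 31 rather than a multiply by a negative constant.
|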
| + %mul = shl i64 %a, 31
|
| + ret i64 %mul
|
| +}
|