; RUN: opt -globalize-constant-vectors %s -S | FileCheck %s
; RUN: opt -globalize-constant-vectors %s -S | FileCheck -check-prefix=C16xi8 %s
; RUN: opt -globalize-constant-vectors %s -S | FileCheck -check-prefix=C8xi16 %s
; RUN: opt -globalize-constant-vectors %s -S | FileCheck -check-prefix=C4xi32 %s
; RUN: opt -globalize-constant-vectors %s -S | FileCheck -check-prefix=C4xfloat %s
; RUN: opt -globalize-constant-vectors %s -S | FileCheck -check-prefix=Cbranch %s
; RUN: opt -globalize-constant-vectors %s -S | FileCheck -check-prefix=Cduplicate %s

; The datalayout is needed to determine the alignment of the globals.
target datalayout = "e-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-p:32:32:32-v128:32:32"
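; Note: v128:32:32 gives the 128-bit vector types used below a 4-byte ABI
; alignment, which is why the globalized constants and their loads are
; expected to carry "align 4".
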
; Globals shouldn't get globalized.
; CHECK: @global_should_stay_untouched = internal constant <4 x i32> <i32 1337, i32 0, i32 0, i32 0>
@global_should_stay_untouched = internal constant <4 x i32> <i32 1337, i32 0, i32 0, i32 0>

; 16xi8 vectors should get globalized.
define void @test16xi8(<16 x i8> %in) {
  %nonsquares = add <16 x i8> %in, <i8 2, i8 3, i8 5, i8 6, i8 7, i8 8, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17, i8 18, i8 19, i8 20>
  %sort = add <16 x i8> <i8 0, i8 1, i8 3, i8 5, i8 9, i8 11, i8 14, i8 17, i8 25, i8 27, i8 30, i8 33, i8 38, i8 41, i8 45, i8 49>, %in
  ret void
}
; C16xi8: @[[C1:[_a-z0-9]+]] = internal constant <16 x i8> <i8 2, i8 3, i8 5, i8 6, i8 7, i8 8, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17, i8 18, i8 19, i8 20>, align 4
; C16xi8: @[[C2:[_a-z0-9]+]] = internal constant <16 x i8> <i8 0, i8 1, i8 3, i8 5, i8 9, i8 11, i8 14, i8 17, i8 25, i8 27, i8 30, i8 33, i8 38, i8 41, i8 45, i8 49>, align 4
; C16xi8: define void @test16xi8(<16 x i8> %in) {
; C16xi8-NEXT: %[[M1:[_a-z0-9]+]] = load <16 x i8>* @[[C1]], align 4
; C16xi8-NEXT: %[[M2:[_a-z0-9]+]] = load <16 x i8>* @[[C2]], align 4
; C16xi8-NEXT: %nonsquares = add <16 x i8> %in, %[[M1]]
; C16xi8-NEXT: %sort = add <16 x i8> %[[M2]], %in
; C16xi8-NEXT: ret void

; 8xi16 vectors should get globalized.
define void @test8xi16(<8 x i16> %in) {
  %fib = add <8 x i16> %in, <i16 0, i16 1, i16 1, i16 2, i16 3, i16 5, i16 8, i16 13>
  %answer = add <8 x i16> <i16 42, i16 42, i16 42, i16 42, i16 42, i16 42, i16 42, i16 42>, %in
  ret void
}
; C8xi16: @[[C1:[_a-z0-9]+]] = internal constant <8 x i16> <i16 0, i16 1, i16 1, i16 2, i16 3, i16 5, i16 8, i16 13>, align 4
; C8xi16: @[[C2:[_a-z0-9]+]] = internal constant <8 x i16> <i16 42, i16 42, i16 42, i16 42, i16 42, i16 42, i16 42, i16 42>, align 4
; C8xi16: define void @test8xi16(<8 x i16> %in) {
; C8xi16-NEXT: %[[M1:[_a-z0-9]+]] = load <8 x i16>* @[[C1]], align 4
; C8xi16-NEXT: %[[M2:[_a-z0-9]+]] = load <8 x i16>* @[[C2]], align 4
; C8xi16-NEXT: %fib = add <8 x i16> %in, %[[M1]]
; C8xi16-NEXT: %answer = add <8 x i16> %[[M2]], %in
; C8xi16-NEXT: ret void

; 4xi32 vectors should get globalized.
define void @test4xi32(<4 x i32> %in) {
  %tetrahedral = add <4 x i32> %in, <i32 1, i32 4, i32 10, i32 20>
  %serauqs = add <4 x i32> <i32 1, i32 4, i32 9, i32 61>, %in
  ret void
}
; C4xi32: @[[C1:[_a-z0-9]+]] = internal constant <4 x i32> <i32 1, i32 4, i32 10, i32 20>, align 4
; C4xi32: @[[C2:[_a-z0-9]+]] = internal constant <4 x i32> <i32 1, i32 4, i32 9, i32 61>, align 4
; C4xi32: define void @test4xi32(<4 x i32> %in) {
; C4xi32-NEXT: %[[M1:[_a-z0-9]+]] = load <4 x i32>* @[[C1]], align 4
; C4xi32-NEXT: %[[M2:[_a-z0-9]+]] = load <4 x i32>* @[[C2]], align 4
; C4xi32-NEXT: %tetrahedral = add <4 x i32> %in, %[[M1]]
; C4xi32-NEXT: %serauqs = add <4 x i32> %[[M2]], %in
; C4xi32-NEXT: ret void

; 4xfloat vectors should get globalized.
define void @test4xfloat(<4 x float> %in) {
  %polyhex = fadd <4 x float> %in, <float 1., float 1., float 3., float 7.>
  %poset = fadd <4 x float> <float 1., float 1., float 3., float 19.>, %in
  ret void
}
; C4xfloat: @[[C1:[_a-z0-9]+]] = internal constant <4 x float> <float 1.000000e+00, float 1.000000e+00, float 3.000000e+00, float 7.000000e+00>, align 4
; C4xfloat: @[[C2:[_a-z0-9]+]] = internal constant <4 x float> <float 1.000000e+00, float 1.000000e+00, float 3.000000e+00, float 1.900000e+01>, align 4
; C4xfloat: define void @test4xfloat(<4 x float> %in) {
; C4xfloat-NEXT: %[[M1:[_a-z0-9]+]] = load <4 x float>* @[[C1]], align 4
; C4xfloat-NEXT: %[[M2:[_a-z0-9]+]] = load <4 x float>* @[[C2]], align 4
; C4xfloat-NEXT: %polyhex = fadd <4 x float> %in, %[[M1]]
; C4xfloat-NEXT: %poset = fadd <4 x float> %[[M2]], %in
; C4xfloat-NEXT: ret void

; Globalized constant loads have to dominate their use.
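; To guarantee this, the checks below expect both materializing loads at the
; top of the entry block, before the branch.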
define void @testbranch(i1 %cond, <4 x i32> %in) {
  br i1 %cond, label %lhs, label %rhs
lhs:
  %from_lhs = add <4 x i32> %in, <i32 1, i32 1, i32 2, i32 2>
  br label %done
rhs:
  %from_rhs = add <4 x i32> <i32 2, i32 2, i32 1, i32 1>, %in
  br label %done
done:
  %merged = phi <4 x i32> [ %from_lhs, %lhs ], [ %from_rhs, %rhs ]
  ret void
}
; Cbranch: @[[C1:[_a-z0-9]+]] = internal constant <4 x i32> <i32 1, i32 1, i32 2, i32 2>, align 4
; Cbranch: @[[C2:[_a-z0-9]+]] = internal constant <4 x i32> <i32 2, i32 2, i32 1, i32 1>, align 4
; Cbranch: define void @testbranch(i1 %cond, <4 x i32> %in) {
; Cbranch-NEXT: %[[M1:[_a-z0-9]+]] = load <4 x i32>* @[[C1]], align 4
; Cbranch-NEXT: %[[M2:[_a-z0-9]+]] = load <4 x i32>* @[[C2]], align 4
; Cbranch-NEXT: br i1 %cond, label %lhs, label %rhs
; Cbranch: lhs:
; Cbranch-NEXT: %from_lhs = add <4 x i32> %in, %[[M1]]
; Cbranch-NEXT: br label %done
; Cbranch: rhs:
; Cbranch-NEXT: %from_rhs = add <4 x i32> %[[M2]], %in
; Cbranch-NEXT: br label %done
; Cbranch: done:
; Cbranch-NEXT: %merged = phi <4 x i32> [ %from_lhs, %lhs ], [ %from_rhs, %rhs ]
; Cbranch-NEXT: ret void

; Constants that are redundant across functions should be materialized
; separately in each function.
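; The checks below expect two distinct globals with identical initializers,
; one per function, rather than a single constant shared by both.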
define void @testduplicate1() {
  %foo = add <4 x i32> <i32 1, i32 1, i32 1, i32 1>, <i32 0, i32 0, i32 0, i32 0>
  ret void
}
define void @testduplicate2() {
  %foo = add <4 x i32> <i32 1, i32 1, i32 1, i32 1>, <i32 0, i32 0, i32 0, i32 0>
  ret void
}
; Cduplicate: @[[C1:[_a-z0-9]+]] = internal constant <4 x i32> <i32 1, i32 1, i32 1, i32 1>, align 4
; Cduplicate: @[[C2:[_a-z0-9]+]] = internal constant <4 x i32> <i32 1, i32 1, i32 1, i32 1>, align 4
; Cduplicate: define void @testduplicate1() {
; Cduplicate-NEXT: %[[M1:[_a-z0-9]+]] = load <4 x i32>* @[[C1]], align 4
; Cduplicate-NEXT: %foo = add <4 x i32> %[[M1]], zeroinitializer
; Cduplicate-NEXT: ret void
; Cduplicate: define void @testduplicate2() {
; Cduplicate-NEXT: %[[M1:[_a-z0-9]+]] = load <4 x i32>* @[[C2]], align 4
; Cduplicate-NEXT: %foo = add <4 x i32> %[[M1]], zeroinitializer
; Cduplicate-NEXT: ret void