Index: test/NaCl/PNaClABI/instructions.ll
diff --git a/test/NaCl/PNaClABI/instructions.ll b/test/NaCl/PNaClABI/instructions.ll
index 1a3947c93d5474ee08882259028ca976855de6be..dacc29d697fd680c4bd52ee265e347fb9966302f 100644
--- a/test/NaCl/PNaClABI/instructions.ll
+++ b/test/NaCl/PNaClABI/instructions.ll
@@ -145,10 +145,10 @@ define internal void @memory() {
 ; Memory operations
   %a1 = alloca i8, i32 4
   %ptr = inttoptr i32 0 to i32*
-  %a2 = load i32* %ptr, align 1
+  %a2 = load i32, i32* %ptr, align 1
   store i32 undef, i32* %ptr, align 1
 ; CHECK-NOT: disallowed
-  %a4 = getelementptr { i32, i32}* undef ; CHECK-NEXT: disallowed: bad instruction opcode: {{.*}} getelementptr
+  %a4 = getelementptr { i32, i32}, { i32, i32}* undef ; CHECK-NEXT: disallowed: bad instruction opcode: {{.*}} getelementptr
   ret void
 }
@@ -160,10 +160,10 @@ define internal void @vector_memory() {
   %ptr16xi8 = inttoptr i32 0 to <16 x i8>*
   %ptr8xi16 = inttoptr i32 0 to <8 x i16>*
   %ptr4xi32 = inttoptr i32 0 to <4 x i32>*
   %ptr4xfloat = inttoptr i32 0 to <4 x float>*
-  %l16xi8 = load <16 x i8>* %ptr16xi8, align 1
-  %l8xi16 = load <8 x i16>* %ptr8xi16, align 2
-  %l4xi32 = load <4 x i32>* %ptr4xi32, align 4
-  %l4xfloat = load <4 x float>* %ptr4xfloat, align 4
+  %l16xi8 = load <16 x i8>, <16 x i8>* %ptr16xi8, align 1
+  %l8xi16 = load <8 x i16>, <8 x i16>* %ptr8xi16, align 2
+  %l4xi32 = load <4 x i32>, <4 x i32>* %ptr4xi32, align 4
+  %l4xfloat = load <4 x float>, <4 x float>* %ptr4xfloat, align 4
   store <16 x i8> undef, <16 x i8>* %ptr16xi8, align 1
   store <8 x i16> undef, <8 x i16>* %ptr8xi16, align 2
@@ -200,23 +200,23 @@ define internal void @vector_memory() {
   %ptr8xdouble = inttoptr i32 0 to <8 x double>* ; CHECK-NEXT: disallowed: bad result type: <8 x double>*
 ; i1 vector pointers are simply disallowed, their alignment is inconsequential.
-  %l4xi1 = load <4 x i1>* %ptr4xi1, align 1 ; CHECK-NEXT: disallowed: bad pointer: %l4xi1 = load <4 x i1>* %ptr4xi1, align 1
-  %l8xi1 = load <8 x i1>* %ptr8xi1, align 1 ; CHECK-NEXT: disallowed: bad pointer: %l8xi1 = load <8 x i1>* %ptr8xi1, align 1
-  %l16xi1 = load <16 x i1>* %ptr16xi1, align 1 ; CHECK-NEXT: disallowed: bad pointer: %l16xi1 = load <16 x i1>* %ptr16xi1, align 1
+  %l4xi1 = load <4 x i1>, <4 x i1>* %ptr4xi1, align 1 ; CHECK-NEXT: disallowed: bad pointer: %l4xi1 = load <4 x i1>, <4 x i1>* %ptr4xi1, align 1
+  %l8xi1 = load <8 x i1>, <8 x i1>* %ptr8xi1, align 1 ; CHECK-NEXT: disallowed: bad pointer: %l8xi1 = load <8 x i1>, <8 x i1>* %ptr8xi1, align 1
+  %l16xi1 = load <16 x i1>, <16 x i1>* %ptr16xi1, align 1 ; CHECK-NEXT: disallowed: bad pointer: %l16xi1 = load <16 x i1>, <16 x i1>* %ptr16xi1, align 1
   store <4 x i1> undef, <4 x i1>* %ptr4xi1, align 1 ; CHECK-NEXT: disallowed: bad pointer: store <4 x i1> undef, <4 x i1>* %ptr4xi1, align 1
   store <8 x i1> undef, <8 x i1>* %ptr8xi1, align 1 ; CHECK-NEXT: disallowed: bad pointer: store <8 x i1> undef, <8 x i1>* %ptr8xi1, align 1
   store <16 x i1> undef, <16 x i1>* %ptr16xi1, align 1 ; CHECK-NEXT: disallowed: bad pointer: store <16 x i1> undef, <16 x i1>* %ptr16xi1, align 1
 ; Under- or over-aligned load/store are disallowed.
-  %a1_8xi16 = load <8 x i16>* %ptr8xi16, align 1 ; CHECK-NEXT: disallowed: bad alignment: %a1_8xi16 = load <8 x i16>* %ptr8xi16, align 1
-  %a1_4xi32 = load <4 x i32>* %ptr4xi32, align 1 ; CHECK-NEXT: disallowed: bad alignment: %a1_4xi32 = load <4 x i32>* %ptr4xi32, align 1
-  %a1_4xfloat = load <4 x float>* %ptr4xfloat, align 1 ; CHECK-NEXT: disallowed: bad alignment: %a1_4xfloat = load <4 x float>* %ptr4xfloat, align 1
+  %a1_8xi16 = load <8 x i16>, <8 x i16>* %ptr8xi16, align 1 ; CHECK-NEXT: disallowed: bad alignment: %a1_8xi16 = load <8 x i16>, <8 x i16>* %ptr8xi16, align 1
+  %a1_4xi32 = load <4 x i32>, <4 x i32>* %ptr4xi32, align 1 ; CHECK-NEXT: disallowed: bad alignment: %a1_4xi32 = load <4 x i32>, <4 x i32>* %ptr4xi32, align 1
+  %a1_4xfloat = load <4 x float>, <4 x float>* %ptr4xfloat, align 1 ; CHECK-NEXT: disallowed: bad alignment: %a1_4xfloat = load <4 x float>, <4 x float>* %ptr4xfloat, align 1
-  %a16_16xi8 = load <16 x i8>* %ptr16xi8, align 16 ; CHECK-NEXT: disallowed: bad alignment: %a16_16xi8 = load <16 x i8>* %ptr16xi8, align 16
-  %a16_8xi16 = load <8 x i16>* %ptr8xi16, align 16 ; CHECK-NEXT: disallowed: bad alignment: %a16_8xi16 = load <8 x i16>* %ptr8xi16, align 16
-  %a16_4xi32 = load <4 x i32>* %ptr4xi32, align 16 ; CHECK-NEXT: disallowed: bad alignment: %a16_4xi32 = load <4 x i32>* %ptr4xi32, align 16
-  %a16_4xfloat = load <4 x float>* %ptr4xfloat, align 16 ; CHECK-NEXT: disallowed: bad alignment: %a16_4xfloat = load <4 x float>* %ptr4xfloat, align 16
+  %a16_16xi8 = load <16 x i8>, <16 x i8>* %ptr16xi8, align 16 ; CHECK-NEXT: disallowed: bad alignment: %a16_16xi8 = load <16 x i8>, <16 x i8>* %ptr16xi8, align 16
+  %a16_8xi16 = load <8 x i16>, <8 x i16>* %ptr8xi16, align 16 ; CHECK-NEXT: disallowed: bad alignment: %a16_8xi16 = load <8 x i16>, <8 x i16>* %ptr8xi16, align 16
+  %a16_4xi32 = load <4 x i32>, <4 x i32>* %ptr4xi32, align 16 ; CHECK-NEXT: disallowed: bad alignment: %a16_4xi32 = load <4 x i32>, <4 x i32>* %ptr4xi32, align 16
+  %a16_4xfloat = load <4 x float>, <4 x float>* %ptr4xfloat, align 16 ; CHECK-NEXT: disallowed: bad alignment: %a16_4xfloat = load <4 x float>, <4 x float>* %ptr4xfloat, align 16
   store <8 x i16> undef, <8 x i16>* %ptr8xi16, align 1 ; CHECK-NEXT: disallowed: bad alignment: store <8 x i16> undef, <8 x i16>* %ptr8xi16, align 1
   store <4 x i32> undef, <4 x i32>* %ptr4xi32, align 1 ; CHECK-NEXT: disallowed: bad alignment: store <4 x i32> undef, <4 x i32>* %ptr4xi32, align 1
@@ -235,8 +235,8 @@ define internal void @atomic() {
   %ptr = inttoptr i32 0 to i32*
 ; CHECK-NOT: disallowed
-  %la = load atomic i32* %ptr seq_cst, align 4 ; CHECK: disallowed: atomic load: {{.*}} load atomic
-  %lv = load volatile i32* %ptr, align 4 ; CHECK: disallowed: volatile load: {{.*}} load volatile
+  %la = load atomic i32, i32* %ptr seq_cst, align 4 ; CHECK: disallowed: atomic load: {{.*}} load atomic
+  %lv = load volatile i32, i32* %ptr, align 4 ; CHECK: disallowed: volatile load: {{.*}} load volatile
   store atomic i32 undef, i32* %ptr seq_cst, align 4 ; CHECK: disallowed: atomic store: store atomic
   store volatile i32 undef, i32* %ptr, align 4 ; CHECK: disallowed: volatile store: store volatile
   fence acq_rel ; CHECK: disallowed: bad instruction opcode: fence
@@ -250,8 +250,8 @@ define internal void @atomic_vector() {
   %ptr = inttoptr i32 0 to <4 x i32>*
 ; CHECK-NOT: disallowed
-  %la = load atomic <4 x i32>* %ptr seq_cst, align 1 ; CHECK: disallowed: atomic load: {{.*}} load atomic
-  %lv = load volatile <4 x i32>* %ptr, align 1 ; CHECK: disallowed: volatile load: {{.*}} load volatile
+  %la = load atomic <4 x i32>, <4 x i32>* %ptr seq_cst, align 1 ; CHECK: disallowed: atomic load: {{.*}} load atomic
+  %lv = load volatile <4 x i32>, <4 x i32>* %ptr, align 1 ; CHECK: disallowed: volatile load: {{.*}} load volatile
   store atomic <4 x i32> undef, <4 x i32>* %ptr seq_cst, align 1 ; CHECK: disallowed: atomic store: store atomic
   store volatile <4 x i32> undef, <4 x i32>* %ptr, align 1 ; CHECK: disallowed: volatile store: store volatile
   ret void
@@ -413,7 +413,7 @@ define internal i32 @va_arg(i32 %va_list_as_int) {
 define internal void @constantexpr() {
 ; CHECK: ERROR: Function constantexpr
-  ptrtoint i8* getelementptr ([4 x i8]* @global_var, i32 1, i32 0) to i32
+  ptrtoint i8* getelementptr ([4 x i8], [4 x i8]* @global_var, i32 1, i32 0) to i32
   ret void
 }
 ; CHECK-NOT: disallowed
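
For reference, the mechanical change applied throughout this patch: starting with LLVM 3.7, load and getelementptr take the pointee type as an explicit first operand instead of inferring it from the pointer operand's type. A minimal before/after sketch in LLVM IR, where %p, %s, %v, and %e are illustrative names, not taken from the test:

    ; Old syntax: the loaded/indexed type was implied by the pointer type.
    %v = load i32* %p, align 4
    %e = getelementptr { i32, i32 }* %s, i32 0, i32 1

    ; New syntax: the type is spelled out before the pointer operand.
    %v = load i32, i32* %p, align 4
    %e = getelementptr { i32, i32 }, { i32, i32 }* %s, i32 0, i32 1

The PNaCl ABI checker quotes each offending instruction verbatim in its diagnostics, which is why the expected-error strings in the CHECK lines had to be updated in lockstep with the instructions themselves.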