#include "Test.h"
#include "Sk4x.h"

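// equal() and notEqual() compare lane-by-lane and allTrue() requires every lane of the
// resulting mask to be set, so these macros assert that a comparison holds (or fails)
// in all four lanes.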
#define ASSERT_EQ(a, b) REPORTER_ASSERT(r, a.equal(b).allTrue())
#define ASSERT_NE(a, b) REPORTER_ASSERT(r, a.notEqual(b).allTrue())

DEF_TEST(Sk4x_Construction, r) {
    Sk4f uninitialized;
    Sk4f zero(0,0,0,0);
    Sk4f foo(1,2,3,4),
         bar(foo),
         baz = bar;
    ASSERT_EQ(foo, bar);
    ASSERT_EQ(bar, baz);
    ASSERT_EQ(baz, foo);
}

struct AlignedFloats {
    Sk4f forces16ByteAlignment;  // On 64-bit machines, the stack starts 128-bit aligned,
    float fs[5];                 // but not necessarily so on 32-bit.  Adding an Sk4f forces it.
};

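// Sk4f packs four floats into 16 bytes, so with it as the first member the compiler
// should place fs at offset 16 (still 16-byte aligned) and fs+1 at offset 20 (not).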
DEF_TEST(Sk4x_LoadStore, r) {
    AlignedFloats aligned;
    // fs will be 16-byte aligned, fs+1 not.
    float* fs = aligned.fs;
    for (int i = 0; i < 5; i++) {  // set to 5,6,7,8,9
        fs[i] = float(i+5);
    }

    Sk4f foo = Sk4f::Load(fs);
    Sk4f bar = Sk4f::LoadAligned(fs);
    ASSERT_EQ(foo, bar);

    foo = Sk4f::Load(fs+1);
    ASSERT_NE(foo, bar);

    foo.storeAligned(fs);
    bar.store(fs+1);
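    // Now foo == {6,7,8,9} and bar == {5,6,7,8}: storeAligned wrote foo over fs[0..3],
    // then store wrote bar over fs[1..4], leaving fs == {6,5,6,7,8}.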
    REPORTER_ASSERT(r, fs[0] == 6 &&
                       fs[1] == 5 &&
                       fs[2] == 6 &&
                       fs[3] == 7 &&
                       fs[4] == 8);
}

DEF_TEST(Sk4x_Conversions, r) {
    // Assuming IEEE floats.
    Sk4f zerof(0,0,0,0);
    Sk4i zeroi(0,0,0,0);
    ASSERT_EQ(zeroi, zerof.cast<Sk4i>());
    ASSERT_EQ(zeroi, zerof.reinterpret<Sk4i>());
    ASSERT_EQ(zerof, zeroi.cast<Sk4f>());
    ASSERT_EQ(zerof, zeroi.reinterpret<Sk4f>());

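    // cast() converts values while reinterpret() keeps the raw bits. Zero happens to
    // share the same representation either way, but 2.0f is 0x40000000 as an integer.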
    Sk4f twof(2,2,2,2);
    Sk4i twoi(2,2,2,2);
    ASSERT_EQ(twoi, twof.cast<Sk4i>());
    ASSERT_NE(twoi, twof.reinterpret<Sk4i>());
    ASSERT_EQ(twof, twoi.cast<Sk4f>());
    ASSERT_NE(twof, twoi.reinterpret<Sk4f>());

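    // These check that float-to-int cast() truncates toward zero rather than rounding.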
    ASSERT_EQ(Sk4i(0,0,0,0), Sk4f(0.5f, 0.49f, 0.51f, 0.99f).cast<Sk4i>());
    ASSERT_EQ(Sk4i(1,1,1,1), Sk4f(1.5f, 1.49f, 1.51f, 1.99f).cast<Sk4i>());
}

DEF_TEST(Sk4x_Bits, r) {
    ASSERT_EQ(Sk4i(0,0,0,0).bitNot(), Sk4i(-1,-1,-1,-1));

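    // & and | also work lane-by-lane: {2&1, 3&3, 4&5, 5&7} == {0,3,4,5} and
    // {2|1, 3|3, 4|5, 5|7} == {3,3,5,7}.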
    Sk4i a(2,3,4,5),
         b(1,3,5,7);
    ASSERT_EQ(Sk4i(0,3,4,5), a & b);
    ASSERT_EQ(Sk4i(3,3,5,7), a | b);
}

DEF_TEST(Sk4x_Arith, r) {
    ASSERT_EQ(Sk4f(4,6,8,10), Sk4f(1,2,3,4) + Sk4f(3,4,5,6));
    ASSERT_EQ(Sk4f(-2,-2,-2,-2), Sk4f(1,2,3,4) - Sk4f(3,4,5,6));
    ASSERT_EQ(Sk4f(3,8,15,24), Sk4f(1,2,3,4) * Sk4f(3,4,5,6));

    ASSERT_EQ(Sk4f(-1,-2,-3,-4), -Sk4f(1,2,3,4));

    float third = 1.0f/3.0f;
    ASSERT_EQ(Sk4f(1*third, 0.5f, 0.6f, 2*third), Sk4f(1,2,3,4) / Sk4f(3,4,5,6));
    ASSERT_EQ(Sk4i(4,6,8,10), Sk4i(1,2,3,4) + Sk4i(3,4,5,6));
    ASSERT_EQ(Sk4i(-2,-2,-2,-2), Sk4i(1,2,3,4) - Sk4i(3,4,5,6));
    ASSERT_EQ(Sk4i(3,8,15,24), Sk4i(1,2,3,4) * Sk4i(3,4,5,6));
}

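// Sk4f(2.0f) broadcasts the scalar into all four lanes; as the test name suggests,
// the promotion from scalar to Sk4f has to be written out explicitly.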
DEF_TEST(Sk4x_ExplicitPromotion, r) {
    ASSERT_EQ(Sk4f(2,4,6,8), Sk4f(1,2,3,4) * Sk4f(2.0f));
}

DEF_TEST(Sk4x_Sqrt, r) {
    Sk4f squares(4, 16, 25, 121),
         roots(2, 4, 5, 11);
    // .sqrt() should be pretty precise.
    Sk4f error = roots.subtract(squares.sqrt());
    REPORTER_ASSERT(r, (error > Sk4f(-0.000001f)).allTrue());
    REPORTER_ASSERT(r, (error < Sk4f(+0.000001f)).allTrue());

    // .rsqrt() isn't so precise (for SSE), but should be pretty close.
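    // (On SSE it presumably maps to the approximate rsqrtps instruction, which is only
    // good to roughly 12 bits, hence the looser 0.01 tolerance.)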
    error = roots.subtract(squares.multiply(squares.rsqrt()));
    REPORTER_ASSERT(r, (error > Sk4f(-0.01f)).allTrue());
    REPORTER_ASSERT(r, (error < Sk4f(+0.01f)).allTrue());
}

DEF_TEST(Sk4x_Comparison, r) {
    ASSERT_EQ(Sk4f(1,2,3,4), Sk4f(1,2,3,4));
    ASSERT_NE(Sk4f(4,3,2,1), Sk4f(1,2,3,4));

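    // Lane-wise comparison operators return an Sk4i mask: -1 (all bits set) where the
    // comparison holds, 0 where it doesn't.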
    ASSERT_EQ(Sk4i(-1,-1,0,-1), Sk4f(1,2,5,4) == Sk4f(1,2,3,4));

    ASSERT_EQ(Sk4i(-1,-1,-1,-1), Sk4f(1,2,3,4) < Sk4f(2,3,4,5));
    ASSERT_EQ(Sk4i(-1,-1,-1,-1), Sk4f(1,2,3,4) <= Sk4f(2,3,4,5));
    ASSERT_EQ(Sk4i(0,0,0,0), Sk4f(1,2,3,4) > Sk4f(2,3,4,5));
    ASSERT_EQ(Sk4i(0,0,0,0), Sk4f(1,2,3,4) >= Sk4f(2,3,4,5));

    ASSERT_EQ(Sk4i(1,2,3,4), Sk4i(1,2,3,4));
    ASSERT_NE(Sk4i(4,3,2,1), Sk4i(1,2,3,4));

    ASSERT_EQ(Sk4i(-1,-1,0,-1), Sk4i(1,2,5,4) == Sk4i(1,2,3,4));

    ASSERT_EQ(Sk4i(-1,-1,-1,-1), Sk4i(1,2,3,4) < Sk4i(2,3,4,5));
    ASSERT_EQ(Sk4i(-1,-1,-1,-1), Sk4i(1,2,3,4) <= Sk4i(2,3,4,5));
    ASSERT_EQ(Sk4i(0,0,0,0), Sk4i(1,2,3,4) > Sk4i(2,3,4,5));
    ASSERT_EQ(Sk4i(0,0,0,0), Sk4i(1,2,3,4) >= Sk4i(2,3,4,5));
}

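// Min() and Max() also choose lane-by-lane: each result lane is the smaller
// (respectively larger) of the corresponding input lanes.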
DEF_TEST(Sk4x_MinMax, r) {
    ASSERT_EQ(Sk4f(1,2,2,1), Sk4f::Min(Sk4f(1,2,3,4), Sk4f(4,3,2,1)));
    ASSERT_EQ(Sk4f(4,3,3,4), Sk4f::Max(Sk4f(1,2,3,4), Sk4f(4,3,2,1)));
    ASSERT_EQ(Sk4i(1,2,2,1), Sk4i::Min(Sk4i(1,2,3,4), Sk4i(4,3,2,1)));
    ASSERT_EQ(Sk4i(4,3,3,4), Sk4i::Max(Sk4i(1,2,3,4), Sk4i(4,3,2,1)));
}

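// In the swizzle names, a/b/c/d stand for lanes 0-3: badc() swaps adjacent pairs,
// aacc() duplicates the even lanes, and bbdd() duplicates the odd lanes.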
DEF_TEST(Sk4x_Swizzle, r) {
    ASSERT_EQ(Sk4f(1,2,3,4).badc(), Sk4f(2,1,4,3));
    ASSERT_EQ(Sk4f(1,2,3,4).aacc(), Sk4f(1,1,3,3));
    ASSERT_EQ(Sk4f(1,2,3,4).bbdd(), Sk4f(2,2,4,4));

    ASSERT_EQ(Sk4i(1,2,3,4).badc(), Sk4i(2,1,4,3));
    ASSERT_EQ(Sk4i(1,2,3,4).aacc(), Sk4i(1,1,3,3));
    ASSERT_EQ(Sk4i(1,2,3,4).bbdd(), Sk4i(2,2,4,4));
}