Chromium Code Reviews

Side by Side Diff: simd/jcgryext-altivec.c

Issue 1953443002: Update to libjpeg_turbo 1.4.90 (Closed) Base URL: https://chromium.googlesource.com/chromium/deps/libjpeg_turbo.git@master
Patch Set: Created 4 years, 7 months ago
/*
 * AltiVec optimizations for libjpeg-turbo
 *
 * Copyright (C) 2014-2015, D. R. Commander.
 * Copyright (C) 2014, Jay Foad.
 * All rights reserved.
 * This software is provided 'as-is', without any express or implied
 * warranty.  In no event will the authors be held liable for any damages
 * arising from the use of this software.
 *
 * Permission is granted to anyone to use this software for any purpose,
 * including commercial applications, and to alter it and redistribute it
 * freely, subject to the following restrictions:
 *
 * 1. The origin of this software must not be misrepresented; you must not
 *    claim that you wrote the original software. If you use this software
 *    in a product, an acknowledgment in the product documentation would be
 *    appreciated but is not required.
 * 2. Altered source versions must be plainly marked as such, and must not be
 *    misrepresented as being the original software.
 * 3. This notice may not be removed or altered from any source distribution.
 */

/* This file is included by jcgray-altivec.c */
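/* Note: RGB_PIXELSIZE and the RGBG_INDEX* permutation constants used below
 * are expected to be defined by the including file for each pixel format.
 */
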
void jsimd_rgb_gray_convert_altivec (JDIMENSION img_width,
                                     JSAMPARRAY input_buf,
                                     JSAMPIMAGE output_buf,
                                     JDIMENSION output_row, int num_rows)
{
  JSAMPROW inptr, outptr;
  int pitch = img_width * RGB_PIXELSIZE, num_cols;
#if __BIG_ENDIAN__
  int offset;
  unsigned char __attribute__((aligned(16))) tmpbuf[RGB_PIXELSIZE * 16];
#endif

  __vector unsigned char rgb0, rgb1 = {0}, rgb2 = {0},
    rgbg0, rgbg1, rgbg2, rgbg3, y;
#if __BIG_ENDIAN__ || RGB_PIXELSIZE == 4
  __vector unsigned char rgb3 = {0};
#endif
#if __BIG_ENDIAN__ && RGB_PIXELSIZE == 4
  __vector unsigned char rgb4 = {0};
#endif
  __vector short rg0, rg1, rg2, rg3, bg0, bg1, bg2, bg3;
  __vector unsigned short yl, yh;
  __vector int y0, y1, y2, y3;

  /* Constants */
  __vector short pw_f0299_f0337 = { __4X2(F_0_299, F_0_337) },
    pw_f0114_f0250 = { __4X2(F_0_114, F_0_250) };
  __vector int pd_onehalf = { __4X(ONE_HALF) };
  __vector unsigned char pb_zero = { __16X(0) },
#if __BIG_ENDIAN__
    shift_pack_index = {0,1,4,5,8,9,12,13,16,17,20,21,24,25,28,29};
#else
    shift_pack_index = {2,3,6,7,10,11,14,15,18,19,22,23,26,27,30,31};
#endif

  while (--num_rows >= 0) {
    inptr = *input_buf++;
    outptr = output_buf[0][output_row];
    output_row++;

    for (num_cols = pitch; num_cols > 0;
         num_cols -= RGB_PIXELSIZE * 16, inptr += RGB_PIXELSIZE * 16,
         outptr += 16) {

#if __BIG_ENDIAN__
      /* Load 16 pixels == 48 or 64 bytes */
      offset = (size_t)inptr & 15;
      if (offset) {
        __vector unsigned char unaligned_shift_index;
        int bytes = num_cols + offset;

        if (bytes < (RGB_PIXELSIZE + 1) * 16 && (bytes & 15)) {
          /* Slow path to prevent buffer overread.  Since there is no way to
           * read a partial AltiVec register, overread would occur on the last
           * chunk of the last image row if the right edge is not on a 16-byte
           * boundary.  It could also occur on other rows if the bytes per row
           * is low enough.  Since we can't determine whether we're on the
           * last image row, we have to assume every row is the last.
           */
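          /* For example, with RGB_PIXELSIZE == 3 and img_width == 6, each
           * row is only 18 bytes, so a full 48-byte register load would read
           * 30 bytes past the end of the last row.
           */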
          memcpy(tmpbuf, inptr, min(num_cols, RGB_PIXELSIZE * 16));
          rgb0 = vec_ld(0, tmpbuf);
          rgb1 = vec_ld(16, tmpbuf);
          rgb2 = vec_ld(32, tmpbuf);
#if RGB_PIXELSIZE == 4
          rgb3 = vec_ld(48, tmpbuf);
#endif
        } else {
          /* Fast path */
          rgb0 = vec_ld(0, inptr);
          if (bytes > 16)
            rgb1 = vec_ld(16, inptr);
          if (bytes > 32)
            rgb2 = vec_ld(32, inptr);
          if (bytes > 48)
            rgb3 = vec_ld(48, inptr);
#if RGB_PIXELSIZE == 4
          if (bytes > 64)
            rgb4 = vec_ld(64, inptr);
#endif
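          /* vec_lvsl() produces a permute mask from the misalignment of
           * inptr; each vec_perm() below splices two adjacent aligned loads
           * into one register of contiguous pixel data.
           */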
          unaligned_shift_index = vec_lvsl(0, inptr);
          rgb0 = vec_perm(rgb0, rgb1, unaligned_shift_index);
          rgb1 = vec_perm(rgb1, rgb2, unaligned_shift_index);
          rgb2 = vec_perm(rgb2, rgb3, unaligned_shift_index);
#if RGB_PIXELSIZE == 4
          rgb3 = vec_perm(rgb3, rgb4, unaligned_shift_index);
#endif
        }
      } else {
        if (num_cols < RGB_PIXELSIZE * 16 && (num_cols & 15)) {
          /* Slow path */
          memcpy(tmpbuf, inptr, min(num_cols, RGB_PIXELSIZE * 16));
          rgb0 = vec_ld(0, tmpbuf);
          rgb1 = vec_ld(16, tmpbuf);
          rgb2 = vec_ld(32, tmpbuf);
#if RGB_PIXELSIZE == 4
          rgb3 = vec_ld(48, tmpbuf);
#endif
        } else {
          /* Fast path */
          rgb0 = vec_ld(0, inptr);
          if (num_cols > 16)
            rgb1 = vec_ld(16, inptr);
          if (num_cols > 32)
            rgb2 = vec_ld(32, inptr);
#if RGB_PIXELSIZE == 4
          if (num_cols > 48)
            rgb3 = vec_ld(48, inptr);
#endif
        }
      }
#else
      /* Little endian */
      rgb0 = vec_vsx_ld(0, inptr);
      if (num_cols > 16)
        rgb1 = vec_vsx_ld(16, inptr);
      if (num_cols > 32)
        rgb2 = vec_vsx_ld(32, inptr);
#if RGB_PIXELSIZE == 4
      if (num_cols > 48)
        rgb3 = vec_vsx_ld(48, inptr);
#endif
#endif

#if RGB_PIXELSIZE == 3
      /* rgb0 = R0 G0 B0 R1 G1 B1 R2 G2 B2 R3 G3 B3 R4 G4 B4 R5
       * rgb1 = G5 B5 R6 G6 B6 R7 G7 B7 R8 G8 B8 R9 G9 B9 Ra Ga
       * rgb2 = Ba Rb Gb Bb Rc Gc Bc Rd Gd Bd Re Ge Be Rf Gf Bf
       *
       * rgbg0 = R0 G0 R1 G1 R2 G2 R3 G3 B0 G0 B1 G1 B2 G2 B3 G3
       * rgbg1 = R4 G4 R5 G5 R6 G6 R7 G7 B4 G4 B5 G5 B6 G6 B7 G7
       * rgbg2 = R8 G8 R9 G9 Ra Ga Rb Gb B8 G8 B9 G9 Ba Ga Bb Gb
       * rgbg3 = Rc Gc Rd Gd Re Ge Rf Gf Bc Gc Bd Gd Be Ge Bf Gf
       */
      rgbg0 = vec_perm(rgb0, rgb0, (__vector unsigned char)RGBG_INDEX0);
      rgbg1 = vec_perm(rgb0, rgb1, (__vector unsigned char)RGBG_INDEX1);
      rgbg2 = vec_perm(rgb1, rgb2, (__vector unsigned char)RGBG_INDEX2);
      rgbg3 = vec_perm(rgb2, rgb2, (__vector unsigned char)RGBG_INDEX3);
#else
      /* rgb0 = R0 G0 B0 X0 R1 G1 B1 X1 R2 G2 B2 X2 R3 G3 B3 X3
       * rgb1 = R4 G4 B4 X4 R5 G5 B5 X5 R6 G6 B6 X6 R7 G7 B7 X7
       * rgb2 = R8 G8 B8 X8 R9 G9 B9 X9 Ra Ga Ba Xa Rb Gb Bb Xb
       * rgb3 = Rc Gc Bc Xc Rd Gd Bd Xd Re Ge Be Xe Rf Gf Bf Xf
       *
       * rgbg0 = R0 G0 R1 G1 R2 G2 R3 G3 B0 G0 B1 G1 B2 G2 B3 G3
       * rgbg1 = R4 G4 R5 G5 R6 G6 R7 G7 B4 G4 B5 G5 B6 G6 B7 G7
       * rgbg2 = R8 G8 R9 G9 Ra Ga Rb Gb B8 G8 B9 G9 Ba Ga Bb Gb
       * rgbg3 = Rc Gc Rd Gd Re Ge Rf Gf Bc Gc Bd Gd Be Ge Bf Gf
       */
      rgbg0 = vec_perm(rgb0, rgb0, (__vector unsigned char)RGBG_INDEX);
      rgbg1 = vec_perm(rgb1, rgb1, (__vector unsigned char)RGBG_INDEX);
      rgbg2 = vec_perm(rgb2, rgb2, (__vector unsigned char)RGBG_INDEX);
      rgbg3 = vec_perm(rgb3, rgb3, (__vector unsigned char)RGBG_INDEX);
#endif

      /* rg0 = R0 G0 R1 G1 R2 G2 R3 G3
       * bg0 = B0 G0 B1 G1 B2 G2 B3 G3
       * ...
       *
       * NOTE: We have to use vec_merge*() here because vec_unpack*() doesn't
       * support unsigned vectors.
       */
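      /* (VEC_UNPACKHU()/VEC_UNPACKLU() are presumably macros wrapping
       * vec_mergeh()/vec_mergel() with pb_zero to zero-extend each byte to a
       * halfword; hence the pb_zero constant declared above.)
       */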
      rg0 = (__vector signed short)VEC_UNPACKHU(rgbg0);
      bg0 = (__vector signed short)VEC_UNPACKLU(rgbg0);
      rg1 = (__vector signed short)VEC_UNPACKHU(rgbg1);
      bg1 = (__vector signed short)VEC_UNPACKLU(rgbg1);
      rg2 = (__vector signed short)VEC_UNPACKHU(rgbg2);
      bg2 = (__vector signed short)VEC_UNPACKLU(rgbg2);
      rg3 = (__vector signed short)VEC_UNPACKHU(rgbg3);
      bg3 = (__vector signed short)VEC_UNPACKLU(rgbg3);

      /* (Original)
       * Y = 0.29900 * R + 0.58700 * G + 0.11400 * B
       *
       * (This implementation)
       * Y = 0.29900 * R + 0.33700 * G + 0.11400 * B + 0.25000 * G
       */
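      /* The green coefficient is split as 0.58700 = 0.33700 + 0.25000 so
       * that each scaled coefficient fits in a signed 16-bit word for
       * vec_msums(): FIX(0.58700) = 38470 would overflow int16, whereas
       * FIX(0.33700) = 22086 and FIX(0.25000) = 16384 do not.
       */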

      /* Calculate Y values */

      y0 = vec_msums(rg0, pw_f0299_f0337, pd_onehalf);
      y1 = vec_msums(rg1, pw_f0299_f0337, pd_onehalf);
      y2 = vec_msums(rg2, pw_f0299_f0337, pd_onehalf);
      y3 = vec_msums(rg3, pw_f0299_f0337, pd_onehalf);
      y0 = vec_msums(bg0, pw_f0114_f0250, y0);
      y1 = vec_msums(bg1, pw_f0114_f0250, y1);
      y2 = vec_msums(bg2, pw_f0114_f0250, y2);
      y3 = vec_msums(bg3, pw_f0114_f0250, y3);
      /* Clever way to avoid 4 shifts + 2 packs.  This packs the high word
       * from each dword into a new 16-bit vector, which is the equivalent of
       * descaling the 32-bit results (right-shifting by 16 bits) and then
       * packing them.
       */
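      /* (The shift_pack_index bytes select the high halfword of each dword:
       * byte offsets 0-1 per dword on big endian, 2-3 on little endian.)
       */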
      yl = vec_perm((__vector unsigned short)y0, (__vector unsigned short)y1,
                    shift_pack_index);
      yh = vec_perm((__vector unsigned short)y2, (__vector unsigned short)y3,
                    shift_pack_index);
      y = vec_pack(yl, yh);
      vec_st(y, 0, outptr);
    }
  }
}
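
For reference, here is a minimal scalar sketch of the same fixed-point grayscale conversion. It is not part of this patch; the constant values assume libjpeg-turbo's jccolor.c-style definitions, i.e. FIX(x) = (int)((x) * 65536 + 0.5) and ONE_HALF = 1 << 15. One Y sample is computed exactly as the two vec_msums() calls above, followed by the 16-bit descale that the shift_pack_index permute performs.

#include <stdio.h>

/* Assumed constants, matching FIX(x) = (int)((x) * 65536 + 0.5): */
#define F_0_299   19595  /* FIX(0.29900) */
#define F_0_337   22086  /* FIX(0.33700) */
#define F_0_114    7471  /* FIX(0.11400) */
#define F_0_250   16384  /* FIX(0.25000) */
#define ONE_HALF  32768  /* 1 << 15, rounding constant */

/* Scalar model of one Y sample: (R*0.299 + G*0.337) + (B*0.114 + G*0.250)
 * + 0.5 in 16.16 fixed point, then a 16-bit right shift to descale (the
 * high-halfword extraction done by shift_pack_index in the vector code). */
static unsigned char rgb_to_gray(int r, int g, int b)
{
  int y = F_0_299 * r + F_0_337 * g + F_0_114 * b + F_0_250 * g + ONE_HALF;
  return (unsigned char)(y >> 16);
}

int main(void)
{
  /* White -> 255, black -> 0, pure red -> 76. */
  printf("%d %d %d\n", rgb_to_gray(255, 255, 255), rgb_to_gray(0, 0, 0),
         rgb_to_gray(255, 0, 0));
  return 0;
}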
