Chromium Code Reviews

Side by Side Diff: source/libvpx/vpx/src/vpx_image.c

Issue 11555023: libvpx: Add VP9 decoder. (Closed) Base URL: svn://chrome-svn/chrome/trunk/deps/third_party/libvpx/
Patch Set: Created 8 years ago
/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */


#include <stdlib.h>
#include <string.h>
#include "vpx/vpx_image.h"

#define ADDRESS_STORAGE_SIZE sizeof(size_t)
/* returns an addr aligned to the byte boundary specified by align */
#define align_addr(addr,align) (void*)(((size_t)(addr) + ((align) - 1)) & (size_t)-(align))

/* Memalign code is copied from vpx_mem.c */
static void *img_buf_memalign(size_t align, size_t size) {
  void *addr,
       * x = NULL;

  addr = malloc(size + align - 1 + ADDRESS_STORAGE_SIZE);

  if (addr) {
    x = align_addr((unsigned char *)addr + ADDRESS_STORAGE_SIZE, (int)align);
    /* save the actual malloc address */
    ((size_t *)x)[-1] = (size_t)addr;
  }

  return x;
}

static void img_buf_free(void *memblk) {
  if (memblk) {
    void *addr = (void *)(((size_t *)memblk)[-1]);
    free(addr);
  }
}

static vpx_image_t *img_alloc_helper(vpx_image_t *img,
                                     vpx_img_fmt_t fmt,
                                     unsigned int d_w,
                                     unsigned int d_h,
                                     unsigned int buf_align,
                                     unsigned int stride_align,
                                     unsigned char *img_data) {
  unsigned int h, w, s, xcs, ycs, bps;
  int align;

  /* Treat align==0 like align==1 */
  if (!buf_align)
    buf_align = 1;

  /* Validate alignment (must be power of 2) */
  if (buf_align & (buf_align - 1))
    goto fail;

  /* Treat align==0 like align==1 */
  if (!stride_align)
    stride_align = 1;

  /* Validate alignment (must be power of 2) */
  if (stride_align & (stride_align - 1))
    goto fail;

  /* Get sample size for this format */
  switch (fmt) {
    case VPX_IMG_FMT_RGB32:
    case VPX_IMG_FMT_RGB32_LE:
    case VPX_IMG_FMT_ARGB:
    case VPX_IMG_FMT_ARGB_LE:
      bps = 32;
      break;
    case VPX_IMG_FMT_RGB24:
    case VPX_IMG_FMT_BGR24:
      bps = 24;
      break;
    case VPX_IMG_FMT_RGB565:
    case VPX_IMG_FMT_RGB565_LE:
    case VPX_IMG_FMT_RGB555:
    case VPX_IMG_FMT_RGB555_LE:
    case VPX_IMG_FMT_UYVY:
    case VPX_IMG_FMT_YUY2:
    case VPX_IMG_FMT_YVYU:
      bps = 16;
      break;
    case VPX_IMG_FMT_I420:
    case VPX_IMG_FMT_YV12:
    case VPX_IMG_FMT_VPXI420:
    case VPX_IMG_FMT_VPXYV12:
      bps = 12;
      break;
    default:
      bps = 16;
      break;
  }

  /* Get chroma shift values for this format */
  switch (fmt) {
    case VPX_IMG_FMT_I420:
    case VPX_IMG_FMT_YV12:
    case VPX_IMG_FMT_VPXI420:
    case VPX_IMG_FMT_VPXYV12:
      xcs = 1;
      break;
    default:
      xcs = 0;
      break;
  }

  switch (fmt) {
    case VPX_IMG_FMT_I420:
    case VPX_IMG_FMT_YV12:
    case VPX_IMG_FMT_VPXI420:
    case VPX_IMG_FMT_VPXYV12:
      ycs = 1;
      break;
    default:
      ycs = 0;
      break;
  }

  /* Calculate storage sizes given the chroma subsampling */
  align = (1 << xcs) - 1;
  w = (d_w + align) & ~align;
  align = (1 << ycs) - 1;
  h = (d_h + align) & ~align;
  s = (fmt & VPX_IMG_FMT_PLANAR) ? w : bps * w / 8;
  s = (s + stride_align - 1) & ~(stride_align - 1);

  /* Allocate the new image */
  if (!img) {
    img = (vpx_image_t *)calloc(1, sizeof(vpx_image_t));

    if (!img)
      goto fail;

    img->self_allocd = 1;
  } else {
    memset(img, 0, sizeof(vpx_image_t));
  }

  img->img_data = img_data;

  if (!img_data) {
    img->img_data = img_buf_memalign(buf_align, ((fmt & VPX_IMG_FMT_PLANAR) ?
                                                 h * s * bps / 8 : h * s));
    img->img_data_owner = 1;
  }

  if (!img->img_data)
    goto fail;

  img->fmt = fmt;
  img->w = w;
  img->h = h;
  img->x_chroma_shift = xcs;
  img->y_chroma_shift = ycs;
  img->bps = bps;

  /* Calculate strides */
  img->stride[VPX_PLANE_Y] = img->stride[VPX_PLANE_ALPHA] = s;
  img->stride[VPX_PLANE_U] = img->stride[VPX_PLANE_V] = s >> xcs;

  /* Default viewport to entire image */
  if (!vpx_img_set_rect(img, 0, 0, d_w, d_h))
    return img;

fail:
  vpx_img_free(img);
  return NULL;
}

vpx_image_t *vpx_img_alloc(vpx_image_t *img,
                           vpx_img_fmt_t fmt,
                           unsigned int d_w,
                           unsigned int d_h,
                           unsigned int align) {
  return img_alloc_helper(img, fmt, d_w, d_h, align, align, NULL);
}

vpx_image_t *vpx_img_wrap(vpx_image_t *img,
                          vpx_img_fmt_t fmt,
                          unsigned int d_w,
                          unsigned int d_h,
                          unsigned int stride_align,
                          unsigned char *img_data) {
  /* By setting buf_align = 1, we don't change buffer alignment in this
   * function. */
  return img_alloc_helper(img, fmt, d_w, d_h, 1, stride_align, img_data);
}

int vpx_img_set_rect(vpx_image_t *img,
                     unsigned int x,
                     unsigned int y,
                     unsigned int w,
                     unsigned int h) {
  unsigned char *data;

  if (x + w <= img->w && y + h <= img->h) {
    img->d_w = w;
    img->d_h = h;

    /* Calculate plane pointers */
    if (!(img->fmt & VPX_IMG_FMT_PLANAR)) {
      img->planes[VPX_PLANE_PACKED] =
        img->img_data + x * img->bps / 8 + y * img->stride[VPX_PLANE_PACKED];
    } else {
      data = img->img_data;

      if (img->fmt & VPX_IMG_FMT_HAS_ALPHA) {
        img->planes[VPX_PLANE_ALPHA] =
          data + x + y * img->stride[VPX_PLANE_ALPHA];
        data += img->h * img->stride[VPX_PLANE_ALPHA];
      }

      img->planes[VPX_PLANE_Y] = data + x + y * img->stride[VPX_PLANE_Y];
      data += img->h * img->stride[VPX_PLANE_Y];

      if (!(img->fmt & VPX_IMG_FMT_UV_FLIP)) {
        img->planes[VPX_PLANE_U] = data
                                   + (x >> img->x_chroma_shift)
                                   + (y >> img->y_chroma_shift) * img->stride[VPX_PLANE_U];
        data += (img->h >> img->y_chroma_shift) * img->stride[VPX_PLANE_U];
        img->planes[VPX_PLANE_V] = data
                                   + (x >> img->x_chroma_shift)
                                   + (y >> img->y_chroma_shift) * img->stride[VPX_PLANE_V];
      } else {
        img->planes[VPX_PLANE_V] = data
                                   + (x >> img->x_chroma_shift)
                                   + (y >> img->y_chroma_shift) * img->stride[VPX_PLANE_V];
        data += (img->h >> img->y_chroma_shift) * img->stride[VPX_PLANE_V];
        img->planes[VPX_PLANE_U] = data
                                   + (x >> img->x_chroma_shift)
                                   + (y >> img->y_chroma_shift) * img->stride[VPX_PLANE_U];
      }
    }

    return 0;
  }

  return -1;
}

void vpx_img_flip(vpx_image_t *img) {
  /* Note: In the pointer adjustment calculation, we want the rhs to be
   * promoted to a signed type. Section 6.3.1.8 of the ISO C99 standard
   * indicates that if the adjustment parameter is unsigned, the stride
   * parameter will be promoted to unsigned, causing errors when the lhs
   * is a larger type than the rhs.
   */
  img->planes[VPX_PLANE_Y] += (signed)(img->d_h - 1) * img->stride[VPX_PLANE_Y];
  img->stride[VPX_PLANE_Y] = -img->stride[VPX_PLANE_Y];

  img->planes[VPX_PLANE_U] += (signed)((img->d_h >> img->y_chroma_shift) - 1)
                              * img->stride[VPX_PLANE_U];
  img->stride[VPX_PLANE_U] = -img->stride[VPX_PLANE_U];

  img->planes[VPX_PLANE_V] += (signed)((img->d_h >> img->y_chroma_shift) - 1)
                              * img->stride[VPX_PLANE_V];
  img->stride[VPX_PLANE_V] = -img->stride[VPX_PLANE_V];

  img->planes[VPX_PLANE_ALPHA] += (signed)(img->d_h - 1) * img->stride[VPX_PLANE_ALPHA];
  img->stride[VPX_PLANE_ALPHA] = -img->stride[VPX_PLANE_ALPHA];
}

void vpx_img_free(vpx_image_t *img) {
  if (img) {
    if (img->img_data && img->img_data_owner)
      img_buf_free(img->img_data);

    if (img->self_allocd)
      free(img);
  }
}
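
For context, here is a minimal usage sketch of the vpx_image API implemented above. It is not part of the patch; the image dimensions, alignment value, and error handling are arbitrary example choices, and it assumes only the functions and fields shown in this file (vpx_img_alloc, vpx_img_set_rect, vpx_img_flip, vpx_img_free).

/* Illustrative usage sketch (not part of this change). */
#include <stdio.h>
#include "vpx/vpx_image.h"

int main(void) {
  vpx_image_t img;

  /* Allocate a 640x480 I420 buffer; buffer and strides aligned to 32 bytes.
   * Passing a caller-provided vpx_image_t means self_allocd stays 0. */
  if (!vpx_img_alloc(&img, VPX_IMG_FMT_I420, 640, 480, 32)) {
    fprintf(stderr, "allocation failed\n");
    return 1;
  }

  /* Restrict the visible rectangle to the top-left 320x240 region;
   * vpx_img_set_rect() returns 0 on success and -1 if out of bounds. */
  if (vpx_img_set_rect(&img, 0, 0, 320, 240)) {
    fprintf(stderr, "rectangle out of bounds\n");
    vpx_img_free(&img);
    return 1;
  }

  /* Flip vertically: plane pointers move to the last row, strides negate. */
  vpx_img_flip(&img);
  printf("Y stride after flip: %d\n", img.stride[VPX_PLANE_Y]);

  /* img itself is caller-owned, so only the pixel buffer is freed here. */
  vpx_img_free(&img);
  return 0;
}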