OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2013 The WebM project authors. All Rights Reserved. | 2 * Copyright (c) 2013 The WebM project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
(...skipping 218 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
229 filters_x, x0_q4, x_step_q4, | 229 filters_x, x0_q4, x_step_q4, |
230 filters_y, y0_q4, y_step_q4, w, h); | 230 filters_y, y0_q4, y_step_q4, w, h); |
231 } | 231 } |
232 | 232 |
233 void vp9_convolve8_avg_c(const uint8_t *src, ptrdiff_t src_stride, | 233 void vp9_convolve8_avg_c(const uint8_t *src, ptrdiff_t src_stride, |
234 uint8_t *dst, ptrdiff_t dst_stride, | 234 uint8_t *dst, ptrdiff_t dst_stride, |
235 const int16_t *filter_x, int x_step_q4, | 235 const int16_t *filter_x, int x_step_q4, |
236 const int16_t *filter_y, int y_step_q4, | 236 const int16_t *filter_y, int y_step_q4, |
237 int w, int h) { | 237 int w, int h) { |
238 /* Fixed size intermediate buffer places limits on parameters. */ | 238 /* Fixed size intermediate buffer places limits on parameters. */ |
239 DECLARE_ALIGNED_ARRAY(16, uint8_t, temp, 64 * 64); | 239 DECLARE_ALIGNED(16, uint8_t, temp[64 * 64]); |
240 assert(w <= 64); | 240 assert(w <= 64); |
241 assert(h <= 64); | 241 assert(h <= 64); |
242 | 242 |
243 vp9_convolve8_c(src, src_stride, temp, 64, | 243 vp9_convolve8_c(src, src_stride, temp, 64, |
244 filter_x, x_step_q4, filter_y, y_step_q4, w, h); | 244 filter_x, x_step_q4, filter_y, y_step_q4, w, h); |
245 vp9_convolve_avg_c(temp, 64, dst, dst_stride, NULL, 0, NULL, 0, w, h); | 245 vp9_convolve_avg_c(temp, 64, dst, dst_stride, NULL, 0, NULL, 0, w, h); |
246 } | 246 } |
247 | 247 |
void vp9_convolve_copy_c(const uint8_t *src, ptrdiff_t src_stride,
                         uint8_t *dst, ptrdiff_t dst_stride,
                         const int16_t *filter_x, int filter_x_stride,
                         const int16_t *filter_y, int filter_y_stride,
                         int w, int h) {
  int y;

  /* Plain block copy: the filter arguments exist only so this function
   * matches the shared convolve prototype and are deliberately ignored. */
  (void)filter_x;
  (void)filter_x_stride;
  (void)filter_y;
  (void)filter_y_stride;

  /* Copy w bytes per row; strides are in bytes for the 8-bit path. */
  for (y = 0; y < h; ++y) {
    memcpy(dst + y * dst_stride, src + y * src_stride, w);
  }
}
264 | 264 |
265 void vp9_convolve_avg_c(const uint8_t *src, ptrdiff_t src_stride, | 265 void vp9_convolve_avg_c(const uint8_t *src, ptrdiff_t src_stride, |
266 uint8_t *dst, ptrdiff_t dst_stride, | 266 uint8_t *dst, ptrdiff_t dst_stride, |
267 const int16_t *filter_x, int filter_x_stride, | 267 const int16_t *filter_x, int filter_x_stride, |
268 const int16_t *filter_y, int filter_y_stride, | 268 const int16_t *filter_y, int filter_y_stride, |
269 int w, int h) { | 269 int w, int h) { |
(...skipping 224 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
494 filters_x, x0_q4, x_step_q4, | 494 filters_x, x0_q4, x_step_q4, |
495 filters_y, y0_q4, y_step_q4, w, h, bd); | 495 filters_y, y0_q4, y_step_q4, w, h, bd); |
496 } | 496 } |
497 | 497 |
498 void vp9_highbd_convolve8_avg_c(const uint8_t *src, ptrdiff_t src_stride, | 498 void vp9_highbd_convolve8_avg_c(const uint8_t *src, ptrdiff_t src_stride, |
499 uint8_t *dst, ptrdiff_t dst_stride, | 499 uint8_t *dst, ptrdiff_t dst_stride, |
500 const int16_t *filter_x, int x_step_q4, | 500 const int16_t *filter_x, int x_step_q4, |
501 const int16_t *filter_y, int y_step_q4, | 501 const int16_t *filter_y, int y_step_q4, |
502 int w, int h, int bd) { | 502 int w, int h, int bd) { |
503 // Fixed size intermediate buffer places limits on parameters. | 503 // Fixed size intermediate buffer places limits on parameters. |
504 DECLARE_ALIGNED_ARRAY(16, uint16_t, temp, 64 * 64); | 504 DECLARE_ALIGNED(16, uint16_t, temp[64 * 64]); |
505 assert(w <= 64); | 505 assert(w <= 64); |
506 assert(h <= 64); | 506 assert(h <= 64); |
507 | 507 |
508 vp9_highbd_convolve8_c(src, src_stride, CONVERT_TO_BYTEPTR(temp), 64, | 508 vp9_highbd_convolve8_c(src, src_stride, CONVERT_TO_BYTEPTR(temp), 64, |
509 filter_x, x_step_q4, filter_y, y_step_q4, w, h, bd); | 509 filter_x, x_step_q4, filter_y, y_step_q4, w, h, bd); |
510 vp9_highbd_convolve_avg_c(CONVERT_TO_BYTEPTR(temp), 64, dst, dst_stride, | 510 vp9_highbd_convolve_avg_c(CONVERT_TO_BYTEPTR(temp), 64, dst, dst_stride, |
511 NULL, 0, NULL, 0, w, h, bd); | 511 NULL, 0, NULL, 0, w, h, bd); |
512 } | 512 } |
513 | 513 |
void vp9_highbd_convolve_copy_c(const uint8_t *src8, ptrdiff_t src_stride,
                                uint8_t *dst8, ptrdiff_t dst_stride,
                                const int16_t *filter_x, int filter_x_stride,
                                const int16_t *filter_y, int filter_y_stride,
                                int w, int h, int bd) {
  int y;
  uint16_t *src = CONVERT_TO_SHORTPTR(src8);
  uint16_t *dst = CONVERT_TO_SHORTPTR(dst8);

  /* The filter arguments and bit depth are unused; they exist only so
   * this function matches the shared high-bitdepth convolve prototype. */
  (void)filter_x;
  (void)filter_x_stride;
  (void)filter_y;
  (void)filter_y_stride;
  (void)bd;

  /* Copy w 16-bit samples per row; strides are in uint16_t units here. */
  for (y = 0; y < h; ++y) {
    memcpy(dst + y * dst_stride, src + y * src_stride, w * sizeof(*dst));
  }
}
534 | 534 |
void vp9_highbd_convolve_avg_c(const uint8_t *src8, ptrdiff_t src_stride,
                               uint8_t *dst8, ptrdiff_t dst_stride,
                               const int16_t *filter_x, int filter_x_stride,
                               const int16_t *filter_y, int filter_y_stride,
                               int w, int h, int bd) {
  int r, c;
  uint16_t *src = CONVERT_TO_SHORTPTR(src8);
  uint16_t *dst = CONVERT_TO_SHORTPTR(dst8);

  /* The filter arguments and bit depth are unused; they exist only so
   * this function matches the shared high-bitdepth convolve prototype. */
  (void)filter_x;
  (void)filter_x_stride;
  (void)filter_y;
  (void)filter_y_stride;
  (void)bd;

  /* Rounded average of src into dst, sample by sample. */
  for (r = 0; r < h; ++r) {
    for (c = 0; c < w; ++c) {
      dst[c] = ROUND_POWER_OF_TWO(dst[c] + src[c], 1);
    }
    src += src_stride;
    dst += dst_stride;
  }
}
557 #endif | 557 #endif |
OLD | NEW |