Side by Side Diff: source/libvpx/vp8/common/reconintra_mt.c

Issue 3417017: Update libvpx sources to v0.9.2-35-ga8a38bc. ... (Closed) Base URL: svn://chrome-svn/chrome/trunk/deps/third_party/libvpx/
Patch Set: Created 10 years, 3 months ago
1 /*
2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
3 *
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
9 */
10
11
12 #include "vpx_ports/config.h"
13 #include "recon.h"
14 #include "reconintra.h"
15 #include "vpx_mem/vpx_mem.h"
16 #include "onyxd_int.h"
17
18 // For skip_recon_mb(), add vp8_build_intra_predictors_mby_s(MACROBLOCKD *x) and
19 // vp8_build_intra_predictors_mbuv_s(MACROBLOCKD *x).
20
21 void vp8mt_build_intra_predictors_mby(VP8D_COMP *pbi, MACROBLOCKD *x, int mb_row, int mb_col)
22 {
23 #if CONFIG_MULTITHREAD
24 unsigned char *yabove_row; // = x->dst.y_buffer - x->dst.y_stride;
25 unsigned char *yleft_col;
26 unsigned char yleft_buf[16];
27 unsigned char ytop_left; // = yabove_row[-1];
28 unsigned char *ypred_ptr = x->predictor;
29 int r, c, i;
30
31 if (pbi->common.filter_level)
32 {
33 yabove_row = pbi->mt_yabove_row[mb_row] + mb_col*16 +32;
34 yleft_col = pbi->mt_yleft_col[mb_row];
35 } else
36 {
37 yabove_row = x->dst.y_buffer - x->dst.y_stride;
38
39 for (i = 0; i < 16; i++)
40 yleft_buf[i] = x->dst.y_buffer [i* x->dst.y_stride -1];
41 yleft_col = yleft_buf;
42 }
43
44 ytop_left = yabove_row[-1];
45
46 // for Y
47 switch (x->mode_info_context->mbmi.mode)
48 {
49 case DC_PRED:
50 {
51 int expected_dc;
52 int i;
53 int shift;
54 int average = 0;
55
56
57 if (x->up_available || x->left_available)
58 {
59 if (x->up_available)
60 {
61 for (i = 0; i < 16; i++)
62 {
63 average += yabove_row[i];
64 }
65 }
66
67 if (x->left_available)
68 {
69
70 for (i = 0; i < 16; i++)
71 {
72 average += yleft_col[i];
73 }
74
75 }
76
77
78
79 shift = 3 + x->up_available + x->left_available;
80 expected_dc = (average + (1 << (shift - 1))) >> shift;
81 }
82 else
83 {
84 expected_dc = 128;
85 }
86
87 vpx_memset(ypred_ptr, expected_dc, 256);
88 }
89 break;
90 case V_PRED:
91 {
92
93 for (r = 0; r < 16; r++)
94 {
95
96 ((int *)ypred_ptr)[0] = ((int *)yabove_row)[0];
97 ((int *)ypred_ptr)[1] = ((int *)yabove_row)[1];
98 ((int *)ypred_ptr)[2] = ((int *)yabove_row)[2];
99 ((int *)ypred_ptr)[3] = ((int *)yabove_row)[3];
100 ypred_ptr += 16;
101 }
102 }
103 break;
104 case H_PRED:
105 {
106
107 for (r = 0; r < 16; r++)
108 {
109
110 vpx_memset(ypred_ptr, yleft_col[r], 16);
111 ypred_ptr += 16;
112 }
113
114 }
115 break;
116 case TM_PRED:
117 {
118
119 for (r = 0; r < 16; r++)
120 {
121 for (c = 0; c < 16; c++)
122 {
123 int pred = yleft_col[r] + yabove_row[ c] - ytop_left;
124
125 if (pred < 0)
126 pred = 0;
127
128 if (pred > 255)
129 pred = 255;
130
131 ypred_ptr[c] = pred;
132 }
133
134 ypred_ptr += 16;
135 }
136
137 }
138 break;
139 case B_PRED:
140 case NEARESTMV:
141 case NEARMV:
142 case ZEROMV:
143 case NEWMV:
144 case SPLITMV:
145 case MB_MODE_COUNT:
146 break;
147 }
148 #else
149 (void) pbi;
150 (void) x;
151 (void) mb_row;
152 (void) mb_col;
153 #endif
154 }
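
For reference, a minimal standalone sketch of the DC_PRED arithmetic used above, with a hypothetical helper name and plain memset standing in for vpx_memset: the available above/left samples are summed, rounded to nearest with a shift of 3 plus one per available edge (16 or 32 samples), and 128 is used when neither edge is available.

#include <string.h>

/* Illustrative only: DC prediction for a 16x16 luma block. */
static void dc_predict_16x16(unsigned char *pred, int stride,
                             const unsigned char *above,
                             const unsigned char *left,
                             int up_available, int left_available)
{
    int expected_dc = 128;   /* fallback when no neighbors exist */
    int r;

    if (up_available || left_available)
    {
        int average = 0, i;
        int shift = 3 + up_available + left_available; /* log2 of sample count */

        if (up_available)
            for (i = 0; i < 16; i++)
                average += above[i];

        if (left_available)
            for (i = 0; i < 16; i++)
                average += left[i];

        /* round to nearest before shifting, as in the code above */
        expected_dc = (average + (1 << (shift - 1))) >> shift;
    }

    for (r = 0; r < 16; r++)
        memset(pred + r * stride, expected_dc, 16);
}

Passing stride = 16 reproduces the write into x->predictor used here; the _s variant below differs only in where the rows land.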
155
156 void vp8mt_build_intra_predictors_mby_s(VP8D_COMP *pbi, MACROBLOCKD *x, int mb_row, int mb_col)
157 {
158 #if CONFIG_MULTITHREAD
159 unsigned char *yabove_row; // = x->dst.y_buffer - x->dst.y_stride;
160 unsigned char *yleft_col;
161 unsigned char yleft_buf[16];
162 unsigned char ytop_left; // = yabove_row[-1];
163 unsigned char *ypred_ptr = x->predictor;
164 int r, c, i;
165
166 int y_stride = x->dst.y_stride;
167 ypred_ptr = x->dst.y_buffer; //x->predictor;
168
169 if (pbi->common.filter_level)
170 {
171 yabove_row = pbi->mt_yabove_row[mb_row] + mb_col*16 +32;
172 yleft_col = pbi->mt_yleft_col[mb_row];
173 } else
174 {
175 yabove_row = x->dst.y_buffer - x->dst.y_stride;
176
177 for (i = 0; i < 16; i++)
178 yleft_buf[i] = x->dst.y_buffer [i* x->dst.y_stride -1];
179 yleft_col = yleft_buf;
180 }
181
182 ytop_left = yabove_row[-1];
183
184 // for Y
185 switch (x->mode_info_context->mbmi.mode)
186 {
187 case DC_PRED:
188 {
189 int expected_dc;
190 int i;
191 int shift;
192 int average = 0;
193
194
195 if (x->up_available || x->left_available)
196 {
197 if (x->up_available)
198 {
199 for (i = 0; i < 16; i++)
200 {
201 average += yabove_row[i];
202 }
203 }
204
205 if (x->left_available)
206 {
207
208 for (i = 0; i < 16; i++)
209 {
210 average += yleft_col[i];
211 }
212
213 }
214
215
216
217 shift = 3 + x->up_available + x->left_available;
218 expected_dc = (average + (1 << (shift - 1))) >> shift;
219 }
220 else
221 {
222 expected_dc = 128;
223 }
224
225 //vpx_memset(ypred_ptr, expected_dc, 256);
226 for (r = 0; r < 16; r++)
227 {
228 vpx_memset(ypred_ptr, expected_dc, 16);
229 ypred_ptr += y_stride; //16;
230 }
231 }
232 break;
233 case V_PRED:
234 {
235
236 for (r = 0; r < 16; r++)
237 {
238
239 ((int *)ypred_ptr)[0] = ((int *)yabove_row)[0];
240 ((int *)ypred_ptr)[1] = ((int *)yabove_row)[1];
241 ((int *)ypred_ptr)[2] = ((int *)yabove_row)[2];
242 ((int *)ypred_ptr)[3] = ((int *)yabove_row)[3];
243 ypred_ptr += y_stride; //16;
244 }
245 }
246 break;
247 case H_PRED:
248 {
249
250 for (r = 0; r < 16; r++)
251 {
252
253 vpx_memset(ypred_ptr, yleft_col[r], 16);
254 ypred_ptr += y_stride; //16;
255 }
256
257 }
258 break;
259 case TM_PRED:
260 {
261
262 for (r = 0; r < 16; r++)
263 {
264 for (c = 0; c < 16; c++)
265 {
266 int pred = yleft_col[r] + yabove_row[ c] - ytop_left;
267
268 if (pred < 0)
269 pred = 0;
270
271 if (pred > 255)
272 pred = 255;
273
274 ypred_ptr[c] = pred;
275 }
276
277 ypred_ptr += y_stride; //16;
278 }
279
280 }
281 break;
282 case B_PRED:
283 case NEARESTMV:
284 case NEARMV:
285 case ZEROMV:
286 case NEWMV:
287 case SPLITMV:
288 case MB_MODE_COUNT:
289 break;
290 }
291 #else
292 (void) pbi;
293 (void) x;
294 (void) mb_row;
295 (void) mb_col;
296 #endif
297 }
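
The only difference from the previous function is the destination: the _s variant writes straight into the reconstruction buffer, so each row advances by y_stride rather than the fixed 16-byte predictor stride. A small sketch of that idea for the V_PRED case (illustrative helper, with memcpy in place of the word-at-a-time copies above):

#include <string.h>

/* Illustrative only: V_PRED repeats the row above the block 16 times.
 * dst_stride is 16 when filling x->predictor, or y_stride when writing
 * directly to the frame buffer as the _s variant does. */
static void v_predict_16x16(unsigned char *dst, int dst_stride,
                            const unsigned char *above_row)
{
    int r;

    for (r = 0; r < 16; r++)
    {
        memcpy(dst, above_row, 16);
        dst += dst_stride;
    }
}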
298
299 void vp8mt_build_intra_predictors_mbuv(VP8D_COMP *pbi, MACROBLOCKD *x, int mb_row, int mb_col)
300 {
301 #if CONFIG_MULTITHREAD
302 unsigned char *uabove_row; // = x->dst.u_buffer - x->dst.uv_stride;
303 unsigned char *uleft_col; //[16];
304 unsigned char uleft_buf[8];
305 unsigned char utop_left; // = uabove_row[-1];
306 unsigned char *vabove_row; // = x->dst.v_buffer - x->dst.uv_stride;
307 unsigned char *vleft_col; //[20];
308 unsigned char vleft_buf[8];
309 unsigned char vtop_left; // = vabove_row[-1];
310 unsigned char *upred_ptr = &x->predictor[256];
311 unsigned char *vpred_ptr = &x->predictor[320];
312 int i, j;
313
314 if (pbi->common.filter_level)
315 {
316 uabove_row = pbi->mt_uabove_row[mb_row] + mb_col*8 +16;
317 vabove_row = pbi->mt_vabove_row[mb_row] + mb_col*8 +16;
318 uleft_col = pbi->mt_uleft_col[mb_row];
319 vleft_col = pbi->mt_vleft_col[mb_row];
320 } else
321 {
322 uabove_row = x->dst.u_buffer - x->dst.uv_stride;
323 vabove_row = x->dst.v_buffer - x->dst.uv_stride;
324
325 for (i = 0; i < 8; i++)
326 {
327 uleft_buf[i] = x->dst.u_buffer [i* x->dst.uv_stride -1];
328 vleft_buf[i] = x->dst.v_buffer [i* x->dst.uv_stride -1];
329 }
330 uleft_col = uleft_buf;
331 vleft_col = vleft_buf;
332 }
333 utop_left = uabove_row[-1];
334 vtop_left = vabove_row[-1];
335
336 switch (x->mode_info_context->mbmi.uv_mode)
337 {
338 case DC_PRED:
339 {
340 int expected_udc;
341 int expected_vdc;
342 int i;
343 int shift;
344 int Uaverage = 0;
345 int Vaverage = 0;
346
347 if (x->up_available)
348 {
349 for (i = 0; i < 8; i++)
350 {
351 Uaverage += uabove_row[i];
352 Vaverage += vabove_row[i];
353 }
354 }
355
356 if (x->left_available)
357 {
358 for (i = 0; i < 8; i++)
359 {
360 Uaverage += uleft_col[i];
361 Vaverage += vleft_col[i];
362 }
363 }
364
365 if (!x->up_available && !x->left_available)
366 {
367 expected_udc = 128;
368 expected_vdc = 128;
369 }
370 else
371 {
372 shift = 2 + x->up_available + x->left_available;
373 expected_udc = (Uaverage + (1 << (shift - 1))) >> shift;
374 expected_vdc = (Vaverage + (1 << (shift - 1))) >> shift;
375 }
376
377
378 vpx_memset(upred_ptr, expected_udc, 64);
379 vpx_memset(vpred_ptr, expected_vdc, 64);
380
381
382 }
383 break;
384 case V_PRED:
385 {
386 int i;
387
388 for (i = 0; i < 8; i++)
389 {
390 vpx_memcpy(upred_ptr, uabove_row, 8);
391 vpx_memcpy(vpred_ptr, vabove_row, 8);
392 upred_ptr += 8;
393 vpred_ptr += 8;
394 }
395
396 }
397 break;
398 case H_PRED:
399 {
400 int i;
401
402 for (i = 0; i < 8; i++)
403 {
404 vpx_memset(upred_ptr, uleft_col[i], 8);
405 vpx_memset(vpred_ptr, vleft_col[i], 8);
406 upred_ptr += 8;
407 vpred_ptr += 8;
408 }
409 }
410
411 break;
412 case TM_PRED:
413 {
414 int i;
415
416 for (i = 0; i < 8; i++)
417 {
418 for (j = 0; j < 8; j++)
419 {
420 int predu = uleft_col[i] + uabove_row[j] - utop_left;
421 int predv = vleft_col[i] + vabove_row[j] - vtop_left;
422
423 if (predu < 0)
424 predu = 0;
425
426 if (predu > 255)
427 predu = 255;
428
429 if (predv < 0)
430 predv = 0;
431
432 if (predv > 255)
433 predv = 255;
434
435 upred_ptr[j] = predu;
436 vpred_ptr[j] = predv;
437 }
438
439 upred_ptr += 8;
440 vpred_ptr += 8;
441 }
442
443 }
444 break;
445 case B_PRED:
446 case NEARESTMV:
447 case NEARMV:
448 case ZEROMV:
449 case NEWMV:
450 case SPLITMV:
451 case MB_MODE_COUNT:
452 break;
453 }
454 #else
455 (void) pbi;
456 (void) x;
457 (void) mb_row;
458 (void) mb_col;
459 #endif
460 }
461
462 void vp8mt_build_intra_predictors_mbuv_s(VP8D_COMP *pbi, MACROBLOCKD *x, int mb_row, int mb_col)
463 {
464 #if CONFIG_MULTITHREAD
465 unsigned char *uabove_row; // = x->dst.u_buffer - x->dst.uv_stride;
466 unsigned char *uleft_col; //[16];
467 unsigned char uleft_buf[8];
468 unsigned char utop_left; // = uabove_row[-1];
469 unsigned char *vabove_row; // = x->dst.v_buffer - x->dst.uv_stride;
470 unsigned char *vleft_col; //[20];
471 unsigned char vleft_buf[8];
472 unsigned char vtop_left; // = vabove_row[-1];
473 unsigned char *upred_ptr = x->dst.u_buffer; //&x->predictor[256];
474 unsigned char *vpred_ptr = x->dst.v_buffer; //&x->predictor[320];
475 int uv_stride = x->dst.uv_stride;
476 int i, j;
477
478 if (pbi->common.filter_level)
479 {
480 uabove_row = pbi->mt_uabove_row[mb_row] + mb_col*8 +16;
481 vabove_row = pbi->mt_vabove_row[mb_row] + mb_col*8 +16;
482 uleft_col = pbi->mt_uleft_col[mb_row];
483 vleft_col = pbi->mt_vleft_col[mb_row];
484 } else
485 {
486 uabove_row = x->dst.u_buffer - x->dst.uv_stride;
487 vabove_row = x->dst.v_buffer - x->dst.uv_stride;
488
489 for (i = 0; i < 8; i++)
490 {
491 uleft_buf[i] = x->dst.u_buffer [i* x->dst.uv_stride -1];
492 vleft_buf[i] = x->dst.v_buffer [i* x->dst.uv_stride -1];
493 }
494 uleft_col = uleft_buf;
495 vleft_col = vleft_buf;
496 }
497 utop_left = uabove_row[-1];
498 vtop_left = vabove_row[-1];
499
500 switch (x->mode_info_context->mbmi.uv_mode)
501 {
502 case DC_PRED:
503 {
504 int expected_udc;
505 int expected_vdc;
506 int i;
507 int shift;
508 int Uaverage = 0;
509 int Vaverage = 0;
510
511 if (x->up_available)
512 {
513 for (i = 0; i < 8; i++)
514 {
515 Uaverage += uabove_row[i];
516 Vaverage += vabove_row[i];
517 }
518 }
519
520 if (x->left_available)
521 {
522 for (i = 0; i < 8; i++)
523 {
524 Uaverage += uleft_col[i];
525 Vaverage += vleft_col[i];
526 }
527 }
528
529 if (!x->up_available && !x->left_available)
530 {
531 expected_udc = 128;
532 expected_vdc = 128;
533 }
534 else
535 {
536 shift = 2 + x->up_available + x->left_available;
537 expected_udc = (Uaverage + (1 << (shift - 1))) >> shift;
538 expected_vdc = (Vaverage + (1 << (shift - 1))) >> shift;
539 }
540
541
542 //vpx_memset(upred_ptr,expected_udc,64);
543 //vpx_memset(vpred_ptr,expected_vdc,64);
544 for (i = 0; i < 8; i++)
545 {
546 vpx_memset(upred_ptr, expected_udc, 8);
547 vpx_memset(vpred_ptr, expected_vdc, 8);
548 upred_ptr += uv_stride; //8;
549 vpred_ptr += uv_stride; //8;
550 }
551 }
552 break;
553 case V_PRED:
554 {
555 int i;
556
557 for (i = 0; i < 8; i++)
558 {
559 vpx_memcpy(upred_ptr, uabove_row, 8);
560 vpx_memcpy(vpred_ptr, vabove_row, 8);
561 upred_ptr += uv_stride; //8;
562 vpred_ptr += uv_stride; //8;
563 }
564
565 }
566 break;
567 case H_PRED:
568 {
569 int i;
570
571 for (i = 0; i < 8; i++)
572 {
573 vpx_memset(upred_ptr, uleft_col[i], 8);
574 vpx_memset(vpred_ptr, vleft_col[i], 8);
575 upred_ptr += uv_stride; //8;
576 vpred_ptr += uv_stride; //8;
577 }
578 }
579
580 break;
581 case TM_PRED:
582 {
583 int i;
584
585 for (i = 0; i < 8; i++)
586 {
587 for (j = 0; j < 8; j++)
588 {
589 int predu = uleft_col[i] + uabove_row[j] - utop_left;
590 int predv = vleft_col[i] + vabove_row[j] - vtop_left;
591
592 if (predu < 0)
593 predu = 0;
594
595 if (predu > 255)
596 predu = 255;
597
598 if (predv < 0)
599 predv = 0;
600
601 if (predv > 255)
602 predv = 255;
603
604 upred_ptr[j] = predu;
605 vpred_ptr[j] = predv;
606 }
607
608 upred_ptr += uv_stride; //8;
609 vpred_ptr += uv_stride; //8;
610 }
611
612 }
613 break;
614 case B_PRED:
615 case NEARESTMV:
616 case NEARMV:
617 case ZEROMV:
618 case NEWMV:
619 case SPLITMV:
620 case MB_MODE_COUNT:
621 break;
622 }
623 #else
624 (void) pbi;
625 (void) x;
626 (void) mb_row;
627 (void) mb_col;
628 #endif
629 }
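
A compact sketch of the TM_PRED arithmetic that the chroma loops above perform, using hypothetical helper names; it covers one 8x8 plane and would be called once for U and once for V. Each pixel is left[r] + above[c] - top_left, clamped to the 8-bit range:

/* Illustrative only: clamp to [0, 255]. */
static int clamp_255(int v)
{
    return v < 0 ? 0 : (v > 255 ? 255 : v);
}

/* Illustrative only: TM_PRED for one 8x8 chroma plane. stride is 8 for the
 * predictor buffer, or uv_stride when writing directly to the frame buffer. */
static void tm_predict_8x8(unsigned char *pred, int stride,
                           const unsigned char *above,
                           const unsigned char *left,
                           unsigned char top_left)
{
    int r, c;

    for (r = 0; r < 8; r++)
    {
        for (c = 0; c < 8; c++)
            pred[c] = (unsigned char)clamp_255(left[r] + above[c] - top_left);

        pred += stride;
    }
}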
630
631
632 void vp8mt_predict_intra4x4(VP8D_COMP *pbi,
633 MACROBLOCKD *xd,
634 int b_mode,
635 unsigned char *predictor,
636 int mb_row,
637 int mb_col,
638 int num)
639 {
640 #if CONFIG_MULTITHREAD
641 int i, r, c;
642
643 unsigned char *Above; // = *(x->base_dst) + x->dst - x->dst_stride;
644 unsigned char Left[4];
645 unsigned char top_left; // = Above[-1];
646
647 BLOCKD *x = &xd->block[num];
648
649 //Caution: For some b_mode, it needs 8 pixels (4 above + 4 above-right).
650 if (num < 4 && pbi->common.filter_level)
651 Above = pbi->mt_yabove_row[mb_row] + mb_col*16 + num*4 + 32;
652 else
653 Above = *(x->base_dst) + x->dst - x->dst_stride;
654
655 if (num%4==0 && pbi->common.filter_level)
656 {
657 for (i=0; i<4; i++)
658 Left[i] = pbi->mt_yleft_col[mb_row][num + i];
659 }else
660 {
661 Left[0] = (*(x->base_dst))[x->dst - 1];
662 Left[1] = (*(x->base_dst))[x->dst - 1 + x->dst_stride];
663 Left[2] = (*(x->base_dst))[x->dst - 1 + 2 * x->dst_stride];
664 Left[3] = (*(x->base_dst))[x->dst - 1 + 3 * x->dst_stride];
665 }
666
667 if ((num==4 || num==8 || num==12) && pbi->common.filter_level)
668 top_left = pbi->mt_yleft_col[mb_row][num-1];
669 else
670 top_left = Above[-1];
671
672 switch (b_mode)
673 {
674 case B_DC_PRED:
675 {
676 int expected_dc = 0;
677
678 for (i = 0; i < 4; i++)
679 {
680 expected_dc += Above[i];
681 expected_dc += Left[i];
682 }
683
684 expected_dc = (expected_dc + 4) >> 3;
685
686 for (r = 0; r < 4; r++)
687 {
688 for (c = 0; c < 4; c++)
689 {
690 predictor[c] = expected_dc;
691 }
692
693 predictor += 16;
694 }
695 }
696 break;
697 case B_TM_PRED:
698 {
699 // prediction similar to true_motion prediction
700 for (r = 0; r < 4; r++)
701 {
702 for (c = 0; c < 4; c++)
703 {
704 int pred = Above[c] - top_left + Left[r];
705
706 if (pred < 0)
707 pred = 0;
708
709 if (pred > 255)
710 pred = 255;
711
712 predictor[c] = pred;
713 }
714
715 predictor += 16;
716 }
717 }
718 break;
719
720 case B_VE_PRED:
721 {
722
723 unsigned int ap[4];
724 ap[0] = (top_left + 2 * Above[0] + Above[1] + 2) >> 2;
725 ap[1] = (Above[0] + 2 * Above[1] + Above[2] + 2) >> 2;
726 ap[2] = (Above[1] + 2 * Above[2] + Above[3] + 2) >> 2;
727 ap[3] = (Above[2] + 2 * Above[3] + Above[4] + 2) >> 2;
728
729 for (r = 0; r < 4; r++)
730 {
731 for (c = 0; c < 4; c++)
732 {
733
734 predictor[c] = ap[c];
735 }
736
737 predictor += 16;
738 }
739
740 }
741 break;
742
743
744 case B_HE_PRED:
745 {
746
747 unsigned int lp[4];
748 lp[0] = (top_left + 2 * Left[0] + Left[1] + 2) >> 2;
749 lp[1] = (Left[0] + 2 * Left[1] + Left[2] + 2) >> 2;
750 lp[2] = (Left[1] + 2 * Left[2] + Left[3] + 2) >> 2;
751 lp[3] = (Left[2] + 2 * Left[3] + Left[3] + 2) >> 2;
752
753 for (r = 0; r < 4; r++)
754 {
755 for (c = 0; c < 4; c++)
756 {
757 predictor[c] = lp[r];
758 }
759
760 predictor += 16;
761 }
762 }
763 break;
764 case B_LD_PRED:
765 {
766 unsigned char *ptr = Above;
767 predictor[0 * 16 + 0] = (ptr[0] + ptr[1] * 2 + ptr[2] + 2) >> 2;
768 predictor[0 * 16 + 1] =
769 predictor[1 * 16 + 0] = (ptr[1] + ptr[2] * 2 + ptr[3] + 2) >> 2;
770 predictor[0 * 16 + 2] =
771 predictor[1 * 16 + 1] =
772 predictor[2 * 16 + 0] = (ptr[2] + ptr[3] * 2 + ptr[4] + 2) >> 2;
773 predictor[0 * 16 + 3] =
774 predictor[1 * 16 + 2] =
775 predictor[2 * 16 + 1] =
776 predictor[3 * 16 + 0] = (ptr[3] + ptr[4] * 2 + ptr[5] + 2) >> 2;
777 predictor[1 * 16 + 3] =
778 predictor[2 * 16 + 2] =
779 predictor[3 * 16 + 1] = (ptr[4] + ptr[5] * 2 + ptr[6] + 2) >> 2;
780 predictor[2 * 16 + 3] =
781 predictor[3 * 16 + 2] = (ptr[5] + ptr[6] * 2 + ptr[7] + 2) >> 2;
782 predictor[3 * 16 + 3] = (ptr[6] + ptr[7] * 2 + ptr[7] + 2) >> 2;
783
784 }
785 break;
786 case B_RD_PRED:
787 {
788
789 unsigned char pp[9];
790
791 pp[0] = Left[3];
792 pp[1] = Left[2];
793 pp[2] = Left[1];
794 pp[3] = Left[0];
795 pp[4] = top_left;
796 pp[5] = Above[0];
797 pp[6] = Above[1];
798 pp[7] = Above[2];
799 pp[8] = Above[3];
800
801 predictor[3 * 16 + 0] = (pp[0] + pp[1] * 2 + pp[2] + 2) >> 2;
802 predictor[3 * 16 + 1] =
803 predictor[2 * 16 + 0] = (pp[1] + pp[2] * 2 + pp[3] + 2) >> 2;
804 predictor[3 * 16 + 2] =
805 predictor[2 * 16 + 1] =
806 predictor[1 * 16 + 0] = (pp[2] + pp[3] * 2 + pp[4] + 2) >> 2;
807 predictor[3 * 16 + 3] =
808 predictor[2 * 16 + 2] =
809 predictor[1 * 16 + 1] =
810 predictor[0 * 16 + 0] = (pp[3] + pp[4] * 2 + pp[5] + 2) >> 2;
811 predictor[2 * 16 + 3] =
812 predictor[1 * 16 + 2] =
813 predictor[0 * 16 + 1] = (pp[4] + pp[5] * 2 + pp[6] + 2) >> 2;
814 predictor[1 * 16 + 3] =
815 predictor[0 * 16 + 2] = (pp[5] + pp[6] * 2 + pp[7] + 2) >> 2;
816 predictor[0 * 16 + 3] = (pp[6] + pp[7] * 2 + pp[8] + 2) >> 2;
817
818 }
819 break;
820 case B_VR_PRED:
821 {
822
823 unsigned char pp[9];
824
825 pp[0] = Left[3];
826 pp[1] = Left[2];
827 pp[2] = Left[1];
828 pp[3] = Left[0];
829 pp[4] = top_left;
830 pp[5] = Above[0];
831 pp[6] = Above[1];
832 pp[7] = Above[2];
833 pp[8] = Above[3];
834
835
836 predictor[3 * 16 + 0] = (pp[1] + pp[2] * 2 + pp[3] + 2) >> 2;
837 predictor[2 * 16 + 0] = (pp[2] + pp[3] * 2 + pp[4] + 2) >> 2;
838 predictor[3 * 16 + 1] =
839 predictor[1 * 16 + 0] = (pp[3] + pp[4] * 2 + pp[5] + 2) >> 2;
840 predictor[2 * 16 + 1] =
841 predictor[0 * 16 + 0] = (pp[4] + pp[5] + 1) >> 1;
842 predictor[3 * 16 + 2] =
843 predictor[1 * 16 + 1] = (pp[4] + pp[5] * 2 + pp[6] + 2) >> 2;
844 predictor[2 * 16 + 2] =
845 predictor[0 * 16 + 1] = (pp[5] + pp[6] + 1) >> 1;
846 predictor[3 * 16 + 3] =
847 predictor[1 * 16 + 2] = (pp[5] + pp[6] * 2 + pp[7] + 2) >> 2;
848 predictor[2 * 16 + 3] =
849 predictor[0 * 16 + 2] = (pp[6] + pp[7] + 1) >> 1;
850 predictor[1 * 16 + 3] = (pp[6] + pp[7] * 2 + pp[8] + 2) >> 2;
851 predictor[0 * 16 + 3] = (pp[7] + pp[8] + 1) >> 1;
852
853 }
854 break;
855 case B_VL_PRED:
856 {
857
858 unsigned char *pp = Above;
859
860 predictor[0 * 16 + 0] = (pp[0] + pp[1] + 1) >> 1;
861 predictor[1 * 16 + 0] = (pp[0] + pp[1] * 2 + pp[2] + 2) >> 2;
862 predictor[2 * 16 + 0] =
863 predictor[0 * 16 + 1] = (pp[1] + pp[2] + 1) >> 1;
864 predictor[1 * 16 + 1] =
865 predictor[3 * 16 + 0] = (pp[1] + pp[2] * 2 + pp[3] + 2) >> 2;
866 predictor[2 * 16 + 1] =
867 predictor[0 * 16 + 2] = (pp[2] + pp[3] + 1) >> 1;
868 predictor[3 * 16 + 1] =
869 predictor[1 * 16 + 2] = (pp[2] + pp[3] * 2 + pp[4] + 2) >> 2;
870 predictor[0 * 16 + 3] =
871 predictor[2 * 16 + 2] = (pp[3] + pp[4] + 1) >> 1;
872 predictor[1 * 16 + 3] =
873 predictor[3 * 16 + 2] = (pp[3] + pp[4] * 2 + pp[5] + 2) >> 2;
874 predictor[2 * 16 + 3] = (pp[4] + pp[5] * 2 + pp[6] + 2) >> 2;
875 predictor[3 * 16 + 3] = (pp[5] + pp[6] * 2 + pp[7] + 2) >> 2;
876 }
877 break;
878
879 case B_HD_PRED:
880 {
881 unsigned char pp[9];
882 pp[0] = Left[3];
883 pp[1] = Left[2];
884 pp[2] = Left[1];
885 pp[3] = Left[0];
886 pp[4] = top_left;
887 pp[5] = Above[0];
888 pp[6] = Above[1];
889 pp[7] = Above[2];
890 pp[8] = Above[3];
891
892
893 predictor[3 * 16 + 0] = (pp[0] + pp[1] + 1) >> 1;
894 predictor[3 * 16 + 1] = (pp[0] + pp[1] * 2 + pp[2] + 2) >> 2;
895 predictor[2 * 16 + 0] =
896 predictor[3 * 16 + 2] = (pp[1] + pp[2] + 1) >> 1;
897 predictor[2 * 16 + 1] =
898 predictor[3 * 16 + 3] = (pp[1] + pp[2] * 2 + pp[3] + 2) >> 2;
899 predictor[2 * 16 + 2] =
900 predictor[1 * 16 + 0] = (pp[2] + pp[3] + 1) >> 1;
901 predictor[2 * 16 + 3] =
902 predictor[1 * 16 + 1] = (pp[2] + pp[3] * 2 + pp[4] + 2) >> 2;
903 predictor[1 * 16 + 2] =
904 predictor[0 * 16 + 0] = (pp[3] + pp[4] + 1) >> 1;
905 predictor[1 * 16 + 3] =
906 predictor[0 * 16 + 1] = (pp[3] + pp[4] * 2 + pp[5] + 2) >> 2;
907 predictor[0 * 16 + 2] = (pp[4] + pp[5] * 2 + pp[6] + 2) >> 2;
908 predictor[0 * 16 + 3] = (pp[5] + pp[6] * 2 + pp[7] + 2) >> 2;
909 }
910 break;
911
912
913 case B_HU_PRED:
914 {
915 unsigned char *pp = Left;
916 predictor[0 * 16 + 0] = (pp[0] + pp[1] + 1) >> 1;
917 predictor[0 * 16 + 1] = (pp[0] + pp[1] * 2 + pp[2] + 2) >> 2;
918 predictor[0 * 16 + 2] =
919 predictor[1 * 16 + 0] = (pp[1] + pp[2] + 1) >> 1;
920 predictor[0 * 16 + 3] =
921 predictor[1 * 16 + 1] = (pp[1] + pp[2] * 2 + pp[3] + 2) >> 2;
922 predictor[1 * 16 + 2] =
923 predictor[2 * 16 + 0] = (pp[2] + pp[3] + 1) >> 1;
924 predictor[1 * 16 + 3] =
925 predictor[2 * 16 + 1] = (pp[2] + pp[3] * 2 + pp[3] + 2) >> 2;
926 predictor[2 * 16 + 2] =
927 predictor[2 * 16 + 3] =
928 predictor[3 * 16 + 0] =
929 predictor[3 * 16 + 1] =
930 predictor[3 * 16 + 2] =
931 predictor[3 * 16 + 3] = pp[3];
932 }
933 break;
934
935
936 }
937 #else
938 (void) pbi;
939 (void) xd;
940 (void) b_mode;
941 (void) predictor;
942 (void) mb_row;
943 (void) mb_col;
944 (void) num;
945 #endif
946 }
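
A standalone sketch of the B_VE_PRED case above (hypothetical helper; it assumes the 4x4 predictor rows sit 16 bytes apart because they live inside the 16x16 macroblock predictor buffer). The above row is smoothed with a (1,2,1)/4 filter, and ap[3] reads Above[4], one pixel above and to the right, which is what the down-copy helper at the end of this file provides for sub-blocks on the right edge:

/* Illustrative only: B_VE_PRED for one 4x4 sub-block. */
static void b_ve_predict_4x4(unsigned char *predictor,
                             const unsigned char *Above,
                             unsigned char top_left)
{
    unsigned char ap[4];
    int r, c;

    ap[0] = (unsigned char)((top_left + 2 * Above[0] + Above[1] + 2) >> 2);
    ap[1] = (unsigned char)((Above[0] + 2 * Above[1] + Above[2] + 2) >> 2);
    ap[2] = (unsigned char)((Above[1] + 2 * Above[2] + Above[3] + 2) >> 2);
    ap[3] = (unsigned char)((Above[2] + 2 * Above[3] + Above[4] + 2) >> 2);

    for (r = 0; r < 4; r++)
    {
        for (c = 0; c < 4; c++)
            predictor[c] = ap[c];

        predictor += 16;  /* row stride of the macroblock predictor buffer */
    }
}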
947
948 // Copy 4 bytes from the above-right down so that the 4x4 prediction modes
949 // that use pixels above and to the right have filled-in pixels to use.
950 void vp8mt_intra_prediction_down_copy(VP8D_COMP *pbi, MACROBLOCKD *x, int mb_row, int mb_col)
951 {
952 #if CONFIG_MULTITHREAD
953 unsigned char *above_right; // = *(x->block[0].base_dst) + x->block[0].dst - x->block[0].dst_stride + 16;
954 unsigned int *src_ptr;
955 unsigned int *dst_ptr0;
956 unsigned int *dst_ptr1;
957 unsigned int *dst_ptr2;
958
959 if (pbi->common.filter_level)
960 above_right = pbi->mt_yabove_row[mb_row] + mb_col*16 + 32 +16;
961 else
962 above_right = *(x->block[0].base_dst) + x->block[0].dst - x->block[0].dst_stride + 16;
963
964 src_ptr = (unsigned int *)above_right;
965 //dst_ptr0 = (unsigned int *)(above_right + 4 * x->block[0].dst_stride);
966 //dst_ptr1 = (unsigned int *)(above_right + 8 * x->block[0].dst_stride);
967 //dst_ptr2 = (unsigned int *)(above_right + 12 * x->block[0].dst_stride);
968 dst_ptr0 = (unsigned int *)(*(x->block[0].base_dst) + x->block[0].dst + 16 + 3 * x->block[0].dst_stride);
969 dst_ptr1 = (unsigned int *)(*(x->block[0].base_dst) + x->block[0].dst + 16 + 7 * x->block[0].dst_stride);
970 dst_ptr2 = (unsigned int *)(*(x->block[0].base_dst) + x->block[0].dst + 16 + 11 * x->block[0].dst_stride);
971 *dst_ptr0 = *src_ptr;
972 *dst_ptr1 = *src_ptr;
973 *dst_ptr2 = *src_ptr;
974 #else
975 (void) pbi;
976 (void) x;
977 (void) mb_row;
978 (void) mb_col;
979 #endif
980 }
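
A stride-based illustration of the same down-copy (hypothetical helper taking a plain frame-buffer pointer, with memcpy standing in for the 32-bit word writes): the 4 pixels just above and to the right of the 16x16 block are replicated beside rows 3, 7 and 11, so 4x4 modes that read above-right pixels always find valid data.

#include <string.h>

/* Illustrative only: buf points at the top-left pixel of a 16x16 block
 * inside a reconstructed frame with the given row stride. */
static void down_copy_above_right(unsigned char *buf, int stride)
{
    const unsigned char *above_right = buf - stride + 16;

    memcpy(buf + 16 + 3 * stride, above_right, 4);
    memcpy(buf + 16 + 7 * stride, above_right, 4);
    memcpy(buf + 16 + 11 * stride, above_right, 4);
}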