| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. | 2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| 11 #include <stdio.h> | 11 #include <stdio.h> |
| 12 #include "vpx_ports/config.h" | 12 #include "./vpx_config.h" |
| 13 #include "vp9_rtcd.h" | 13 #include "vp9_rtcd.h" |
| 14 #include "vp9/common/vp9_reconintra.h" | 14 #include "vp9/common/vp9_reconintra.h" |
| 15 #include "vpx_mem/vpx_mem.h" | 15 #include "vpx_mem/vpx_mem.h" |
| 16 | 16 |
| 17 /* For skip_recon_mb(), add vp9_build_intra_predictors_mby_s(MACROBLOCKD *xd) | 17 /* For skip_recon_mb(), add vp9_build_intra_predictors_mby_s(MACROBLOCKD *xd) |
| 18 * and vp9_build_intra_predictors_mbuv_s(MACROBLOCKD *xd). | 18 * and vp9_build_intra_predictors_mbuv_s(MACROBLOCKD *xd). |
| 19 */ | 19 */ |
| 20 | 20 |
| 21 static void d27_predictor(uint8_t *ypred_ptr, int y_stride, int n, | 21 static void d27_predictor(uint8_t *ypred_ptr, int y_stride, int n, |
| 22 uint8_t *yabove_row, uint8_t *yleft_col) { | 22 uint8_t *yabove_row, uint8_t *yleft_col) { |
| (...skipping 94 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 117 uint8_t *yabove_row, uint8_t *yleft_col) { | 117 uint8_t *yabove_row, uint8_t *yleft_col) { |
| 118 int r, c; | 118 int r, c; |
| 119 for (r = 0; r < n - 1; ++r) { | 119 for (r = 0; r < n - 1; ++r) { |
| 120 for (c = 0; c <= r; ++c) { | 120 for (c = 0; c <= r; ++c) { |
| 121 ypred_ptr[(r - c) * y_stride + c] = | 121 ypred_ptr[(r - c) * y_stride + c] = |
| 122 (yabove_row[r + 1] * (c + 1) + | 122 (yabove_row[r + 1] * (c + 1) + |
| 123 yleft_col[r + 1] * (r - c + 1) + r / 2 + 1) / (r + 2); | 123 yleft_col[r + 1] * (r - c + 1) + r / 2 + 1) / (r + 2); |
| 124 } | 124 } |
| 125 } | 125 } |
| 126 for (c = 0; c <= r; ++c) { | 126 for (c = 0; c <= r; ++c) { |
| 127 int yabove_ext = yabove_row[r]; // 2*yabove_row[r] - yabove_row[r-1]; | 127 int yabove_ext = yabove_row[r]; // clip_pixel(2 * yabove_row[r] - |
| 128 int yleft_ext = yleft_col[r]; // 2*yleft_col[r] - yleft_col[r-1]; | 128 // yabove_row[r - 1]); |
| 129 yabove_ext = (yabove_ext > 255 ? 255 : (yabove_ext < 0 ? 0 : yabove_ext)); | 129 int yleft_ext = yleft_col[r]; // clip_pixel(2 * yleft_col[r] - |
| 130 yleft_ext = (yleft_ext > 255 ? 255 : (yleft_ext < 0 ? 0 : yleft_ext)); | 130 // yleft_col[r - 1]); |
| 131 ypred_ptr[(r - c) * y_stride + c] = | 131 ypred_ptr[(r - c) * y_stride + c] = |
| 132 (yabove_ext * (c + 1) + | 132 (yabove_ext * (c + 1) + |
| 133 yleft_ext * (r - c + 1) + r / 2 + 1) / (r + 2); | 133 yleft_ext * (r - c + 1) + r / 2 + 1) / (r + 2); |
| 134 } | 134 } |
| 135 for (r = 1; r < n; ++r) { | 135 for (r = 1; r < n; ++r) { |
| 136 for (c = n - r; c < n; ++c) | 136 for (c = n - r; c < n; ++c) { |
| 137 ypred_ptr[r * y_stride + c] = (ypred_ptr[(r - 1) * y_stride + c] + | 137 const int yabove_ext = ypred_ptr[(r - 1) * y_stride + c]; |
| 138 ypred_ptr[r * y_stride + c - 1] + 1) >> 1; | 138 const int yleft_ext = ypred_ptr[r * y_stride + c - 1]; |
| 139 ypred_ptr[r * y_stride + c] = (yabove_ext + yleft_ext + 1) >> 1; |
| 140 } |
| 139 } | 141 } |
| 140 } | 142 } |
| 141 | 143 |
| 142 static void d117_predictor(uint8_t *ypred_ptr, int y_stride, int n, | 144 static void d117_predictor(uint8_t *ypred_ptr, int y_stride, int n, |
| 143 uint8_t *yabove_row, uint8_t *yleft_col) { | 145 uint8_t *yabove_row, uint8_t *yleft_col) { |
| 144 int r, c; | 146 int r, c; |
| 145 for (c = 0; c < n; c++) | 147 for (c = 0; c < n; c++) |
| 146 ypred_ptr[c] = (yabove_row[c - 1] + yabove_row[c] + 1) >> 1; | 148 ypred_ptr[c] = (yabove_row[c - 1] + yabove_row[c] + 1) >> 1; |
| 147 ypred_ptr += y_stride; | 149 ypred_ptr += y_stride; |
| 148 for (c = 0; c < n; c++) | 150 for (c = 0; c < n; c++) |
| (...skipping 40 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 189 for (c = 0; c < n - 2; c++) | 191 for (c = 0; c < n - 2; c++) |
| 190 ypred_ptr[c] = yabove_row[c]; | 192 ypred_ptr[c] = yabove_row[c]; |
| 191 ypred_ptr += y_stride; | 193 ypred_ptr += y_stride; |
| 192 for (r = 1; r < n; ++r) { | 194 for (r = 1; r < n; ++r) { |
| 193 for (c = 0; c < n - 2; c++) | 195 for (c = 0; c < n - 2; c++) |
| 194 ypred_ptr[c] = ypred_ptr[-y_stride + c - 2]; | 196 ypred_ptr[c] = ypred_ptr[-y_stride + c - 2]; |
| 195 ypred_ptr += y_stride; | 197 ypred_ptr += y_stride; |
| 196 } | 198 } |
| 197 } | 199 } |
| 198 | 200 |
| 199 static void corner_predictor(unsigned char *ypred_ptr, int y_stride, int n, | 201 static void corner_predictor(uint8_t *ypred_ptr, int y_stride, int n, |
| 200 unsigned char *yabove_row, | 202 uint8_t *yabove_row, |
| 201 unsigned char *yleft_col) { | 203 uint8_t *yleft_col) { |
| 202 int mh, mv, maxgradh, maxgradv, x, y, nx, ny; | 204 int mh, mv, maxgradh, maxgradv, x, y, nx, ny; |
| 203 int i, j; | 205 int i, j; |
| 204 int top_left = yabove_row[-1]; | 206 int top_left = yabove_row[-1]; |
| 205 mh = mv = 0; | 207 mh = mv = 0; |
| 206 maxgradh = yabove_row[1] - top_left; | 208 maxgradh = yabove_row[1] - top_left; |
| 207 maxgradv = yleft_col[1] - top_left; | 209 maxgradv = yleft_col[1] - top_left; |
| 208 for (i = 2; i < n; ++i) { | 210 for (i = 2; i < n; ++i) { |
| 209 int gh = yabove_row[i] - yabove_row[i - 2]; | 211 int gh = yabove_row[i] - yabove_row[i - 2]; |
| 210 int gv = yleft_col[i] - yleft_col[i - 2]; | 212 int gv = yleft_col[i] - yleft_col[i - 2]; |
| 211 if (gh > maxgradh) { | 213 if (gh > maxgradh) { |
| (...skipping 27 matching lines...) Expand all Loading... |
| 239 } | 241 } |
| 240 | 242 |
| 241 void vp9_recon_intra_mbuv(MACROBLOCKD *xd) { | 243 void vp9_recon_intra_mbuv(MACROBLOCKD *xd) { |
| 242 int i; | 244 int i; |
| 243 for (i = 16; i < 24; i += 2) { | 245 for (i = 16; i < 24; i += 2) { |
| 244 BLOCKD *b = &xd->block[i]; | 246 BLOCKD *b = &xd->block[i]; |
| 245 vp9_recon2b(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride); | 247 vp9_recon2b(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride); |
| 246 } | 248 } |
| 247 } | 249 } |
| 248 | 250 |
| 249 void vp9_build_intra_predictors_internal(unsigned char *src, int src_stride, | 251 void vp9_build_intra_predictors_internal(uint8_t *src, int src_stride, |
| 250 unsigned char *ypred_ptr, | 252 uint8_t *ypred_ptr, |
| 251 int y_stride, int mode, int bsize, | 253 int y_stride, int mode, int bsize, |
| 252 int up_available, int left_available) { | 254 int up_available, int left_available) { |
| 253 | 255 |
| 254 unsigned char *yabove_row = src - src_stride; | 256 uint8_t *yabove_row = src - src_stride; |
| 255 unsigned char yleft_col[32]; | 257 uint8_t yleft_col[64]; |
| 256 unsigned char ytop_left = yabove_row[-1]; | 258 uint8_t ytop_left = yabove_row[-1]; |
| 257 int r, c, i; | 259 int r, c, i; |
| 258 | 260 |
| 259 for (i = 0; i < bsize; i++) { | 261 for (i = 0; i < bsize; i++) { |
| 260 yleft_col[i] = src[i * src_stride - 1]; | 262 yleft_col[i] = src[i * src_stride - 1]; |
| 261 } | 263 } |
| 262 | 264 |
| 263 /* for Y */ | 265 /* for Y */ |
| 264 switch (mode) { | 266 switch (mode) { |
| 265 case DC_PRED: { | 267 case DC_PRED: { |
| 266 int expected_dc; | 268 int expected_dc; |
| 267 int i; | 269 int i; |
| 268 int shift; | 270 int shift; |
| 269 int average = 0; | 271 int average = 0; |
| 270 int log2_bsize_minus_1; | 272 int log2_bsize_minus_1; |
| 271 | 273 |
| 272 assert(bsize == 4 || bsize == 8 || bsize == 16 || bsize == 32); | 274 assert(bsize == 4 || bsize == 8 || bsize == 16 || bsize == 32 || |
| 275 bsize == 64); |
| 273 if (bsize == 4) { | 276 if (bsize == 4) { |
| 274 log2_bsize_minus_1 = 1; | 277 log2_bsize_minus_1 = 1; |
| 275 } else if (bsize == 8) { | 278 } else if (bsize == 8) { |
| 276 log2_bsize_minus_1 = 2; | 279 log2_bsize_minus_1 = 2; |
| 277 } else if (bsize == 16) { | 280 } else if (bsize == 16) { |
| 278 log2_bsize_minus_1 = 3; | 281 log2_bsize_minus_1 = 3; |
| 279 } else /* bsize == 32 */ { | 282 } else if (bsize == 32) { |
| 280 log2_bsize_minus_1 = 4; | 283 log2_bsize_minus_1 = 4; |
| 284 } else { |
| 285 assert(bsize == 64); |
| 286 log2_bsize_minus_1 = 5; |
| 281 } | 287 } |
| 282 | 288 |
| 283 if (up_available || left_available) { | 289 if (up_available || left_available) { |
| 284 if (up_available) { | 290 if (up_available) { |
| 285 for (i = 0; i < bsize; i++) { | 291 for (i = 0; i < bsize; i++) { |
| 286 average += yabove_row[i]; | 292 average += yabove_row[i]; |
| 287 } | 293 } |
| 288 } | 294 } |
| 289 | 295 |
| 290 if (left_available) { | 296 if (left_available) { |
| (...skipping 23 matching lines...) Expand all Loading... |
| 314 case H_PRED: { | 320 case H_PRED: { |
| 315 for (r = 0; r < bsize; r++) { | 321 for (r = 0; r < bsize; r++) { |
| 316 vpx_memset(ypred_ptr, yleft_col[r], bsize); | 322 vpx_memset(ypred_ptr, yleft_col[r], bsize); |
| 317 ypred_ptr += y_stride; | 323 ypred_ptr += y_stride; |
| 318 } | 324 } |
| 319 } | 325 } |
| 320 break; | 326 break; |
| 321 case TM_PRED: { | 327 case TM_PRED: { |
| 322 for (r = 0; r < bsize; r++) { | 328 for (r = 0; r < bsize; r++) { |
| 323 for (c = 0; c < bsize; c++) { | 329 for (c = 0; c < bsize; c++) { |
| 324 int pred = yleft_col[r] + yabove_row[ c] - ytop_left; | 330 ypred_ptr[c] = clip_pixel(yleft_col[r] + yabove_row[c] - ytop_left); |
| 325 | |
| 326 if (pred < 0) | |
| 327 pred = 0; | |
| 328 | |
| 329 if (pred > 255) | |
| 330 pred = 255; | |
| 331 | |
| 332 ypred_ptr[c] = pred; | |
| 333 } | 331 } |
| 334 | 332 |
| 335 ypred_ptr += y_stride; | 333 ypred_ptr += y_stride; |
| 336 } | 334 } |
| 337 } | 335 } |
| 338 break; | 336 break; |
| 339 case D45_PRED: { | 337 case D45_PRED: { |
| 340 d45_predictor(ypred_ptr, y_stride, bsize, yabove_row, yleft_col); | 338 d45_predictor(ypred_ptr, y_stride, bsize, yabove_row, yleft_col); |
| 341 } | 339 } |
| 342 break; | 340 break; |
| (...skipping 24 matching lines...) Expand all Loading... |
| 367 case ZEROMV: | 365 case ZEROMV: |
| 368 case NEWMV: | 366 case NEWMV: |
| 369 case SPLITMV: | 367 case SPLITMV: |
| 370 case MB_MODE_COUNT: | 368 case MB_MODE_COUNT: |
| 371 break; | 369 break; |
| 372 } | 370 } |
| 373 } | 371 } |
| 374 | 372 |
| 375 #if CONFIG_COMP_INTERINTRA_PRED | 373 #if CONFIG_COMP_INTERINTRA_PRED |
| 376 static void combine_interintra(MB_PREDICTION_MODE mode, | 374 static void combine_interintra(MB_PREDICTION_MODE mode, |
| 377 unsigned char *interpred, | 375 uint8_t *interpred, |
| 378 int interstride, | 376 int interstride, |
| 379 unsigned char *intrapred, | 377 uint8_t *intrapred, |
| 380 int intrastride, | 378 int intrastride, |
| 381 int size) { | 379 int size) { |
| 382 // TODO(debargha): Explore different ways of combining predictors | 380 // TODO(debargha): Explore different ways of combining predictors |
| 383 // or designing the tables below | 381 // or designing the tables below |
| 384 static const int scale_bits = 8; | 382 static const int scale_bits = 8; |
| 385 static const int scale_max = 256; // 1 << scale_bits; | 383 static const int scale_max = 256; // 1 << scale_bits; |
| 386 static const int scale_round = 127; // (1 << (scale_bits - 1)); | 384 static const int scale_round = 127; // (1 << (scale_bits - 1)); |
| 387 // This table is a function A + B*exp(-kx), where x is hor. index | 385 // This table is a function A + B*exp(-kx), where x is hor. index |
| 388 static const int weights1d[32] = { | 386 static const int weights1d[32] = { |
| 389 128, 122, 116, 111, 107, 103, 99, 96, | 387 128, 122, 116, 111, 107, 103, 99, 96, |
| (...skipping 126 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 516 69, 68, 68, 68, 68, 68, 67, 67, | 514 69, 68, 68, 68, 68, 68, 67, 67, |
| 517 128, 101, 93, 89, 85, 83, 81, 79, | 515 128, 101, 93, 89, 85, 83, 81, 79, |
| 518 78, 76, 75, 74, 74, 73, 72, 72, | 516 78, 76, 75, 74, 74, 73, 72, 72, |
| 519 71, 71, 70, 70, 70, 69, 69, 69, | 517 71, 71, 70, 70, 70, 69, 69, 69, |
| 520 68, 68, 68, 68, 68, 67, 67, 67, | 518 68, 68, 68, 68, 68, 67, 67, 67, |
| 521 128, 101, 93, 88, 85, 82, 80, 79, | 519 128, 101, 93, 88, 85, 82, 80, 79, |
| 522 77, 76, 75, 74, 73, 73, 72, 71, | 520 77, 76, 75, 74, 73, 73, 72, 71, |
| 523 71, 70, 70, 70, 69, 69, 69, 68, | 521 71, 70, 70, 70, 69, 69, 69, 68, |
| 524 68, 68, 68, 68, 67, 67, 67, 67, | 522 68, 68, 68, 68, 67, 67, 67, 67, |
| 525 }; | 523 }; |
| 526 int size_scale = (size == 32 ? 1 : | 524 int size_scale = (size >= 32 ? 1 : |
| 527 size == 16 ? 2 : | 525 size == 16 ? 2 : |
| 528 size == 8 ? 4 : 8); | 526 size == 8 ? 4 : 8); |
| 527 int size_shift = size == 64 ? 1 : 0; |
| 529 int i, j; | 528 int i, j; |
| 530 switch (mode) { | 529 switch (mode) { |
| 531 case V_PRED: | 530 case V_PRED: |
| 532 for (i = 0; i < size; ++i) { | 531 for (i = 0; i < size; ++i) { |
| 533 for (j = 0; j < size; ++j) { | 532 for (j = 0; j < size; ++j) { |
| 534 int k = i * interstride + j; | 533 int k = i * interstride + j; |
| 535 int scale = weights1d[i * size_scale]; | 534 int scale = weights1d[i * size_scale >> size_shift]; |
| 536 interpred[k] = | 535 interpred[k] = |
| 537 ((scale_max - scale) * interpred[k] + | 536 ((scale_max - scale) * interpred[k] + |
| 538 scale * intrapred[i * intrastride + j] + scale_round) | 537 scale * intrapred[i * intrastride + j] + scale_round) |
| 539 >> scale_bits; | 538 >> scale_bits; |
| 540 } | 539 } |
| 541 } | 540 } |
| 542 break; | 541 break; |
| 543 | 542 |
| 544 case H_PRED: | 543 case H_PRED: |
| 545 for (i = 0; i < size; ++i) { | 544 for (i = 0; i < size; ++i) { |
| 546 for (j = 0; j < size; ++j) { | 545 for (j = 0; j < size; ++j) { |
| 547 int k = i * interstride + j; | 546 int k = i * interstride + j; |
| 548 int scale = weights1d[j * size_scale]; | 547 int scale = weights1d[j * size_scale >> size_shift]; |
| 549 interpred[k] = | 548 interpred[k] = |
| 550 ((scale_max - scale) * interpred[k] + | 549 ((scale_max - scale) * interpred[k] + |
| 551 scale * intrapred[i * intrastride + j] + scale_round) | 550 scale * intrapred[i * intrastride + j] + scale_round) |
| 552 >> scale_bits; | 551 >> scale_bits; |
| 553 } | 552 } |
| 554 } | 553 } |
| 555 break; | 554 break; |
| 556 | 555 |
| 557 case D63_PRED: | 556 case D63_PRED: |
| 558 case D117_PRED: | 557 case D117_PRED: |
| 559 for (i = 0; i < size; ++i) { | 558 for (i = 0; i < size; ++i) { |
| 560 for (j = 0; j < size; ++j) { | 559 for (j = 0; j < size; ++j) { |
| 561 int k = i * interstride + j; | 560 int k = i * interstride + j; |
| 562 int scale = (weights2d[i * size_scale * 32 + j * size_scale] + | 561 int scale = (weights2d[(i * size_scale * 32 + |
| 563 weights1d[i * size_scale]) >> 1; | 562 j * size_scale) >> size_shift] + |
| 563 weights1d[i * size_scale >> size_shift]) >> 1; |
| 564 interpred[k] = | 564 interpred[k] = |
| 565 ((scale_max - scale) * interpred[k] + | 565 ((scale_max - scale) * interpred[k] + |
| 566 scale * intrapred[i * intrastride + j] + scale_round) | 566 scale * intrapred[i * intrastride + j] + scale_round) |
| 567 >> scale_bits; | 567 >> scale_bits; |
| 568 } | 568 } |
| 569 } | 569 } |
| 570 break; | 570 break; |
| 571 | 571 |
| 572 case D27_PRED: | 572 case D27_PRED: |
| 573 case D153_PRED: | 573 case D153_PRED: |
| 574 for (i = 0; i < size; ++i) { | 574 for (i = 0; i < size; ++i) { |
| 575 for (j = 0; j < size; ++j) { | 575 for (j = 0; j < size; ++j) { |
| 576 int k = i * interstride + j; | 576 int k = i * interstride + j; |
| 577 int scale = (weights2d[i * size_scale * 32 + j * size_scale] + | 577 int scale = (weights2d[(i * size_scale * 32 + |
| 578 weights1d[j * size_scale]) >> 1; | 578 j * size_scale) >> size_shift] + |
| 579 weights1d[j * size_scale >> size_shift]) >> 1; |
| 579 interpred[k] = | 580 interpred[k] = |
| 580 ((scale_max - scale) * interpred[k] + | 581 ((scale_max - scale) * interpred[k] + |
| 581 scale * intrapred[i * intrastride + j] + scale_round) | 582 scale * intrapred[i * intrastride + j] + scale_round) |
| 582 >> scale_bits; | 583 >> scale_bits; |
| 583 } | 584 } |
| 584 } | 585 } |
| 585 break; | 586 break; |
| 586 | 587 |
| 587 case D135_PRED: | 588 case D135_PRED: |
| 588 for (i = 0; i < size; ++i) { | 589 for (i = 0; i < size; ++i) { |
| 589 for (j = 0; j < size; ++j) { | 590 for (j = 0; j < size; ++j) { |
| 590 int k = i * interstride + j; | 591 int k = i * interstride + j; |
| 591 int scale = weights2d[i * size_scale * 32 + j * size_scale]; | 592 int scale = weights2d[(i * size_scale * 32 + |
| 593 j * size_scale) >> size_shift]; |
| 592 interpred[k] = | 594 interpred[k] = |
| 593 ((scale_max - scale) * interpred[k] + | 595 ((scale_max - scale) * interpred[k] + |
| 594 scale * intrapred[i * intrastride + j] + scale_round) | 596 scale * intrapred[i * intrastride + j] + scale_round) |
| 595 >> scale_bits; | 597 >> scale_bits; |
| 596 } | 598 } |
| 597 } | 599 } |
| 598 break; | 600 break; |
| 599 | 601 |
| 600 case D45_PRED: | 602 case D45_PRED: |
| 601 case DC_PRED: | 603 case DC_PRED: |
| 602 case TM_PRED: | 604 case TM_PRED: |
| 603 default: | 605 default: |
| 604 // simple average | 606 // simple average |
| 605 for (i = 0; i < size; ++i) { | 607 for (i = 0; i < size; ++i) { |
| 606 for (j = 0; j < size; ++j) { | 608 for (j = 0; j < size; ++j) { |
| 607 int k = i * interstride + j; | 609 int k = i * interstride + j; |
| 608 interpred[k] = (interpred[k] + intrapred[i * intrastride + j]) >> 1; | 610 interpred[k] = (interpred[k] + intrapred[i * intrastride + j]) >> 1; |
| 609 } | 611 } |
| 610 } | 612 } |
| 611 break; | 613 break; |
| 612 } | 614 } |
| 613 } | 615 } |
| 614 | 616 |
| 615 void vp9_build_interintra_16x16_predictors_mb(MACROBLOCKD *xd, | 617 void vp9_build_interintra_16x16_predictors_mb(MACROBLOCKD *xd, |
| 616 unsigned char *ypred, | 618 uint8_t *ypred, |
| 617 unsigned char *upred, | 619 uint8_t *upred, |
| 618 unsigned char *vpred, | 620 uint8_t *vpred, |
| 619 int ystride, int uvstride) { | 621 int ystride, int uvstride) { |
| 620 vp9_build_interintra_16x16_predictors_mby(xd, ypred, ystride); | 622 vp9_build_interintra_16x16_predictors_mby(xd, ypred, ystride); |
| 621 vp9_build_interintra_16x16_predictors_mbuv(xd, upred, vpred, uvstride); | 623 vp9_build_interintra_16x16_predictors_mbuv(xd, upred, vpred, uvstride); |
| 622 } | 624 } |
| 623 | 625 |
| 624 void vp9_build_interintra_16x16_predictors_mby(MACROBLOCKD *xd, | 626 void vp9_build_interintra_16x16_predictors_mby(MACROBLOCKD *xd, |
| 625 unsigned char *ypred, | 627 uint8_t *ypred, |
| 626 int ystride) { | 628 int ystride) { |
| 627 unsigned char intrapredictor[256]; | 629 uint8_t intrapredictor[256]; |
| 628 vp9_build_intra_predictors_internal( | 630 vp9_build_intra_predictors_internal( |
| 629 xd->dst.y_buffer, xd->dst.y_stride, | 631 xd->dst.y_buffer, xd->dst.y_stride, |
| 630 intrapredictor, 16, | 632 intrapredictor, 16, |
| 631 xd->mode_info_context->mbmi.interintra_mode, 16, | 633 xd->mode_info_context->mbmi.interintra_mode, 16, |
| 632 xd->up_available, xd->left_available); | 634 xd->up_available, xd->left_available); |
| 633 combine_interintra(xd->mode_info_context->mbmi.interintra_mode, | 635 combine_interintra(xd->mode_info_context->mbmi.interintra_mode, |
| 634 ypred, ystride, intrapredictor, 16, 16); | 636 ypred, ystride, intrapredictor, 16, 16); |
| 635 } | 637 } |
| 636 | 638 |
| 637 void vp9_build_interintra_16x16_predictors_mbuv(MACROBLOCKD *xd, | 639 void vp9_build_interintra_16x16_predictors_mbuv(MACROBLOCKD *xd, |
| 638 unsigned char *upred, | 640 uint8_t *upred, |
| 639 unsigned char *vpred, | 641 uint8_t *vpred, |
| 640 int uvstride) { | 642 int uvstride) { |
| 641 unsigned char uintrapredictor[64]; | 643 uint8_t uintrapredictor[64]; |
| 642 unsigned char vintrapredictor[64]; | 644 uint8_t vintrapredictor[64]; |
| 643 vp9_build_intra_predictors_internal( | 645 vp9_build_intra_predictors_internal( |
| 644 xd->dst.u_buffer, xd->dst.uv_stride, | 646 xd->dst.u_buffer, xd->dst.uv_stride, |
| 645 uintrapredictor, 8, | 647 uintrapredictor, 8, |
| 646 xd->mode_info_context->mbmi.interintra_uv_mode, 8, | 648 xd->mode_info_context->mbmi.interintra_uv_mode, 8, |
| 647 xd->up_available, xd->left_available); | 649 xd->up_available, xd->left_available); |
| 648 vp9_build_intra_predictors_internal( | 650 vp9_build_intra_predictors_internal( |
| 649 xd->dst.v_buffer, xd->dst.uv_stride, | 651 xd->dst.v_buffer, xd->dst.uv_stride, |
| 650 vintrapredictor, 8, | 652 vintrapredictor, 8, |
| 651 xd->mode_info_context->mbmi.interintra_uv_mode, 8, | 653 xd->mode_info_context->mbmi.interintra_uv_mode, 8, |
| 652 xd->up_available, xd->left_available); | 654 xd->up_available, xd->left_available); |
| 653 combine_interintra(xd->mode_info_context->mbmi.interintra_uv_mode, | 655 combine_interintra(xd->mode_info_context->mbmi.interintra_uv_mode, |
| 654 upred, uvstride, uintrapredictor, 8, 8); | 656 upred, uvstride, uintrapredictor, 8, 8); |
| 655 combine_interintra(xd->mode_info_context->mbmi.interintra_uv_mode, | 657 combine_interintra(xd->mode_info_context->mbmi.interintra_uv_mode, |
| 656 vpred, uvstride, vintrapredictor, 8, 8); | 658 vpred, uvstride, vintrapredictor, 8, 8); |
| 657 } | 659 } |
| 658 | 660 |
| 659 #if CONFIG_SUPERBLOCKS | |
| 660 void vp9_build_interintra_32x32_predictors_sby(MACROBLOCKD *xd, | 661 void vp9_build_interintra_32x32_predictors_sby(MACROBLOCKD *xd, |
| 661 unsigned char *ypred, | 662 uint8_t *ypred, |
| 662 int ystride) { | 663 int ystride) { |
| 663 unsigned char intrapredictor[1024]; | 664 uint8_t intrapredictor[1024]; |
| 664 vp9_build_intra_predictors_internal( | 665 vp9_build_intra_predictors_internal( |
| 665 xd->dst.y_buffer, xd->dst.y_stride, | 666 xd->dst.y_buffer, xd->dst.y_stride, |
| 666 intrapredictor, 32, | 667 intrapredictor, 32, |
| 667 xd->mode_info_context->mbmi.interintra_mode, 32, | 668 xd->mode_info_context->mbmi.interintra_mode, 32, |
| 668 xd->up_available, xd->left_available); | 669 xd->up_available, xd->left_available); |
| 669 combine_interintra(xd->mode_info_context->mbmi.interintra_mode, | 670 combine_interintra(xd->mode_info_context->mbmi.interintra_mode, |
| 670 ypred, ystride, intrapredictor, 32, 32); | 671 ypred, ystride, intrapredictor, 32, 32); |
| 671 } | 672 } |
| 672 | 673 |
| 673 void vp9_build_interintra_32x32_predictors_sbuv(MACROBLOCKD *xd, | 674 void vp9_build_interintra_32x32_predictors_sbuv(MACROBLOCKD *xd, |
| 674 unsigned char *upred, | 675 uint8_t *upred, |
| 675 unsigned char *vpred, | 676 uint8_t *vpred, |
| 676 int uvstride) { | 677 int uvstride) { |
| 677 unsigned char uintrapredictor[256]; | 678 uint8_t uintrapredictor[256]; |
| 678 unsigned char vintrapredictor[256]; | 679 uint8_t vintrapredictor[256]; |
| 679 vp9_build_intra_predictors_internal( | 680 vp9_build_intra_predictors_internal( |
| 680 xd->dst.u_buffer, xd->dst.uv_stride, | 681 xd->dst.u_buffer, xd->dst.uv_stride, |
| 681 uintrapredictor, 16, | 682 uintrapredictor, 16, |
| 682 xd->mode_info_context->mbmi.interintra_uv_mode, 16, | 683 xd->mode_info_context->mbmi.interintra_uv_mode, 16, |
| 683 xd->up_available, xd->left_available); | 684 xd->up_available, xd->left_available); |
| 684 vp9_build_intra_predictors_internal( | 685 vp9_build_intra_predictors_internal( |
| 685 xd->dst.v_buffer, xd->dst.uv_stride, | 686 xd->dst.v_buffer, xd->dst.uv_stride, |
| 686 vintrapredictor, 16, | 687 vintrapredictor, 16, |
| 687 xd->mode_info_context->mbmi.interintra_uv_mode, 16, | 688 xd->mode_info_context->mbmi.interintra_uv_mode, 16, |
| 688 xd->up_available, xd->left_available); | 689 xd->up_available, xd->left_available); |
| 689 combine_interintra(xd->mode_info_context->mbmi.interintra_uv_mode, | 690 combine_interintra(xd->mode_info_context->mbmi.interintra_uv_mode, |
| 690 upred, uvstride, uintrapredictor, 16, 16); | 691 upred, uvstride, uintrapredictor, 16, 16); |
| 691 combine_interintra(xd->mode_info_context->mbmi.interintra_uv_mode, | 692 combine_interintra(xd->mode_info_context->mbmi.interintra_uv_mode, |
| 692 vpred, uvstride, vintrapredictor, 16, 16); | 693 vpred, uvstride, vintrapredictor, 16, 16); |
| 693 } | 694 } |
| 694 | 695 |
| 695 void vp9_build_interintra_32x32_predictors_sb(MACROBLOCKD *xd, | 696 void vp9_build_interintra_32x32_predictors_sb(MACROBLOCKD *xd, |
| 696 unsigned char *ypred, | 697 uint8_t *ypred, |
| 697 unsigned char *upred, | 698 uint8_t *upred, |
| 698 unsigned char *vpred, | 699 uint8_t *vpred, |
| 699 int ystride, | 700 int ystride, |
| 700 int uvstride) { | 701 int uvstride) { |
| 701 vp9_build_interintra_32x32_predictors_sby(xd, ypred, ystride); | 702 vp9_build_interintra_32x32_predictors_sby(xd, ypred, ystride); |
| 702 vp9_build_interintra_32x32_predictors_sbuv(xd, upred, vpred, uvstride); | 703 vp9_build_interintra_32x32_predictors_sbuv(xd, upred, vpred, uvstride); |
| 703 } | 704 } |
| 704 #endif | 705 |
| 705 #endif | 706 void vp9_build_interintra_64x64_predictors_sby(MACROBLOCKD *xd, |
| 707 uint8_t *ypred, |
| 708 int ystride) { |
| 709 uint8_t intrapredictor[4096]; |
| 710 const int mode = xd->mode_info_context->mbmi.interintra_mode; |
| 711 vp9_build_intra_predictors_internal(xd->dst.y_buffer, xd->dst.y_stride, |
| 712 intrapredictor, 64, mode, 64, |
| 713 xd->up_available, xd->left_available); |
| 714 combine_interintra(xd->mode_info_context->mbmi.interintra_mode, |
| 715 ypred, ystride, intrapredictor, 64, 64); |
| 716 } |
| 717 |
| 718 void vp9_build_interintra_64x64_predictors_sbuv(MACROBLOCKD *xd, |
| 719 uint8_t *upred, |
| 720 uint8_t *vpred, |
| 721 int uvstride) { |
| 722 uint8_t uintrapredictor[1024]; |
| 723 uint8_t vintrapredictor[1024]; |
| 724 const int mode = xd->mode_info_context->mbmi.interintra_uv_mode; |
| 725 vp9_build_intra_predictors_internal(xd->dst.u_buffer, xd->dst.uv_stride, |
| 726 uintrapredictor, 32, mode, 32, |
| 727 xd->up_available, xd->left_available); |
| 728 vp9_build_intra_predictors_internal(xd->dst.v_buffer, xd->dst.uv_stride, |
| 729 vintrapredictor, 32, mode, 32, |
| 730 xd->up_available, xd->left_available); |
| 731 combine_interintra(xd->mode_info_context->mbmi.interintra_uv_mode, |
| 732 upred, uvstride, uintrapredictor, 32, 32); |
| 733 combine_interintra(xd->mode_info_context->mbmi.interintra_uv_mode, |
| 734 vpred, uvstride, vintrapredictor, 32, 32); |
| 735 } |
| 736 |
| 737 void vp9_build_interintra_64x64_predictors_sb(MACROBLOCKD *xd, |
| 738 uint8_t *ypred, |
| 739 uint8_t *upred, |
| 740 uint8_t *vpred, |
| 741 int ystride, |
| 742 int uvstride) { |
| 743 vp9_build_interintra_64x64_predictors_sby(xd, ypred, ystride); |
| 744 vp9_build_interintra_64x64_predictors_sbuv(xd, upred, vpred, uvstride); |
| 745 } |
| 746 #endif // CONFIG_COMP_INTERINTRA_PRED |
| 706 | 747 |
| 707 void vp9_build_intra_predictors_mby(MACROBLOCKD *xd) { | 748 void vp9_build_intra_predictors_mby(MACROBLOCKD *xd) { |
| 708 vp9_build_intra_predictors_internal(xd->dst.y_buffer, xd->dst.y_stride, | 749 vp9_build_intra_predictors_internal(xd->dst.y_buffer, xd->dst.y_stride, |
| 709 xd->predictor, 16, | 750 xd->predictor, 16, |
| 710 xd->mode_info_context->mbmi.mode, 16, | 751 xd->mode_info_context->mbmi.mode, 16, |
| 711 xd->up_available, xd->left_available); | 752 xd->up_available, xd->left_available); |
| 712 } | 753 } |
| 713 | 754 |
| 714 void vp9_build_intra_predictors_mby_s(MACROBLOCKD *xd) { | 755 void vp9_build_intra_predictors_mby_s(MACROBLOCKD *xd) { |
| 715 vp9_build_intra_predictors_internal(xd->dst.y_buffer, xd->dst.y_stride, | 756 vp9_build_intra_predictors_internal(xd->dst.y_buffer, xd->dst.y_stride, |
| 716 xd->dst.y_buffer, xd->dst.y_stride, | 757 xd->dst.y_buffer, xd->dst.y_stride, |
| 717 xd->mode_info_context->mbmi.mode, 16, | 758 xd->mode_info_context->mbmi.mode, 16, |
| 718 xd->up_available, xd->left_available); | 759 xd->up_available, xd->left_available); |
| 719 } | 760 } |
| 720 | 761 |
| 721 #if CONFIG_SUPERBLOCKS | |
| 722 void vp9_build_intra_predictors_sby_s(MACROBLOCKD *xd) { | 762 void vp9_build_intra_predictors_sby_s(MACROBLOCKD *xd) { |
| 723 vp9_build_intra_predictors_internal(xd->dst.y_buffer, xd->dst.y_stride, | 763 vp9_build_intra_predictors_internal(xd->dst.y_buffer, xd->dst.y_stride, |
| 724 xd->dst.y_buffer, xd->dst.y_stride, | 764 xd->dst.y_buffer, xd->dst.y_stride, |
| 725 xd->mode_info_context->mbmi.mode, 32, | 765 xd->mode_info_context->mbmi.mode, 32, |
| 726 xd->up_available, xd->left_available); | 766 xd->up_available, xd->left_available); |
| 727 } | 767 } |
| 728 #endif | |
| 729 | 768 |
| 730 #if CONFIG_COMP_INTRA_PRED | 769 void vp9_build_intra_predictors_sb64y_s(MACROBLOCKD *xd) { |
| 731 void vp9_build_comp_intra_predictors_mby(MACROBLOCKD *xd) { | |
| 732 unsigned char predictor[2][256]; | |
| 733 int i; | |
| 734 | |
| 735 vp9_build_intra_predictors_internal(xd->dst.y_buffer, xd->dst.y_stride, | 770 vp9_build_intra_predictors_internal(xd->dst.y_buffer, xd->dst.y_stride, |
| 736 predictor[0], 16, | 771 xd->dst.y_buffer, xd->dst.y_stride, |
| 737 xd->mode_info_context->mbmi.mode, | 772 xd->mode_info_context->mbmi.mode, 64, |
| 738 16, xd->up_available, | 773 xd->up_available, xd->left_available); |
| 739 xd->left_available); | |
| 740 vp9_build_intra_predictors_internal(xd->dst.y_buffer, xd->dst.y_stride, | |
| 741 predictor[1], 16, | |
| 742 xd->mode_info_context->mbmi.second_mode, | |
| 743 16, xd->up_available, | |
| 744 xd->left_available); | |
| 745 | |
| 746 for (i = 0; i < 256; i++) { | |
| 747 xd->predictor[i] = (predictor[0][i] + predictor[1][i] + 1) >> 1; | |
| 748 } | |
| 749 } | 774 } |
| 750 #endif | |
| 751 | 775 |
| 752 void vp9_build_intra_predictors_mbuv_internal(MACROBLOCKD *xd, | 776 void vp9_build_intra_predictors_mbuv_internal(MACROBLOCKD *xd, |
| 753 unsigned char *upred_ptr, | 777 uint8_t *upred_ptr, |
| 754 unsigned char *vpred_ptr, | 778 uint8_t *vpred_ptr, |
| 755 int uv_stride, | 779 int uv_stride, |
| 756 int mode, int bsize) { | 780 int mode, int bsize) { |
| 757 vp9_build_intra_predictors_internal(xd->dst.u_buffer, xd->dst.uv_stride, | 781 vp9_build_intra_predictors_internal(xd->dst.u_buffer, xd->dst.uv_stride, |
| 758 upred_ptr, uv_stride, mode, bsize, | 782 upred_ptr, uv_stride, mode, bsize, |
| 759 xd->up_available, xd->left_available); | 783 xd->up_available, xd->left_available); |
| 760 vp9_build_intra_predictors_internal(xd->dst.v_buffer, xd->dst.uv_stride, | 784 vp9_build_intra_predictors_internal(xd->dst.v_buffer, xd->dst.uv_stride, |
| 761 vpred_ptr, uv_stride, mode, bsize, | 785 vpred_ptr, uv_stride, mode, bsize, |
| 762 xd->up_available, xd->left_available); | 786 xd->up_available, xd->left_available); |
| 763 } | 787 } |
| 764 | 788 |
| 765 void vp9_build_intra_predictors_mbuv(MACROBLOCKD *xd) { | 789 void vp9_build_intra_predictors_mbuv(MACROBLOCKD *xd) { |
| 766 vp9_build_intra_predictors_mbuv_internal(xd, &xd->predictor[256], | 790 vp9_build_intra_predictors_mbuv_internal(xd, &xd->predictor[256], |
| 767 &xd->predictor[320], 8, | 791 &xd->predictor[320], 8, |
| 768 xd->mode_info_context->mbmi.uv_mode, | 792 xd->mode_info_context->mbmi.uv_mode, |
| 769 8); | 793 8); |
| 770 } | 794 } |
| 771 | 795 |
| 772 void vp9_build_intra_predictors_mbuv_s(MACROBLOCKD *xd) { | 796 void vp9_build_intra_predictors_mbuv_s(MACROBLOCKD *xd) { |
| 773 vp9_build_intra_predictors_mbuv_internal(xd, xd->dst.u_buffer, | 797 vp9_build_intra_predictors_mbuv_internal(xd, xd->dst.u_buffer, |
| 774 xd->dst.v_buffer, | 798 xd->dst.v_buffer, |
| 775 xd->dst.uv_stride, | 799 xd->dst.uv_stride, |
| 776 xd->mode_info_context->mbmi.uv_mode, | 800 xd->mode_info_context->mbmi.uv_mode, |
| 777 8); | 801 8); |
| 778 } | 802 } |
| 779 | 803 |
| 780 #if CONFIG_SUPERBLOCKS | |
| 781 void vp9_build_intra_predictors_sbuv_s(MACROBLOCKD *xd) { | 804 void vp9_build_intra_predictors_sbuv_s(MACROBLOCKD *xd) { |
| 782 vp9_build_intra_predictors_mbuv_internal(xd, xd->dst.u_buffer, | 805 vp9_build_intra_predictors_mbuv_internal(xd, xd->dst.u_buffer, |
| 783 xd->dst.v_buffer, xd->dst.uv_stride, | 806 xd->dst.v_buffer, xd->dst.uv_stride, |
| 784 xd->mode_info_context->mbmi.uv_mode, | 807 xd->mode_info_context->mbmi.uv_mode, |
| 785 16); | 808 16); |
| 786 } | 809 } |
| 787 #endif | |
| 788 | 810 |
| 789 #if CONFIG_COMP_INTRA_PRED | 811 void vp9_build_intra_predictors_sb64uv_s(MACROBLOCKD *xd) { |
| 790 void vp9_build_comp_intra_predictors_mbuv(MACROBLOCKD *xd) { | 812 vp9_build_intra_predictors_mbuv_internal(xd, xd->dst.u_buffer, |
| 791 unsigned char predictor[2][2][64]; | 813 xd->dst.v_buffer, xd->dst.uv_stride, |
| 792 int i; | 814 xd->mode_info_context->mbmi.uv_mode, |
| 793 | 815 32); |
| 794 vp9_build_intra_predictors_mbuv_internal( | |
| 795 xd, predictor[0][0], predictor[1][0], 8, | |
| 796 xd->mode_info_context->mbmi.uv_mode, 8); | |
| 797 vp9_build_intra_predictors_mbuv_internal( | |
| 798 xd, predictor[0][1], predictor[1][1], 8, | |
| 799 xd->mode_info_context->mbmi.second_uv_mode, 8); | |
| 800 for (i = 0; i < 64; i++) { | |
| 801 xd->predictor[256 + i] = (predictor[0][0][i] + predictor[0][1][i] + 1) >> 1; | |
| 802 xd->predictor[256 + 64 + i] = (predictor[1][0][i] + | |
| 803 predictor[1][1][i] + 1) >> 1; | |
| 804 } | |
| 805 } | 816 } |
| 806 #endif | |
| 807 | 817 |
| 808 void vp9_intra8x8_predict(BLOCKD *xd, | 818 void vp9_intra8x8_predict(BLOCKD *xd, |
| 809 int mode, | 819 int mode, |
| 810 unsigned char *predictor) { | 820 uint8_t *predictor) { |
| 811 vp9_build_intra_predictors_internal(*(xd->base_dst) + xd->dst, | 821 vp9_build_intra_predictors_internal(*(xd->base_dst) + xd->dst, |
| 812 xd->dst_stride, predictor, 16, | 822 xd->dst_stride, predictor, 16, |
| 813 mode, 8, 1, 1); | 823 mode, 8, 1, 1); |
| 814 } | 824 } |
| 815 | 825 |
| 816 #if CONFIG_COMP_INTRA_PRED | |
| 817 void vp9_comp_intra8x8_predict(BLOCKD *xd, | |
| 818 int mode, int second_mode, | |
| 819 unsigned char *out_predictor) { | |
| 820 unsigned char predictor[2][8 * 16]; | |
| 821 int i, j; | |
| 822 | |
| 823 vp9_intra8x8_predict(xd, mode, predictor[0]); | |
| 824 vp9_intra8x8_predict(xd, second_mode, predictor[1]); | |
| 825 | |
| 826 for (i = 0; i < 8 * 16; i += 16) { | |
| 827 for (j = i; j < i + 8; j++) { | |
| 828 out_predictor[j] = (predictor[0][j] + predictor[1][j] + 1) >> 1; | |
| 829 } | |
| 830 } | |
| 831 } | |
| 832 #endif | |
| 833 | |
| 834 void vp9_intra_uv4x4_predict(BLOCKD *xd, | 826 void vp9_intra_uv4x4_predict(BLOCKD *xd, |
| 835 int mode, | 827 int mode, |
| 836 unsigned char *predictor) { | 828 uint8_t *predictor) { |
| 837 vp9_build_intra_predictors_internal(*(xd->base_dst) + xd->dst, | 829 vp9_build_intra_predictors_internal(*(xd->base_dst) + xd->dst, |
| 838 xd->dst_stride, predictor, 8, | 830 xd->dst_stride, predictor, 8, |
| 839 mode, 4, 1, 1); | 831 mode, 4, 1, 1); |
| 840 } | 832 } |
| 841 | 833 |
| 842 #if CONFIG_COMP_INTRA_PRED | |
| 843 void vp9_comp_intra_uv4x4_predict(BLOCKD *xd, | |
| 844 int mode, int mode2, | |
| 845 unsigned char *out_predictor) { | |
| 846 unsigned char predictor[2][8 * 4]; | |
| 847 int i, j; | |
| 848 | |
| 849 vp9_intra_uv4x4_predict(xd, mode, predictor[0]); | |
| 850 vp9_intra_uv4x4_predict(xd, mode2, predictor[1]); | |
| 851 | |
| 852 for (i = 0; i < 4 * 8; i += 8) { | |
| 853 for (j = i; j < i + 4; j++) { | |
| 854 out_predictor[j] = (predictor[0][j] + predictor[1][j] + 1) >> 1; | |
| 855 } | |
| 856 } | |
| 857 } | |
| 858 #endif | |
| 859 | |
/* TODO: try different ways of using the Y-UV mode correlation.
   The current code assumes that a UV 4x4 block uses the same mode
   as the corresponding Y 8x8 area.
*/
| OLD | NEW |