Chromium Code Reviews

Unified Diff: source/libvpx/vp8/encoder/quantize.c

Issue 7671004: Update libvpx snapshot to v0.9.7-p1 (Cayuga). (Closed) Base URL: svn://chrome-svn/chrome/trunk/deps/third_party/libvpx/
Patch Set: '' Created 9 years, 4 months ago
Index: source/libvpx/vp8/encoder/quantize.c
===================================================================
--- source/libvpx/vp8/encoder/quantize.c (revision 96967)
+++ source/libvpx/vp8/encoder/quantize.c (working copy)
@@ -12,8 +12,9 @@
#include <math.h>
#include "vpx_mem/vpx_mem.h"
+#include "onyx_int.h"
#include "quantize.h"
-#include "vp8/common/entropy.h"
+#include "vp8/common/quant_common.h"
#define EXACT_QUANT
@@ -27,7 +28,7 @@
short *zbin_ptr = b->zbin;
short *round_ptr = b->round;
short *quant_ptr = b->quant_fast;
- short *quant_shift_ptr = b->quant_shift;
+ unsigned char *quant_shift_ptr = b->quant_shift;
short *qcoeff_ptr = d->qcoeff;
short *dqcoeff_ptr = d->dqcoeff;
short *dequant_ptr = d->dequant;
@@ -112,7 +113,7 @@
short *zbin_ptr = b->zbin;
short *round_ptr = b->round;
short *quant_ptr = b->quant;
- short *quant_shift_ptr = b->quant_shift;
+ unsigned char *quant_shift_ptr = b->quant_shift;
short *qcoeff_ptr = d->qcoeff;
short *dqcoeff_ptr = d->dqcoeff;
short *dequant_ptr = d->dequant;
@@ -166,7 +167,7 @@
int sz;
short *coeff_ptr;
short *quant_ptr;
- short *quant_shift_ptr;
+ unsigned char *quant_shift_ptr;
short *qcoeff_ptr;
short *dqcoeff_ptr;
short *dequant_ptr;
@@ -268,7 +269,7 @@
#endif
-void vp8_quantize_mby(MACROBLOCK *x)
+void vp8_quantize_mby_c(MACROBLOCK *x)
{
int i;
int has_2nd_order = (x->e_mbd.mode_info_context->mbmi.mode != B_PRED
@@ -281,7 +282,7 @@
x->quantize_b(&x->block[24], &x->e_mbd.block[24]);
}
-void vp8_quantize_mb(MACROBLOCK *x)
+void vp8_quantize_mb_c(MACROBLOCK *x)
{
int i;
int has_2nd_order=(x->e_mbd.mode_info_context->mbmi.mode != B_PRED
@@ -292,10 +293,442 @@
}
-void vp8_quantize_mbuv(MACROBLOCK *x)
+void vp8_quantize_mbuv_c(MACROBLOCK *x)
{
int i;
for (i = 16; i < 24; i++)
x->quantize_b(&x->block[i], &x->e_mbd.block[i]);
}
+
+/* quantize_b_pair function pointer in MACROBLOCK structure is set to one of
+ * these two C functions if corresponding optimized routine is not available.
+ * NEON optimized version implements currently the fast quantization for pair
+ * of blocks. */
+void vp8_regular_quantize_b_pair(BLOCK *b1, BLOCK *b2, BLOCKD *d1, BLOCKD *d2)
+{
+ vp8_regular_quantize_b(b1, d1);
+ vp8_regular_quantize_b(b2, d2);
+}
+
+void vp8_fast_quantize_b_pair_c(BLOCK *b1, BLOCK *b2, BLOCKD *d1, BLOCKD *d2)
+{
+ vp8_fast_quantize_b_c(b1, d1);
+ vp8_fast_quantize_b_c(b2, d2);
+}
+
+
+static const int qrounding_factors[129] =
+{
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48
+};
+
+
+static const int qzbin_factors[129] =
+{
+ 84, 84, 84, 84, 84, 84, 84, 84,
+ 84, 84, 84, 84, 84, 84, 84, 84,
+ 84, 84, 84, 84, 84, 84, 84, 84,
+ 84, 84, 84, 84, 84, 84, 84, 84,
+ 84, 84, 84, 84, 84, 84, 84, 84,
+ 84, 84, 84, 84, 84, 84, 84, 84,
+ 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80,
+ 80
+};
+
+
+static const int qrounding_factors_y2[129] =
+{
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48
+};
+
+
+static const int qzbin_factors_y2[129] =
+{
+ 84, 84, 84, 84, 84, 84, 84, 84,
+ 84, 84, 84, 84, 84, 84, 84, 84,
+ 84, 84, 84, 84, 84, 84, 84, 84,
+ 84, 84, 84, 84, 84, 84, 84, 84,
+ 84, 84, 84, 84, 84, 84, 84, 84,
+ 84, 84, 84, 84, 84, 84, 84, 84,
+ 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80,
+ 80
+};
+
+
+#define EXACT_QUANT
+#ifdef EXACT_QUANT
+static void invert_quant(int improved_quant, short *quant,
+ unsigned char *shift, short d)
+{
+ if(improved_quant)
+ {
+ unsigned t;
+ int l;
+ t = d;
+ for(l = 0; t > 1; l++)
+ t>>=1;
+ t = 1 + (1<<(16+l))/d;
+ *quant = (short)(t - (1<<16));
+ *shift = l;
+ }
+ else
+ {
+ *quant = (1 << 16) / d;
+ *shift = 0;
+ }
+}
+
+
+void vp8cx_init_quantizer(VP8_COMP *cpi)
+{
+ int i;
+ int quant_val;
+ int Q;
+
+ int zbin_boost[16] = {0, 0, 8, 10, 12, 14, 16, 20, 24, 28, 32, 36, 40, 44, 44, 44};
+
+ for (Q = 0; Q < QINDEX_RANGE; Q++)
+ {
+ // dc values
+ quant_val = vp8_dc_quant(Q, cpi->common.y1dc_delta_q);
+ cpi->Y1quant_fast[Q][0] = (1 << 16) / quant_val;
+ invert_quant(cpi->sf.improved_quant, cpi->Y1quant[Q] + 0,
+ cpi->Y1quant_shift[Q] + 0, quant_val);
+ cpi->Y1zbin[Q][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
+ cpi->Y1round[Q][0] = (qrounding_factors[Q] * quant_val) >> 7;
+ cpi->common.Y1dequant[Q][0] = quant_val;
+ cpi->zrun_zbin_boost_y1[Q][0] = (quant_val * zbin_boost[0]) >> 7;
+
+ quant_val = vp8_dc2quant(Q, cpi->common.y2dc_delta_q);
+ cpi->Y2quant_fast[Q][0] = (1 << 16) / quant_val;
+ invert_quant(cpi->sf.improved_quant, cpi->Y2quant[Q] + 0,
+ cpi->Y2quant_shift[Q] + 0, quant_val);
+ cpi->Y2zbin[Q][0] = ((qzbin_factors_y2[Q] * quant_val) + 64) >> 7;
+ cpi->Y2round[Q][0] = (qrounding_factors_y2[Q] * quant_val) >> 7;
+ cpi->common.Y2dequant[Q][0] = quant_val;
+ cpi->zrun_zbin_boost_y2[Q][0] = (quant_val * zbin_boost[0]) >> 7;
+
+ quant_val = vp8_dc_uv_quant(Q, cpi->common.uvdc_delta_q);
+ cpi->UVquant_fast[Q][0] = (1 << 16) / quant_val;
+ invert_quant(cpi->sf.improved_quant, cpi->UVquant[Q] + 0,
+ cpi->UVquant_shift[Q] + 0, quant_val);
+ cpi->UVzbin[Q][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;;
+ cpi->UVround[Q][0] = (qrounding_factors[Q] * quant_val) >> 7;
+ cpi->common.UVdequant[Q][0] = quant_val;
+ cpi->zrun_zbin_boost_uv[Q][0] = (quant_val * zbin_boost[0]) >> 7;
+
+ // all the ac values = ;
+ for (i = 1; i < 16; i++)
+ {
+ int rc = vp8_default_zig_zag1d[i];
+
+ quant_val = vp8_ac_yquant(Q);
+ cpi->Y1quant_fast[Q][rc] = (1 << 16) / quant_val;
+ invert_quant(cpi->sf.improved_quant, cpi->Y1quant[Q] + rc,
+ cpi->Y1quant_shift[Q] + rc, quant_val);
+ cpi->Y1zbin[Q][rc] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
+ cpi->Y1round[Q][rc] = (qrounding_factors[Q] * quant_val) >> 7;
+ cpi->common.Y1dequant[Q][rc] = quant_val;
+ cpi->zrun_zbin_boost_y1[Q][i] = (quant_val * zbin_boost[i]) >> 7;
+
+ quant_val = vp8_ac2quant(Q, cpi->common.y2ac_delta_q);
+ cpi->Y2quant_fast[Q][rc] = (1 << 16) / quant_val;
+ invert_quant(cpi->sf.improved_quant, cpi->Y2quant[Q] + rc,
+ cpi->Y2quant_shift[Q] + rc, quant_val);
+ cpi->Y2zbin[Q][rc] = ((qzbin_factors_y2[Q] * quant_val) + 64) >> 7;
+ cpi->Y2round[Q][rc] = (qrounding_factors_y2[Q] * quant_val) >> 7;
+ cpi->common.Y2dequant[Q][rc] = quant_val;
+ cpi->zrun_zbin_boost_y2[Q][i] = (quant_val * zbin_boost[i]) >> 7;
+
+ quant_val = vp8_ac_uv_quant(Q, cpi->common.uvac_delta_q);
+ cpi->UVquant_fast[Q][rc] = (1 << 16) / quant_val;
+ invert_quant(cpi->sf.improved_quant, cpi->UVquant[Q] + rc,
+ cpi->UVquant_shift[Q] + rc, quant_val);
+ cpi->UVzbin[Q][rc] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
+ cpi->UVround[Q][rc] = (qrounding_factors[Q] * quant_val) >> 7;
+ cpi->common.UVdequant[Q][rc] = quant_val;
+ cpi->zrun_zbin_boost_uv[Q][i] = (quant_val * zbin_boost[i]) >> 7;
+ }
+ }
+}
+#else
+void vp8cx_init_quantizer(VP8_COMP *cpi)
+{
+ int i;
+ int quant_val;
+ int Q;
+
+ int zbin_boost[16] = {0, 0, 8, 10, 12, 14, 16, 20, 24, 28, 32, 36, 40, 44, 44, 44};
+
+ for (Q = 0; Q < QINDEX_RANGE; Q++)
+ {
+ // dc values
+ quant_val = vp8_dc_quant(Q, cpi->common.y1dc_delta_q);
+ cpi->Y1quant[Q][0] = (1 << 16) / quant_val;
+ cpi->Y1zbin[Q][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
+ cpi->Y1round[Q][0] = (qrounding_factors[Q] * quant_val) >> 7;
+ cpi->common.Y1dequant[Q][0] = quant_val;
+ cpi->zrun_zbin_boost_y1[Q][0] = (quant_val * zbin_boost[0]) >> 7;
+
+ quant_val = vp8_dc2quant(Q, cpi->common.y2dc_delta_q);
+ cpi->Y2quant[Q][0] = (1 << 16) / quant_val;
+ cpi->Y2zbin[Q][0] = ((qzbin_factors_y2[Q] * quant_val) + 64) >> 7;
+ cpi->Y2round[Q][0] = (qrounding_factors_y2[Q] * quant_val) >> 7;
+ cpi->common.Y2dequant[Q][0] = quant_val;
+ cpi->zrun_zbin_boost_y2[Q][0] = (quant_val * zbin_boost[0]) >> 7;
+
+ quant_val = vp8_dc_uv_quant(Q, cpi->common.uvdc_delta_q);
+ cpi->UVquant[Q][0] = (1 << 16) / quant_val;
+ cpi->UVzbin[Q][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;;
+ cpi->UVround[Q][0] = (qrounding_factors[Q] * quant_val) >> 7;
+ cpi->common.UVdequant[Q][0] = quant_val;
+ cpi->zrun_zbin_boost_uv[Q][0] = (quant_val * zbin_boost[0]) >> 7;
+
+ // all the ac values = ;
+ for (i = 1; i < 16; i++)
+ {
+ int rc = vp8_default_zig_zag1d[i];
+
+ quant_val = vp8_ac_yquant(Q);
+ cpi->Y1quant[Q][rc] = (1 << 16) / quant_val;
+ cpi->Y1zbin[Q][rc] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
+ cpi->Y1round[Q][rc] = (qrounding_factors[Q] * quant_val) >> 7;
+ cpi->common.Y1dequant[Q][rc] = quant_val;
+ cpi->zrun_zbin_boost_y1[Q][i] = (quant_val * zbin_boost[i]) >> 7;
+
+ quant_val = vp8_ac2quant(Q, cpi->common.y2ac_delta_q);
+ cpi->Y2quant[Q][rc] = (1 << 16) / quant_val;
+ cpi->Y2zbin[Q][rc] = ((qzbin_factors_y2[Q] * quant_val) + 64) >> 7;
+ cpi->Y2round[Q][rc] = (qrounding_factors_y2[Q] * quant_val) >> 7;
+ cpi->common.Y2dequant[Q][rc] = quant_val;
+ cpi->zrun_zbin_boost_y2[Q][i] = (quant_val * zbin_boost[i]) >> 7;
+
+ quant_val = vp8_ac_uv_quant(Q, cpi->common.uvac_delta_q);
+ cpi->UVquant[Q][rc] = (1 << 16) / quant_val;
+ cpi->UVzbin[Q][rc] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
+ cpi->UVround[Q][rc] = (qrounding_factors[Q] * quant_val) >> 7;
+ cpi->common.UVdequant[Q][rc] = quant_val;
+ cpi->zrun_zbin_boost_uv[Q][i] = (quant_val * zbin_boost[i]) >> 7;
+ }
+ }
+}
+#endif
+
+
+void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x)
+{
+ int i;
+ int QIndex;
+ MACROBLOCKD *xd = &x->e_mbd;
+ int zbin_extra;
+
+ // Select the baseline MB Q index.
+ if (xd->segmentation_enabled)
+ {
+ // Abs Value
+ if (xd->mb_segement_abs_delta == SEGMENT_ABSDATA)
+
+ QIndex = xd->segment_feature_data[MB_LVL_ALT_Q][xd->mode_info_context->mbmi.segment_id];
+ // Delta Value
+ else
+ {
+ QIndex = cpi->common.base_qindex + xd->segment_feature_data[MB_LVL_ALT_Q][xd->mode_info_context->mbmi.segment_id];
+ QIndex = (QIndex >= 0) ? ((QIndex <= MAXQ) ? QIndex : MAXQ) : 0; // Clamp to valid range
+ }
+ }
+ else
+ QIndex = cpi->common.base_qindex;
+
+ // Y
+ zbin_extra = ( cpi->common.Y1dequant[QIndex][1] *
+ ( cpi->zbin_over_quant +
+ cpi->zbin_mode_boost +
+ x->act_zbin_adj ) ) >> 7;
+
+ for (i = 0; i < 16; i++)
+ {
+ x->block[i].quant = cpi->Y1quant[QIndex];
+ x->block[i].quant_fast = cpi->Y1quant_fast[QIndex];
+ x->block[i].quant_shift = cpi->Y1quant_shift[QIndex];
+ x->block[i].zbin = cpi->Y1zbin[QIndex];
+ x->block[i].round = cpi->Y1round[QIndex];
+ x->e_mbd.block[i].dequant = cpi->common.Y1dequant[QIndex];
+ x->block[i].zrun_zbin_boost = cpi->zrun_zbin_boost_y1[QIndex];
+ x->block[i].zbin_extra = (short)zbin_extra;
+ }
+
+ // UV
+ zbin_extra = ( cpi->common.UVdequant[QIndex][1] *
+ ( cpi->zbin_over_quant +
+ cpi->zbin_mode_boost +
+ x->act_zbin_adj ) ) >> 7;
+
+ for (i = 16; i < 24; i++)
+ {
+ x->block[i].quant = cpi->UVquant[QIndex];
+ x->block[i].quant_fast = cpi->UVquant_fast[QIndex];
+ x->block[i].quant_shift = cpi->UVquant_shift[QIndex];
+ x->block[i].zbin = cpi->UVzbin[QIndex];
+ x->block[i].round = cpi->UVround[QIndex];
+ x->e_mbd.block[i].dequant = cpi->common.UVdequant[QIndex];
+ x->block[i].zrun_zbin_boost = cpi->zrun_zbin_boost_uv[QIndex];
+ x->block[i].zbin_extra = (short)zbin_extra;
+ }
+
+ // Y2
+ zbin_extra = ( cpi->common.Y2dequant[QIndex][1] *
+ ( (cpi->zbin_over_quant / 2) +
+ cpi->zbin_mode_boost +
+ x->act_zbin_adj ) ) >> 7;
+
+ x->block[24].quant_fast = cpi->Y2quant_fast[QIndex];
+ x->block[24].quant = cpi->Y2quant[QIndex];
+ x->block[24].quant_shift = cpi->Y2quant_shift[QIndex];
+ x->block[24].zbin = cpi->Y2zbin[QIndex];
+ x->block[24].round = cpi->Y2round[QIndex];
+ x->e_mbd.block[24].dequant = cpi->common.Y2dequant[QIndex];
+ x->block[24].zrun_zbin_boost = cpi->zrun_zbin_boost_y2[QIndex];
+ x->block[24].zbin_extra = (short)zbin_extra;
+
+ /* save this macroblock QIndex for vp8_update_zbin_extra() */
+ x->q_index = QIndex;
+}
+
+
+void vp8_update_zbin_extra(VP8_COMP *cpi, MACROBLOCK *x)
+{
+ int i;
+ int QIndex = x->q_index;
+ int zbin_extra;
+
+ // Y
+ zbin_extra = ( cpi->common.Y1dequant[QIndex][1] *
+ ( cpi->zbin_over_quant +
+ cpi->zbin_mode_boost +
+ x->act_zbin_adj ) ) >> 7;
+ for (i = 0; i < 16; i++)
+ {
+ x->block[i].zbin_extra = (short)zbin_extra;
+ }
+
+ // UV
+ zbin_extra = ( cpi->common.UVdequant[QIndex][1] *
+ ( cpi->zbin_over_quant +
+ cpi->zbin_mode_boost +
+ x->act_zbin_adj ) ) >> 7;
+
+ for (i = 16; i < 24; i++)
+ {
+ x->block[i].zbin_extra = (short)zbin_extra;
+ }
+
+ // Y2
+ zbin_extra = ( cpi->common.Y2dequant[QIndex][1] *
+ ( (cpi->zbin_over_quant / 2) +
+ cpi->zbin_mode_boost +
+ x->act_zbin_adj ) ) >> 7;
+
+ x->block[24].zbin_extra = (short)zbin_extra;
+}
+
+
+void vp8cx_frame_init_quantizer(VP8_COMP *cpi)
+{
+ // Clear Zbin mode boost for default case
+ cpi->zbin_mode_boost = 0;
+
+ // MB level quantizer setup
+ vp8cx_mb_init_quantizer(cpi, &cpi->mb);
+}
+
+
+void vp8_set_quantizer(struct VP8_COMP *cpi, int Q)
+{
+ VP8_COMMON *cm = &cpi->common;
+ MACROBLOCKD *mbd = &cpi->mb.e_mbd;
+ int update = 0;
+ int new_delta_q;
+ cm->base_qindex = Q;
+
+ /* if any of the delta_q values are changing update flag has to be set */
+ /* currently only y2dc_delta_q may change */
+
+ cm->y1dc_delta_q = 0;
+ cm->y2ac_delta_q = 0;
+ cm->uvdc_delta_q = 0;
+ cm->uvac_delta_q = 0;
+
+ if (Q < 4)
+ {
+ new_delta_q = 4-Q;
+ }
+ else
+ new_delta_q = 0;
+
+ update |= cm->y2dc_delta_q != new_delta_q;
+ cm->y2dc_delta_q = new_delta_q;
+
+
+ // Set Segment specific quantizers
+ mbd->segment_feature_data[MB_LVL_ALT_Q][0] = cpi->segment_feature_data[MB_LVL_ALT_Q][0];
+ mbd->segment_feature_data[MB_LVL_ALT_Q][1] = cpi->segment_feature_data[MB_LVL_ALT_Q][1];
+ mbd->segment_feature_data[MB_LVL_ALT_Q][2] = cpi->segment_feature_data[MB_LVL_ALT_Q][2];
+ mbd->segment_feature_data[MB_LVL_ALT_Q][3] = cpi->segment_feature_data[MB_LVL_ALT_Q][3];
+
+ /* quantizer has to be reinitialized for any delta_q changes */
+ if(update)
+ vp8cx_init_quantizer(cpi);
+
+}
+
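
The invert_quant() routine added in this patch encodes each dequant step d as a 16-bit reciprocal plus a shift, so the quantizer can divide by d with one multiply and two shifts. Below is a minimal standalone sketch (not part of the patch): it copies the invert_quant() derivation and checks the reciprocal against plain integer division, assuming the pair is applied as in vp8_regular_quantize_b, i.e. y = (((x * quant) >> 16) + x) >> shift on the rounded absolute coefficient. The sign handling, rounding and zero-bin logic of the real encoder are omitted, and the check relies on arithmetic right shift of negative products, as the encoder code itself does.

#include <stdio.h>

/* Same derivation as invert_quant() in the patch: l = floor(log2(d)),
 * t ~= 2^(16+l)/d, store t - 2^16 as a signed 16-bit multiplier. */
static void invert_quant_sketch(short *quant, unsigned char *shift, short d)
{
    unsigned t = d;
    int l = 0;
    while (t > 1) { t >>= 1; l++; }
    t = 1 + (1 << (16 + l)) / d;
    *quant = (short)(t - (1 << 16));
    *shift = (unsigned char)l;
}

int main(void)
{
    short d;
    for (d = 4; d <= 132; d++)              /* typical VP8 dequant step range */
    {
        short quant;
        unsigned char shift;
        int x;
        invert_quant_sketch(&quant, &shift, d);
        for (x = 0; x < (1 << 12); x++)     /* non-negative coefficients only */
        {
            /* x*(t - 2^16) >> 16, plus x, recovers floor(x*t/2^16);
             * shifting by l then yields floor(x/d) without a divide */
            int y = (((x * quant) >> 16) + x) >> shift;
            if (y != x / d)
            {
                printf("mismatch: d=%d x=%d y=%d\n", d, x, y);
                return 1;
            }
        }
    }
    printf("reciprocal quantizer matched x/d for all tested cases\n");
    return 0;
}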
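
The zbin, round and zbin_extra computations in vp8cx_init_quantizer() and vp8cx_mb_init_quantizer() are Q7 fixed point: the qzbin_factors[] / qrounding_factors[] tables and the zbin_over_quant + zbin_mode_boost + act_zbin_adj sum are all fractions of the dequant step expressed in 1/128ths, which the ">> 7" converts back to coefficient units. A small sketch with illustrative values follows; quant_val, zbin_over_quant and the other adjustments are plain ints here, standing in for the VP8_COMP fields used in the patch.

#include <stdio.h>

int main(void)
{
    int quant_val = 20;            /* example dequant step for some Q */
    int qzbin_factor = 84;         /* 84/128 ~= 0.66 of the step (Q < 48) */
    int qrounding_factor = 48;     /* 48/128 = 0.375 of the step */

    /* per-Q table entries, as in vp8cx_init_quantizer() */
    int zbin  = ((qzbin_factor * quant_val) + 64) >> 7;   /* +64 rounds to nearest */
    int round = (qrounding_factor * quant_val) >> 7;

    /* per-macroblock dead-zone widening, as in vp8cx_mb_init_quantizer():
     * the summed adjustments are again in 1/128ths of the AC dequant step */
    int zbin_over_quant = 24, zbin_mode_boost = 0, act_zbin_adj = 0;
    int zbin_extra = (quant_val *
                      (zbin_over_quant + zbin_mode_boost + act_zbin_adj)) >> 7;

    printf("zbin=%d round=%d zbin_extra=%d\n", zbin, round, zbin_extra);
    /* prints: zbin=13 round=7 zbin_extra=3 */
    return 0;
}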