1 /*
2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
3 *
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
9 */
10
11 #include <math.h>
12
13 #include "./vpx_config.h"
14 #include "vpx_ports/bitops.h"
15 #include "vpx_mem/vpx_mem.h"
16
17 #include "onyx_int.h"
18 #include "vp8/encoder/quantize.h"
19 #include "vp8/common/quant_common.h"
20
/* Fast (no zero-bin) quantizer for a single 4x4 block.
 *
 * Walks the coefficients in zig-zag order, quantizing each one with the
 * per-coefficient round/quant_fast tables from the encoder-side BLOCK,
 * and writes both the quantized and dequantized values into the
 * decoder-side BLOCKD.  *d->eob is set to one past the last nonzero
 * coefficient position (0 when the block quantizes to all zeros).
 */
void vp8_fast_quantize_b_c(BLOCK *b, BLOCKD *d) {
  const short *coeff = b->coeff;
  const short *round = b->round;
  const short *quant = b->quant_fast;
  const short *dequant = d->dequant;
  short *qcoeff = d->qcoeff;
  short *dqcoeff = d->dqcoeff;
  int last_nonzero = -1;
  int idx;

  for (idx = 0; idx < 16; ++idx) {
    const int rc = vp8_default_zig_zag1d[idx];
    const int z = coeff[rc];
    const int sign = z >> 31;            /* 0 or -1: sign of z */
    const int abs_z = (z ^ sign) - sign; /* abs(z) */
    /* quantize: (abs(z) + round) * quant / 2^16 */
    const int q = ((abs_z + round[rc]) * quant[rc]) >> 16;
    const int signed_q = (q ^ sign) - sign; /* restore the sign */

    qcoeff[rc] = (short)signed_q;
    dqcoeff[rc] = (short)(signed_q * dequant[rc]);

    if (q) last_nonzero = idx; /* track last nonzero coefficient */
  }

  *d->eob = (char)(last_nonzero + 1);
}
50
/* Regular (zero-bin) quantizer for a single 4x4 block.
 *
 * Each coefficient is compared against a zero-bin threshold that grows
 * with the length of the current zero run (zrun_zbin_boost) plus a
 * per-MB adjustment (zbin_extra); coefficients inside the bin are
 * forced to zero.  Surviving coefficients are quantized with the
 * two-step multiply/shift pair and written out with their dequantized
 * values.  *d->eob is set to one past the last nonzero position.
 */
void vp8_regular_quantize_b_c(BLOCK *b, BLOCKD *d) {
  const short *coeff = b->coeff;
  const short *zbin = b->zbin;
  const short *round = b->round;
  const short *quant = b->quant;
  const short *quant_shift = b->quant_shift;
  const short *dequant = d->dequant;
  short *boost = b->zrun_zbin_boost;
  short *qcoeff = d->qcoeff;
  short *dqcoeff = d->dqcoeff;
  const short zbin_oq = b->zbin_extra;
  int last_nonzero = -1;
  int idx;

  /* Both outputs hold 16 shorts (32 bytes); clear them up front since
   * only coefficients passing the zero-bin test are written below. */
  memset(qcoeff, 0, 32);
  memset(dqcoeff, 0, 32);

  for (idx = 0; idx < 16; ++idx) {
    const int rc = vp8_default_zig_zag1d[idx];
    const int z = coeff[rc];
    /* zero-bin threshold: base + current zero-run boost + MB extra */
    const int threshold = zbin[rc] + *boost + zbin_oq;
    const int sign = z >> 31;            /* 0 or -1: sign of z */
    int abs_z = (z ^ sign) - sign;       /* abs(z) */

    ++boost;

    if (abs_z >= threshold) {
      int q, signed_q;

      abs_z += round[rc];
      /* quantize via reciprocal multiply with correction term */
      q = ((((abs_z * quant[rc]) >> 16) + abs_z) * quant_shift[rc]) >> 16;
      signed_q = (q ^ sign) - sign; /* restore the sign */
      qcoeff[rc] = (short)signed_q;
      dqcoeff[rc] = (short)(signed_q * dequant[rc]);

      if (q) {
        last_nonzero = idx;
        boost = b->zrun_zbin_boost; /* nonzero coeff resets the zero run */
      }
    }
  }

  *d->eob = (char)(last_nonzero + 1);
}
98
/* Quantize the 16 luma (Y) blocks of a macroblock, plus the
 * second-order block (index 24) for modes that carry one.
 * B_PRED and SPLITMV macroblocks have no second-order block. */
void vp8_quantize_mby(MACROBLOCK *x) {
  int blk;
  const int mode = x->e_mbd.mode_info_context->mbmi.mode;
  const int use_2nd_order = (mode != B_PRED && mode != SPLITMV);

  for (blk = 0; blk < 16; ++blk) {
    x->quantize_b(&x->block[blk], &x->e_mbd.block[blk]);
  }

  if (use_2nd_order) {
    x->quantize_b(&x->block[24], &x->e_mbd.block[24]);
  }
}
108
/* Quantize all blocks of a macroblock: 16 luma + 8 chroma, and the
 * second-order block (index 24) for modes that carry one.
 * B_PRED and SPLITMV macroblocks have no second-order block. */
void vp8_quantize_mb(MACROBLOCK *x) {
  const int mode = x->e_mbd.mode_info_context->mbmi.mode;
  /* stop at 25 (include block 24) only when a 2nd order block exists */
  const int block_count = (mode != B_PRED && mode != SPLITMV) ? 25 : 24;
  int blk;

  for (blk = 0; blk < block_count; ++blk) {
    x->quantize_b(&x->block[blk], &x->e_mbd.block[blk]);
  }
}
118
/* Quantize the 8 chroma (U and V) blocks of a macroblock
 * (block indices 16..23). */
void vp8_quantize_mbuv(MACROBLOCK *x) {
  int blk;

  for (blk = 16; blk < 24; ++blk) {
    x->quantize_b(&x->block[blk], &x->e_mbd.block[blk]);
  }
}
124
/* Per-Q-index rounding factor, Q7 fixed point (applied as (f * q) >> 7 in
 * vp8cx_init_quantizer): a flat 48/128 for every Q index 0..128. */
static const int qrounding_factors[129] = {
  48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
  48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
  48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
  48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
  48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
  48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
  48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48
};
134
/* Per-Q-index zero-bin factor, Q7 fixed point (applied as
 * ((f * q) + 64) >> 7 in vp8cx_init_quantizer): 84/128 for Q < 48,
 * narrowing to 80/128 for Q >= 48. */
static const int qzbin_factors[129] = {
  84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84,
  84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84,
  84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 80, 80, 80, 80, 80, 80, 80, 80, 80,
  80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80,
  80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80,
  80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80,
  80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80
};
144
/* Rounding factor for the second-order (Y2) plane, Q7 fixed point.
 * Currently identical to qrounding_factors (flat 48/128); kept as a
 * separate table so the Y2 plane can be tuned independently. */
static const int qrounding_factors_y2[129] = {
  48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
  48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
  48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
  48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
  48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
  48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
  48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48
};
154
/* Zero-bin factor for the second-order (Y2) plane, Q7 fixed point.
 * Currently identical to qzbin_factors (84/128 below Q 48, 80/128 from
 * Q 48 up); kept separate so the Y2 plane can be tuned independently. */
static const int qzbin_factors_y2[129] = {
  84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84,
  84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84,
  84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 80, 80, 80, 80, 80, 80, 80, 80, 80,
  80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80,
  80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80,
  80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80,
  80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80
};
164
/* Compute the multiplier/shift pair (*quant, *shift) used to divide a
 * coefficient by the dequant value d via fixed-point multiplication.
 *
 * improved_quant != 0: build a rounded-up reciprocal m ~= 2^(16+l)/d
 * (l = position of d's most significant bit); *quant stores m - 2^16
 * (the quantizer adds the operand back to recover the full product) and
 * *shift stores 1 << (16 - l) so the per-coefficient shift can be done
 * as one more multiply followed by a constant >> 16.
 *
 * improved_quant == 0: plain truncating 16-bit reciprocal, no shift. */
static void invert_quant(int improved_quant, short *quant, short *shift,
                         short d) {
  if (improved_quant) {
    const unsigned int ud = (unsigned int)d;
    const int msb = get_msb(ud);
    const int recip = 1 + (1 << (16 + msb)) / d;

    *quant = (short)(recip - (1 << 16));
    /* encode the variable shift as a multiplier (see header comment) */
    *shift = (short)(1 << (16 - msb));
  } else {
    *quant = (short)((1 << 16) / d);
    *shift = 0;
  }
}
182
/* Build the per-Q-index quantizer lookup tables for every plane type.
 *
 * For each base Q index (0..QINDEX_RANGE-1) and for each of the Y1
 * (luma), Y2 (second order) and UV (chroma) planes this fills:
 *   - the fast-path reciprocal quantizer   (*quant_fast),
 *   - the multiply/shift quantizer pair    (*quant / *quant_shift),
 *   - the zero-bin threshold and rounding  (*zbin / *round),
 *   - the dequantization value             (common.*dequant),
 *   - the zero-run zbin boost table        (zrun_zbin_boost_*).
 * Index 0 holds the DC values, index 1 the AC values; indices 2..15
 * reuse the AC values but get their own position-dependent zbin boost.
 * zbin/round/boost products are Q7 fixed point (hence the >> 7).
 */
void vp8cx_init_quantizer(VP8_COMP *cpi) {
  int i;
  int quant_val;
  int Q;

  /* zbin boost per zig-zag position: later positions (longer zero runs)
   * get a wider zero bin. */
  int zbin_boost[16] = { 0,  0,  8,  10, 12, 14, 16, 20,
                         24, 28, 32, 36, 40, 44, 44, 44 };

  for (Q = 0; Q < QINDEX_RANGE; ++Q) {
    /* dc values */
    quant_val = vp8_dc_quant(Q, cpi->common.y1dc_delta_q);
    cpi->Y1quant_fast[Q][0] = (1 << 16) / quant_val;
    invert_quant(cpi->sf.improved_quant, cpi->Y1quant[Q] + 0,
                 cpi->Y1quant_shift[Q] + 0, quant_val);
    cpi->Y1zbin[Q][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
    cpi->Y1round[Q][0] = (qrounding_factors[Q] * quant_val) >> 7;
    cpi->common.Y1dequant[Q][0] = quant_val;
    cpi->zrun_zbin_boost_y1[Q][0] = (quant_val * zbin_boost[0]) >> 7;

    quant_val = vp8_dc2quant(Q, cpi->common.y2dc_delta_q);
    cpi->Y2quant_fast[Q][0] = (1 << 16) / quant_val;
    invert_quant(cpi->sf.improved_quant, cpi->Y2quant[Q] + 0,
                 cpi->Y2quant_shift[Q] + 0, quant_val);
    cpi->Y2zbin[Q][0] = ((qzbin_factors_y2[Q] * quant_val) + 64) >> 7;
    cpi->Y2round[Q][0] = (qrounding_factors_y2[Q] * quant_val) >> 7;
    cpi->common.Y2dequant[Q][0] = quant_val;
    cpi->zrun_zbin_boost_y2[Q][0] = (quant_val * zbin_boost[0]) >> 7;

    quant_val = vp8_dc_uv_quant(Q, cpi->common.uvdc_delta_q);
    cpi->UVquant_fast[Q][0] = (1 << 16) / quant_val;
    invert_quant(cpi->sf.improved_quant, cpi->UVquant[Q] + 0,
                 cpi->UVquant_shift[Q] + 0, quant_val);
    cpi->UVzbin[Q][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
    cpi->UVround[Q][0] = (qrounding_factors[Q] * quant_val) >> 7;
    cpi->common.UVdequant[Q][0] = quant_val;
    cpi->zrun_zbin_boost_uv[Q][0] = (quant_val * zbin_boost[0]) >> 7;

    /* all the ac values = ; */
    quant_val = vp8_ac_yquant(Q);
    cpi->Y1quant_fast[Q][1] = (1 << 16) / quant_val;
    invert_quant(cpi->sf.improved_quant, cpi->Y1quant[Q] + 1,
                 cpi->Y1quant_shift[Q] + 1, quant_val);
    cpi->Y1zbin[Q][1] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
    cpi->Y1round[Q][1] = (qrounding_factors[Q] * quant_val) >> 7;
    cpi->common.Y1dequant[Q][1] = quant_val;
    cpi->zrun_zbin_boost_y1[Q][1] = (quant_val * zbin_boost[1]) >> 7;

    quant_val = vp8_ac2quant(Q, cpi->common.y2ac_delta_q);
    cpi->Y2quant_fast[Q][1] = (1 << 16) / quant_val;
    invert_quant(cpi->sf.improved_quant, cpi->Y2quant[Q] + 1,
                 cpi->Y2quant_shift[Q] + 1, quant_val);
    cpi->Y2zbin[Q][1] = ((qzbin_factors_y2[Q] * quant_val) + 64) >> 7;
    cpi->Y2round[Q][1] = (qrounding_factors_y2[Q] * quant_val) >> 7;
    cpi->common.Y2dequant[Q][1] = quant_val;
    cpi->zrun_zbin_boost_y2[Q][1] = (quant_val * zbin_boost[1]) >> 7;

    quant_val = vp8_ac_uv_quant(Q, cpi->common.uvac_delta_q);
    cpi->UVquant_fast[Q][1] = (1 << 16) / quant_val;
    invert_quant(cpi->sf.improved_quant, cpi->UVquant[Q] + 1,
                 cpi->UVquant_shift[Q] + 1, quant_val);
    cpi->UVzbin[Q][1] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
    cpi->UVround[Q][1] = (qrounding_factors[Q] * quant_val) >> 7;
    cpi->common.UVdequant[Q][1] = quant_val;
    cpi->zrun_zbin_boost_uv[Q][1] = (quant_val * zbin_boost[1]) >> 7;

    /* positions 2..15 share the AC quantizer; only the zbin boost
     * differs per position. */
    for (i = 2; i < 16; ++i) {
      cpi->Y1quant_fast[Q][i] = cpi->Y1quant_fast[Q][1];
      cpi->Y1quant[Q][i] = cpi->Y1quant[Q][1];
      cpi->Y1quant_shift[Q][i] = cpi->Y1quant_shift[Q][1];
      cpi->Y1zbin[Q][i] = cpi->Y1zbin[Q][1];
      cpi->Y1round[Q][i] = cpi->Y1round[Q][1];
      cpi->zrun_zbin_boost_y1[Q][i] =
          (cpi->common.Y1dequant[Q][1] * zbin_boost[i]) >> 7;

      cpi->Y2quant_fast[Q][i] = cpi->Y2quant_fast[Q][1];
      cpi->Y2quant[Q][i] = cpi->Y2quant[Q][1];
      cpi->Y2quant_shift[Q][i] = cpi->Y2quant_shift[Q][1];
      cpi->Y2zbin[Q][i] = cpi->Y2zbin[Q][1];
      cpi->Y2round[Q][i] = cpi->Y2round[Q][1];
      cpi->zrun_zbin_boost_y2[Q][i] =
          (cpi->common.Y2dequant[Q][1] * zbin_boost[i]) >> 7;

      cpi->UVquant_fast[Q][i] = cpi->UVquant_fast[Q][1];
      cpi->UVquant[Q][i] = cpi->UVquant[Q][1];
      cpi->UVquant_shift[Q][i] = cpi->UVquant_shift[Q][1];
      cpi->UVzbin[Q][i] = cpi->UVzbin[Q][1];
      cpi->UVround[Q][i] = cpi->UVround[Q][1];
      cpi->zrun_zbin_boost_uv[Q][i] =
          (cpi->common.UVdequant[Q][1] * zbin_boost[i]) >> 7;
    }
  }
}
275
/* Extra zero-bin width for Y1 blocks: the AC dequant value scaled by the
 * combined over-quant / mode-boost / activity adjustments, Q7 fixed
 * point.  Expects `cpi`, `x` and `QIndex` to be in scope at expansion. */
#define ZBIN_EXTRA_Y                                                \
  ((cpi->common.Y1dequant[QIndex][1] *                              \
    (x->zbin_over_quant + x->zbin_mode_boost + x->act_zbin_adj)) >> \
   7)

/* Extra zero-bin width for UV blocks; same form as ZBIN_EXTRA_Y using
 * the UV AC dequant value. */
#define ZBIN_EXTRA_UV                                               \
  ((cpi->common.UVdequant[QIndex][1] *                              \
    (x->zbin_over_quant + x->zbin_mode_boost + x->act_zbin_adj)) >> \
   7)

/* Extra zero-bin width for the second-order (Y2) block; the over-quant
 * contribution is halved relative to the Y/UV variants. */
#define ZBIN_EXTRA_Y2                                                     \
  ((cpi->common.Y2dequant[QIndex][1] *                                    \
    ((x->zbin_over_quant / 2) + x->zbin_mode_boost + x->act_zbin_adj)) >> \
   7)
290
/* Set up the quantizer state for one macroblock.
 *
 * Selects the MB's Q index (from segment feature data when segmentation
 * is enabled — absolute or delta relative to base_qindex — otherwise the
 * frame base_qindex) and points the MB's BLOCK/BLOCKD structures at the
 * per-Q tables built by vp8cx_init_quantizer().
 *
 * ok_to_skip == 0 forces the full refresh (and records the "last"
 * zbin inputs); otherwise the full refresh runs only when the Q index
 * changed, and when it did not, just the zbin_extra values are
 * recomputed — and only if one of their inputs (zbin_over_quant,
 * zbin_mode_boost, act_zbin_adj) changed since the last call.
 */
void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x, int ok_to_skip) {
  int i;
  int QIndex;
  MACROBLOCKD *xd = &x->e_mbd;
  int zbin_extra;

  /* Select the baseline MB Q index. */
  if (xd->segmentation_enabled) {
    /* Abs Value */
    if (xd->mb_segment_abs_delta == SEGMENT_ABSDATA) {
      QIndex = xd->segment_feature_data[MB_LVL_ALT_Q]
                                       [xd->mode_info_context->mbmi.segment_id];
      /* Delta Value */
    } else {
      QIndex = cpi->common.base_qindex +
               xd->segment_feature_data[MB_LVL_ALT_Q]
                                       [xd->mode_info_context->mbmi.segment_id];
      /* Clamp to valid range */
      QIndex = (QIndex >= 0) ? ((QIndex <= MAXQ) ? QIndex : MAXQ) : 0;
    }
  } else {
    QIndex = cpi->common.base_qindex;
  }

  /* This initialization should be called at least once. Use ok_to_skip to
   * decide if it is ok to skip.
   * Before encoding a frame, this function is always called with ok_to_skip
   * =0, which means no skiping of calculations. The "last" values are
   * initialized at that time.
   */
  if (!ok_to_skip || QIndex != x->q_index) {
    /* NOTE(review): the DC entry of dequant_y1_dc is pinned to 1,
     * unlike dequant_y1[0] — confirm intended consumer before changing. */
    xd->dequant_y1_dc[0] = 1;
    xd->dequant_y1[0] = cpi->common.Y1dequant[QIndex][0];
    xd->dequant_y2[0] = cpi->common.Y2dequant[QIndex][0];
    xd->dequant_uv[0] = cpi->common.UVdequant[QIndex][0];

    /* positions 1..15 all use the AC dequant value */
    for (i = 1; i < 16; ++i) {
      xd->dequant_y1_dc[i] = xd->dequant_y1[i] =
          cpi->common.Y1dequant[QIndex][1];
      xd->dequant_y2[i] = cpi->common.Y2dequant[QIndex][1];
      xd->dequant_uv[i] = cpi->common.UVdequant[QIndex][1];
    }
#if 1
    /*TODO: Remove dequant from BLOCKD. This is a temporary solution until
     * the quantizer code uses a passed in pointer to the dequant constants.
     * This will also require modifications to the x86 and neon assembly.
     * */
    for (i = 0; i < 16; ++i) x->e_mbd.block[i].dequant = xd->dequant_y1;
    for (i = 16; i < 24; ++i) x->e_mbd.block[i].dequant = xd->dequant_uv;
    x->e_mbd.block[24].dequant = xd->dequant_y2;
#endif

    /* Y */
    zbin_extra = ZBIN_EXTRA_Y;

    for (i = 0; i < 16; ++i) {
      x->block[i].quant = cpi->Y1quant[QIndex];
      x->block[i].quant_fast = cpi->Y1quant_fast[QIndex];
      x->block[i].quant_shift = cpi->Y1quant_shift[QIndex];
      x->block[i].zbin = cpi->Y1zbin[QIndex];
      x->block[i].round = cpi->Y1round[QIndex];
      x->block[i].zrun_zbin_boost = cpi->zrun_zbin_boost_y1[QIndex];
      x->block[i].zbin_extra = (short)zbin_extra;
    }

    /* UV */
    zbin_extra = ZBIN_EXTRA_UV;

    for (i = 16; i < 24; ++i) {
      x->block[i].quant = cpi->UVquant[QIndex];
      x->block[i].quant_fast = cpi->UVquant_fast[QIndex];
      x->block[i].quant_shift = cpi->UVquant_shift[QIndex];
      x->block[i].zbin = cpi->UVzbin[QIndex];
      x->block[i].round = cpi->UVround[QIndex];
      x->block[i].zrun_zbin_boost = cpi->zrun_zbin_boost_uv[QIndex];
      x->block[i].zbin_extra = (short)zbin_extra;
    }

    /* Y2 */
    zbin_extra = ZBIN_EXTRA_Y2;

    x->block[24].quant_fast = cpi->Y2quant_fast[QIndex];
    x->block[24].quant = cpi->Y2quant[QIndex];
    x->block[24].quant_shift = cpi->Y2quant_shift[QIndex];
    x->block[24].zbin = cpi->Y2zbin[QIndex];
    x->block[24].round = cpi->Y2round[QIndex];
    x->block[24].zrun_zbin_boost = cpi->zrun_zbin_boost_y2[QIndex];
    x->block[24].zbin_extra = (short)zbin_extra;

    /* save this macroblock QIndex for vp8_update_zbin_extra() */
    x->q_index = QIndex;

    /* remember the zbin inputs so the skip path below can detect change */
    x->last_zbin_over_quant = x->zbin_over_quant;
    x->last_zbin_mode_boost = x->zbin_mode_boost;
    x->last_act_zbin_adj = x->act_zbin_adj;

  } else if (x->last_zbin_over_quant != x->zbin_over_quant ||
             x->last_zbin_mode_boost != x->zbin_mode_boost ||
             x->last_act_zbin_adj != x->act_zbin_adj) {
    /* Q index unchanged but a zbin input moved: refresh zbin_extra only. */
    /* Y */
    zbin_extra = ZBIN_EXTRA_Y;

    for (i = 0; i < 16; ++i) x->block[i].zbin_extra = (short)zbin_extra;

    /* UV */
    zbin_extra = ZBIN_EXTRA_UV;

    for (i = 16; i < 24; ++i) x->block[i].zbin_extra = (short)zbin_extra;

    /* Y2 */
    zbin_extra = ZBIN_EXTRA_Y2;
    x->block[24].zbin_extra = (short)zbin_extra;

    x->last_zbin_over_quant = x->zbin_over_quant;
    x->last_zbin_mode_boost = x->zbin_mode_boost;
    x->last_act_zbin_adj = x->act_zbin_adj;
  }
}
409
/* Recompute only the zbin_extra values for every block of the MB, using
 * the Q index saved by vp8cx_mb_init_quantizer().  Called when the zbin
 * adjustment inputs change but the quantizer tables themselves do not. */
void vp8_update_zbin_extra(VP8_COMP *cpi, MACROBLOCK *x) {
  /* `QIndex` must keep this exact name: the ZBIN_EXTRA_* macros expand it. */
  const int QIndex = x->q_index;
  short y_extra, uv_extra, y2_extra;
  int blk;

  y_extra = (short)ZBIN_EXTRA_Y;
  uv_extra = (short)ZBIN_EXTRA_UV;
  y2_extra = (short)ZBIN_EXTRA_Y2;

  /* luma blocks 0..15, chroma blocks 16..23, second order block 24 */
  for (blk = 0; blk < 16; ++blk) x->block[blk].zbin_extra = y_extra;
  for (blk = 16; blk < 24; ++blk) x->block[blk].zbin_extra = uv_extra;
  x->block[24].zbin_extra = y2_extra;
}
429 #undef ZBIN_EXTRA_Y
430 #undef ZBIN_EXTRA_UV
431 #undef ZBIN_EXTRA_Y2
432
/* Per-frame quantizer setup: reset the MB-level zbin mode boost and then
 * force a full MB quantizer refresh (ok_to_skip = 0, which also seeds
 * the "last" zbin values used by later skip checks). */
void vp8cx_frame_init_quantizer(VP8_COMP *cpi) {
  /* Clear Zbin mode boost for default case */
  cpi->mb.zbin_mode_boost = 0;

  /* MB level quantizer setup */
  vp8cx_mb_init_quantizer(cpi, &cpi->mb, 0);
}
440
vp8_set_quantizer(struct VP8_COMP * cpi,int Q)441 void vp8_set_quantizer(struct VP8_COMP *cpi, int Q) {
442 VP8_COMMON *cm = &cpi->common;
443 MACROBLOCKD *mbd = &cpi->mb.e_mbd;
444 int update = 0;
445 int new_delta_q;
446 int new_uv_delta_q;
447 cm->base_qindex = Q;
448
449 /* if any of the delta_q values are changing update flag has to be set */
450 /* currently only y2dc_delta_q may change */
451
452 cm->y1dc_delta_q = 0;
453 cm->y2ac_delta_q = 0;
454
455 if (Q < 4) {
456 new_delta_q = 4 - Q;
457 } else {
458 new_delta_q = 0;
459 }
460
461 update |= cm->y2dc_delta_q != new_delta_q;
462 cm->y2dc_delta_q = new_delta_q;
463
464 new_uv_delta_q = 0;
465 // For screen content, lower the q value for UV channel. For now, select
466 // conservative delta; same delta for dc and ac, and decrease it with lower
467 // Q, and set to 0 below some threshold. May want to condition this in
468 // future on the variance/energy in UV channel.
469 if (cpi->oxcf.screen_content_mode && Q > 40) {
470 new_uv_delta_q = -(int)(0.15 * Q);
471 // Check range: magnitude of delta is 4 bits.
472 if (new_uv_delta_q < -15) {
473 new_uv_delta_q = -15;
474 }
475 }
476 update |= cm->uvdc_delta_q != new_uv_delta_q;
477 cm->uvdc_delta_q = new_uv_delta_q;
478 cm->uvac_delta_q = new_uv_delta_q;
479
480 /* Set Segment specific quatizers */
481 mbd->segment_feature_data[MB_LVL_ALT_Q][0] =
482 cpi->segment_feature_data[MB_LVL_ALT_Q][0];
483 mbd->segment_feature_data[MB_LVL_ALT_Q][1] =
484 cpi->segment_feature_data[MB_LVL_ALT_Q][1];
485 mbd->segment_feature_data[MB_LVL_ALT_Q][2] =
486 cpi->segment_feature_data[MB_LVL_ALT_Q][2];
487 mbd->segment_feature_data[MB_LVL_ALT_Q][3] =
488 cpi->segment_feature_data[MB_LVL_ALT_Q][3];
489
490 /* quantizer has to be reinitialized for any delta_q changes */
491 if (update) vp8cx_init_quantizer(cpi);
492 }
493