/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <assert.h>
#include <math.h>
#include <limits.h>

#include "vp9/common/vp9_alloccommon.h"
#include "vp9/common/vp9_common.h"
#include "vp9/common/vp9_onyxc_int.h"
#include "vp9/common/vp9_quant_common.h"
#include "vp9/common/vp9_reconinter.h"
#include "vp9/encoder/vp9_encodeframe.h"
#include "vp9/encoder/vp9_ethread.h"
#include "vp9/encoder/vp9_extend.h"
#include "vp9/encoder/vp9_firstpass.h"
#include "vp9/encoder/vp9_mcomp.h"
#include "vp9/encoder/vp9_encoder.h"
#include "vp9/encoder/vp9_quantize.h"
#include "vp9/encoder/vp9_ratectrl.h"
#include "vp9/encoder/vp9_segmentation.h"
#include "vp9/encoder/vp9_temporal_filter.h"
#include "vpx_dsp/vpx_dsp_common.h"
#include "vpx_mem/vpx_mem.h"
#include "vpx_ports/mem.h"
#include "vpx_ports/vpx_timer.h"
#include "vpx_scale/vpx_scale.h"

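// Reciprocal lookup tables used to replace divisions with a multiply and a
// shift: fixed_divide[i] is (1 << 19) / i (filled in at init time), and
// index_mult[i] approximates (3 << 16) / i for the modifier computation in
// mod_index() below.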
static int fixed_divide[512];
static unsigned int index_mult[14] = { 0,     0,     0,     0,     49152,
                                       39322, 32768, 28087, 24576, 21846,
                                       19661, 17874, 16384, 15124 };
#if CONFIG_VP9_HIGHBITDEPTH
static int64_t highbd_index_mult[14] = { 0U,          0U,          0U,
                                         0U,          3221225472U, 2576980378U,
                                         2147483648U, 1840700270U, 1610612736U,
                                         1431655766U, 1288490189U, 1171354718U,
                                         1073741824U, 991146300U };
#endif  // CONFIG_VP9_HIGHBITDEPTH

// Prediction function using 12-tap interpolation filter.
// TODO([email protected]): add SIMD optimization.
#define MAX_FILTER_TAP 12
#define TF_INTERP_EXTEND 6
typedef int16_t InterpKernel12[MAX_FILTER_TAP];
// 12-tap filter (used by the encoder only).
DECLARE_ALIGNED(256, static const InterpKernel12,
                sub_pel_filters_12[SUBPEL_SHIFTS]) = {
  { 0, 0, 0, 0, 0, 128, 0, 0, 0, 0, 0, 0 },
  { 0, 1, -2, 3, -7, 127, 8, -4, 2, -1, 1, 0 },
  { -1, 2, -3, 6, -13, 124, 18, -8, 4, -2, 2, -1 },
  { -1, 3, -4, 8, -18, 120, 28, -12, 7, -4, 2, -1 },
  { -1, 3, -6, 10, -21, 115, 38, -15, 8, -5, 3, -1 },
  { -2, 4, -6, 12, -24, 108, 49, -18, 10, -6, 3, -2 },
  { -2, 4, -7, 13, -25, 100, 60, -21, 11, -7, 4, -2 },
  { -2, 4, -7, 13, -26, 91, 71, -24, 13, -7, 4, -2 },
  { -2, 4, -7, 13, -25, 81, 81, -25, 13, -7, 4, -2 },
  { -2, 4, -7, 13, -24, 71, 91, -26, 13, -7, 4, -2 },
  { -2, 4, -7, 11, -21, 60, 100, -25, 13, -7, 4, -2 },
  { -2, 3, -6, 10, -18, 49, 108, -24, 12, -6, 4, -2 },
  { -1, 3, -5, 8, -15, 38, 115, -21, 10, -6, 3, -1 },
  { -1, 2, -4, 7, -12, 28, 120, -18, 8, -4, 3, -1 },
  { -1, 2, -2, 4, -8, 18, 124, -13, 6, -3, 2, -1 },
  { 0, 1, -1, 2, -4, 8, 127, -7, 3, -2, 1, 0 }
};
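// Note: every kernel above sums to 128 (1 << FILTER_BITS), so each phase has
// unity gain at DC, and taps are mirror-symmetric about the half-pel phase.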

static void convolve_horiz_12(const uint8_t *src, ptrdiff_t src_stride,
                              uint8_t *dst, ptrdiff_t dst_stride,
                              const InterpKernel12 *x_filters, int x0_q4,
                              int x_step_q4, int w, int h) {
  int x, y;
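  // Step back so the 12-tap window is centered on the nominal source pixel
  // (5 taps before it, 6 after, for an even-length kernel).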
  src -= MAX_FILTER_TAP / 2 - 1;

  for (y = 0; y < h; ++y) {
    int x_q4 = x0_q4;
    for (x = 0; x < w; ++x) {
      const uint8_t *const src_x = &src[x_q4 >> SUBPEL_BITS];
      const int16_t *const x_filter = x_filters[x_q4 & SUBPEL_MASK];
      int k, sum = 0;
      for (k = 0; k < MAX_FILTER_TAP; ++k) sum += src_x[k] * x_filter[k];
      dst[x] = clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS));
      x_q4 += x_step_q4;
    }
    src += src_stride;
    dst += dst_stride;
  }
}

static void convolve_vert_12(const uint8_t *src, ptrdiff_t src_stride,
                             uint8_t *dst, ptrdiff_t dst_stride,
                             const InterpKernel12 *y_filters, int y0_q4,
                             int y_step_q4, int w, int h) {
  int x, y;
  src -= src_stride * (MAX_FILTER_TAP / 2 - 1);

  for (x = 0; x < w; ++x) {
    int y_q4 = y0_q4;
    for (y = 0; y < h; ++y) {
      const uint8_t *src_y = &src[(y_q4 >> SUBPEL_BITS) * src_stride];
      const int16_t *const y_filter = y_filters[y_q4 & SUBPEL_MASK];
      int k, sum = 0;
      for (k = 0; k < MAX_FILTER_TAP; ++k)
        sum += src_y[k * src_stride] * y_filter[k];
      dst[y * dst_stride] = clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS));
      y_q4 += y_step_q4;
    }
    ++src;
    ++dst;
  }
}

// Copied from vpx_convolve8_c(). Possible block sizes are 32x32, 16x16, 8x8.
static void vpx_convolve8_12_c(const uint8_t *src, ptrdiff_t src_stride,
                               uint8_t *dst, ptrdiff_t dst_stride,
                               const InterpKernel12 *filter, int x0_q4,
                               int x_step_q4, int y0_q4, int y_step_q4, int w,
                               int h) {
  uint8_t temp[BW * (BH + MAX_FILTER_TAP - 1)];
  const int temp_stride = BW;
  const int intermediate_height =
      (((h - 1) * y_step_q4 + y0_q4) >> SUBPEL_BITS) + MAX_FILTER_TAP;
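  // The horizontal pass must produce every row that the 12-tap vertical pass
  // will read, hence the extra rows of vertical extension above.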

  convolve_horiz_12(src - src_stride * (MAX_FILTER_TAP / 2 - 1), src_stride,
                    temp, temp_stride, filter, x0_q4, x_step_q4, w,
                    intermediate_height);
  convolve_vert_12(temp + temp_stride * (MAX_FILTER_TAP / 2 - 1), temp_stride,
                   dst, dst_stride, filter, y0_q4, y_step_q4, w, h);
}

static void vp9_build_inter_predictor_12(
    const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride,
    const MV *src_mv, const struct scale_factors *sf, int w, int h, int ref,
    const InterpKernel12 *kernel, enum mv_precision precision, int x, int y) {
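  // Convert an eighth-pel (Q3) MV to sixteenth-pel (Q4) units when needed.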
  const int is_q4 = precision == MV_PRECISION_Q4;
  const MV mv_q4 = { is_q4 ? src_mv->row : src_mv->row * 2,
                     is_q4 ? src_mv->col : src_mv->col * 2 };
  MV32 mv = vp9_scale_mv(&mv_q4, x, y, sf);
  const int subpel_x = mv.col & SUBPEL_MASK;
  const int subpel_y = mv.row & SUBPEL_MASK;
  (void)ref;

  src += (mv.row >> SUBPEL_BITS) * src_stride + (mv.col >> SUBPEL_BITS);

  vpx_convolve8_12_c(src, src_stride, dst, dst_stride, kernel, subpel_x,
                     sf->x_step_q4, subpel_y, sf->y_step_q4, w, h);
}

#if CONFIG_VP9_HIGHBITDEPTH
static void highbd_convolve_horiz_12(const uint16_t *src, ptrdiff_t src_stride,
                                     uint16_t *dst, ptrdiff_t dst_stride,
                                     const InterpKernel12 *x_filters, int x0_q4,
                                     int x_step_q4, int w, int h, int bd) {
  int x, y;
  src -= MAX_FILTER_TAP / 2 - 1;

  for (y = 0; y < h; ++y) {
    int x_q4 = x0_q4;
    for (x = 0; x < w; ++x) {
      const uint16_t *const src_x = &src[x_q4 >> SUBPEL_BITS];
      const int16_t *const x_filter = x_filters[x_q4 & SUBPEL_MASK];
      int k, sum = 0;
      for (k = 0; k < MAX_FILTER_TAP; ++k) sum += src_x[k] * x_filter[k];
      dst[x] = clip_pixel_highbd(ROUND_POWER_OF_TWO(sum, FILTER_BITS), bd);
      x_q4 += x_step_q4;
    }
    src += src_stride;
    dst += dst_stride;
  }
}

static void highbd_convolve_vert_12(const uint16_t *src, ptrdiff_t src_stride,
                                    uint16_t *dst, ptrdiff_t dst_stride,
                                    const InterpKernel12 *y_filters, int y0_q4,
                                    int y_step_q4, int w, int h, int bd) {
  int x, y;
  src -= src_stride * (MAX_FILTER_TAP / 2 - 1);

  for (x = 0; x < w; ++x) {
    int y_q4 = y0_q4;
    for (y = 0; y < h; ++y) {
      const uint16_t *src_y = &src[(y_q4 >> SUBPEL_BITS) * src_stride];
      const int16_t *const y_filter = y_filters[y_q4 & SUBPEL_MASK];
      int k, sum = 0;
      for (k = 0; k < MAX_FILTER_TAP; ++k)
        sum += src_y[k * src_stride] * y_filter[k];
      dst[y * dst_stride] =
          clip_pixel_highbd(ROUND_POWER_OF_TWO(sum, FILTER_BITS), bd);
      y_q4 += y_step_q4;
    }
    ++src;
    ++dst;
  }
}

static void highbd_convolve_12(const uint16_t *src, ptrdiff_t src_stride,
                               uint16_t *dst, ptrdiff_t dst_stride,
                               const InterpKernel12 *filter, int x0_q4,
                               int x_step_q4, int y0_q4, int y_step_q4, int w,
                               int h, int bd) {
  uint16_t temp[BW * (BH + MAX_FILTER_TAP - 1)];
  const int temp_stride = BW;
  const int intermediate_height =
      (((h - 1) * y_step_q4 + y0_q4) >> SUBPEL_BITS) + MAX_FILTER_TAP;

  highbd_convolve_horiz_12(src - src_stride * (MAX_FILTER_TAP / 2 - 1),
                           src_stride, temp, temp_stride, filter, x0_q4,
                           x_step_q4, w, intermediate_height, bd);
  highbd_convolve_vert_12(temp + temp_stride * (MAX_FILTER_TAP / 2 - 1),
                          temp_stride, dst, dst_stride, filter, y0_q4,
                          y_step_q4, w, h, bd);
}

// Copied from vpx_highbd_convolve8_c()
static void vpx_highbd_convolve8_12_c(const uint16_t *src, ptrdiff_t src_stride,
                                      uint16_t *dst, ptrdiff_t dst_stride,
                                      const InterpKernel12 *filter, int x0_q4,
                                      int x_step_q4, int y0_q4, int y_step_q4,
                                      int w, int h, int bd) {
  highbd_convolve_12(src, src_stride, dst, dst_stride, filter, x0_q4, x_step_q4,
                     y0_q4, y_step_q4, w, h, bd);
}

static void vp9_highbd_build_inter_predictor_12(
    const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride,
    const MV *src_mv, const struct scale_factors *sf, int w, int h, int ref,
    const InterpKernel12 *kernel, enum mv_precision precision, int x, int y,
    int bd) {
  const int is_q4 = precision == MV_PRECISION_Q4;
  const MV mv_q4 = { is_q4 ? src_mv->row : src_mv->row * 2,
                     is_q4 ? src_mv->col : src_mv->col * 2 };
  MV32 mv = vp9_scale_mv(&mv_q4, x, y, sf);
  const int subpel_x = mv.col & SUBPEL_MASK;
  const int subpel_y = mv.row & SUBPEL_MASK;
  (void)ref;

  src += (mv.row >> SUBPEL_BITS) * src_stride + (mv.col >> SUBPEL_BITS);

  vpx_highbd_convolve8_12_c(src, src_stride, dst, dst_stride, kernel, subpel_x,
                            sf->x_step_q4, subpel_y, sf->y_step_q4, w, h, bd);
}
#endif  // CONFIG_VP9_HIGHBITDEPTH

static void temporal_filter_predictors_mb_c(
    MACROBLOCKD *xd, uint8_t *y_mb_ptr, uint8_t *u_mb_ptr, uint8_t *v_mb_ptr,
    int stride, int uv_block_width, int uv_block_height, int mv_row, int mv_col,
    uint8_t *pred, struct scale_factors *scale, int x, int y, MV *blk_mvs,
    int use_32x32) {
  const int which_mv = 0;
  const InterpKernel12 *const kernel = sub_pel_filters_12;
  int i, j, k = 0, ys = (BH >> 1), xs = (BW >> 1);

  enum mv_precision mv_precision_uv;
  int uv_stride;
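  // With half-width UV blocks (4:2:0 subsampling), a luma MV covers twice the
  // chroma distance, so chroma uses one extra fractional bit (Q4 vs. Q3).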
  if (uv_block_width == (BW >> 1)) {
    uv_stride = (stride + 1) >> 1;
    mv_precision_uv = MV_PRECISION_Q4;
  } else {
    uv_stride = stride;
    mv_precision_uv = MV_PRECISION_Q3;
  }
#if !CONFIG_VP9_HIGHBITDEPTH
  (void)xd;
#endif

  if (use_32x32) {
    const MV mv = { mv_row, mv_col };
#if CONFIG_VP9_HIGHBITDEPTH
    if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
      vp9_highbd_build_inter_predictor_12(CONVERT_TO_SHORTPTR(y_mb_ptr), stride,
                                          CONVERT_TO_SHORTPTR(&pred[0]), BW,
                                          &mv, scale, BW, BH, which_mv, kernel,
                                          MV_PRECISION_Q3, x, y, xd->bd);

      vp9_highbd_build_inter_predictor_12(
          CONVERT_TO_SHORTPTR(u_mb_ptr), uv_stride,
          CONVERT_TO_SHORTPTR(&pred[BLK_PELS]), uv_block_width, &mv, scale,
          uv_block_width, uv_block_height, which_mv, kernel, mv_precision_uv, x,
          y, xd->bd);

      vp9_highbd_build_inter_predictor_12(
          CONVERT_TO_SHORTPTR(v_mb_ptr), uv_stride,
          CONVERT_TO_SHORTPTR(&pred[(BLK_PELS << 1)]), uv_block_width, &mv,
          scale, uv_block_width, uv_block_height, which_mv, kernel,
          mv_precision_uv, x, y, xd->bd);
      return;
    }
#endif  // CONFIG_VP9_HIGHBITDEPTH
    vp9_build_inter_predictor_12(y_mb_ptr, stride, &pred[0], BW, &mv, scale, BW,
                                 BH, which_mv, kernel, MV_PRECISION_Q3, x, y);

    vp9_build_inter_predictor_12(u_mb_ptr, uv_stride, &pred[BLK_PELS],
                                 uv_block_width, &mv, scale, uv_block_width,
                                 uv_block_height, which_mv, kernel,
                                 mv_precision_uv, x, y);

    vp9_build_inter_predictor_12(v_mb_ptr, uv_stride, &pred[(BLK_PELS << 1)],
                                 uv_block_width, &mv, scale, uv_block_width,
                                 uv_block_height, which_mv, kernel,
                                 mv_precision_uv, x, y);
    return;
  }

  // When use_32x32 == 0, construct the 32x32 predictor from four 16x16
  // predictors.
  // Y predictor
  for (i = 0; i < BH; i += ys) {
    for (j = 0; j < BW; j += xs) {
      const MV mv = blk_mvs[k];
      const int y_offset = i * stride + j;
      const int p_offset = i * BW + j;

#if CONFIG_VP9_HIGHBITDEPTH
      if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
        vp9_highbd_build_inter_predictor_12(
            CONVERT_TO_SHORTPTR(y_mb_ptr + y_offset), stride,
            CONVERT_TO_SHORTPTR(&pred[p_offset]), BW, &mv, scale, xs, ys,
            which_mv, kernel, MV_PRECISION_Q3, x, y, xd->bd);
      } else {
        vp9_build_inter_predictor_12(y_mb_ptr + y_offset, stride,
                                     &pred[p_offset], BW, &mv, scale, xs, ys,
                                     which_mv, kernel, MV_PRECISION_Q3, x, y);
      }
#else
      vp9_build_inter_predictor_12(y_mb_ptr + y_offset, stride, &pred[p_offset],
                                   BW, &mv, scale, xs, ys, which_mv, kernel,
                                   MV_PRECISION_Q3, x, y);
#endif  // CONFIG_VP9_HIGHBITDEPTH
      k++;
    }
  }

  // U and V predictors
  ys = (uv_block_height >> 1);
  xs = (uv_block_width >> 1);
  k = 0;

  for (i = 0; i < uv_block_height; i += ys) {
    for (j = 0; j < uv_block_width; j += xs) {
      const MV mv = blk_mvs[k];
      const int uv_offset = i * uv_stride + j;
      const int p_offset = i * uv_block_width + j;

#if CONFIG_VP9_HIGHBITDEPTH
      if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
        vp9_highbd_build_inter_predictor_12(
            CONVERT_TO_SHORTPTR(u_mb_ptr + uv_offset), uv_stride,
            CONVERT_TO_SHORTPTR(&pred[BLK_PELS + p_offset]), uv_block_width,
            &mv, scale, xs, ys, which_mv, kernel, mv_precision_uv, x, y,
            xd->bd);

        vp9_highbd_build_inter_predictor_12(
            CONVERT_TO_SHORTPTR(v_mb_ptr + uv_offset), uv_stride,
            CONVERT_TO_SHORTPTR(&pred[(BLK_PELS << 1) + p_offset]),
            uv_block_width, &mv, scale, xs, ys, which_mv, kernel,
            mv_precision_uv, x, y, xd->bd);
      } else {
        vp9_build_inter_predictor_12(u_mb_ptr + uv_offset, uv_stride,
                                     &pred[BLK_PELS + p_offset], uv_block_width,
                                     &mv, scale, xs, ys, which_mv, kernel,
                                     mv_precision_uv, x, y);

        vp9_build_inter_predictor_12(v_mb_ptr + uv_offset, uv_stride,
                                     &pred[(BLK_PELS << 1) + p_offset],
                                     uv_block_width, &mv, scale, xs, ys,
                                     which_mv, kernel, mv_precision_uv, x, y);
      }
#else
      vp9_build_inter_predictor_12(u_mb_ptr + uv_offset, uv_stride,
                                   &pred[BLK_PELS + p_offset], uv_block_width,
                                   &mv, scale, xs, ys, which_mv, kernel,
                                   mv_precision_uv, x, y);

      vp9_build_inter_predictor_12(v_mb_ptr + uv_offset, uv_stride,
                                   &pred[(BLK_PELS << 1) + p_offset],
                                   uv_block_width, &mv, scale, xs, ys, which_mv,
                                   kernel, mv_precision_uv, x, y);
#endif  // CONFIG_VP9_HIGHBITDEPTH
      k++;
    }
  }
}

void vp9_temporal_filter_init(void) {
  int i;

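  // fixed_divide[i] = (1 << 19) / i, so that x / i can later be computed as
  // (x * fixed_divide[i]) >> 19.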
  fixed_divide[0] = 0;
  for (i = 1; i < 512; ++i) fixed_divide[i] = 0x80000 / i;
}

static INLINE int mod_index(int sum_dist, int index, int rounding, int strength,
                            int filter_weight) {
  int mod;

  assert(index >= 0 && index <= 13);
  assert(index_mult[index] != 0);

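  // mod ~= sum_dist * 3 / index, computed with the fixed-point reciprocal
  // table to avoid a division.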
  mod =
      ((unsigned int)clamp(sum_dist, 0, UINT16_MAX) * index_mult[index]) >> 16;
  mod += rounding;
  mod >>= strength;

  mod = VPXMIN(16, mod);

  mod = 16 - mod;
  mod *= filter_weight;

  return mod;
}

#if CONFIG_VP9_HIGHBITDEPTH
static INLINE int highbd_mod_index(int sum_dist, int index, int rounding,
                                   int strength, int filter_weight) {
  int mod;

  assert(index >= 0 && index <= 13);
  assert(highbd_index_mult[index] != 0);

  mod = (int)((clamp(sum_dist, 0, INT32_MAX) * highbd_index_mult[index]) >> 32);
  mod += rounding;
  mod >>= strength;

  mod = VPXMIN(16, mod);

  mod = 16 - mod;
  mod *= filter_weight;

  return mod;
}
#endif  // CONFIG_VP9_HIGHBITDEPTH

static INLINE int get_filter_weight(unsigned int i, unsigned int j,
                                    unsigned int block_height,
                                    unsigned int block_width,
                                    const int *const blk_fw, int use_32x32) {
  // blk_fw[0] ~ blk_fw[3] are the same.
  if (use_32x32) {
    return blk_fw[0];
  }

  if (i < block_height / 2) {
    if (j < block_width / 2) {
      return blk_fw[0];
    }

    return blk_fw[1];
  }

  if (j < block_width / 2) {
    return blk_fw[2];
  }

  return blk_fw[3];
}

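// Non-local-means style temporal filter: for each pixel, the sum of squared
// differences over a 3x3 neighborhood (plus the co-located samples of the
// other planes) is mapped through mod_index() to a weight in
// [0, 16 * filter_weight]; the weight and the weighted predictor pixel are
// accumulated in count[] and accumulator[] and normalized later.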
void vp9_apply_temporal_filter_c(
    const uint8_t *y_frame1, int y_stride, const uint8_t *y_pred,
    int y_buf_stride, const uint8_t *u_frame1, const uint8_t *v_frame1,
    int uv_stride, const uint8_t *u_pred, const uint8_t *v_pred,
    int uv_buf_stride, unsigned int block_width, unsigned int block_height,
    int ss_x, int ss_y, int strength, const int *const blk_fw, int use_32x32,
    uint32_t *y_accumulator, uint16_t *y_count, uint32_t *u_accumulator,
    uint16_t *u_count, uint32_t *v_accumulator, uint16_t *v_count) {
  unsigned int i, j, k, m;
  int modifier;
  const int rounding = (1 << strength) >> 1;
  const unsigned int uv_block_width = block_width >> ss_x;
  const unsigned int uv_block_height = block_height >> ss_y;
  DECLARE_ALIGNED(16, uint16_t, y_diff_sse[BLK_PELS]);
  DECLARE_ALIGNED(16, uint16_t, u_diff_sse[BLK_PELS]);
  DECLARE_ALIGNED(16, uint16_t, v_diff_sse[BLK_PELS]);

  int idx = 0, idy;

  assert(strength >= 0);
  assert(strength <= 6);

  memset(y_diff_sse, 0, BLK_PELS * sizeof(uint16_t));
  memset(u_diff_sse, 0, BLK_PELS * sizeof(uint16_t));
  memset(v_diff_sse, 0, BLK_PELS * sizeof(uint16_t));

  // Calculate diff^2 for each pixel of the block.
  // TODO(yunqing): the following code needs to be optimized.
  for (i = 0; i < block_height; i++) {
    for (j = 0; j < block_width; j++) {
      const int16_t diff =
          y_frame1[i * (int)y_stride + j] - y_pred[i * (int)block_width + j];
      y_diff_sse[idx++] = diff * diff;
    }
  }
  idx = 0;
  for (i = 0; i < uv_block_height; i++) {
    for (j = 0; j < uv_block_width; j++) {
      const int16_t diffu =
          u_frame1[i * uv_stride + j] - u_pred[i * uv_buf_stride + j];
      const int16_t diffv =
          v_frame1[i * uv_stride + j] - v_pred[i * uv_buf_stride + j];
      u_diff_sse[idx] = diffu * diffu;
      v_diff_sse[idx] = diffv * diffv;
      idx++;
    }
  }

  for (i = 0, k = 0, m = 0; i < block_height; i++) {
    for (j = 0; j < block_width; j++) {
      const int pixel_value = y_pred[i * y_buf_stride + j];
      const int filter_weight =
          get_filter_weight(i, j, block_height, block_width, blk_fw, use_32x32);

      // non-local mean approach
      int y_index = 0;

      const int uv_r = i >> ss_y;
      const int uv_c = j >> ss_x;
      modifier = 0;

      for (idy = -1; idy <= 1; ++idy) {
        for (idx = -1; idx <= 1; ++idx) {
          const int row = (int)i + idy;
          const int col = (int)j + idx;

          if (row >= 0 && row < (int)block_height && col >= 0 &&
              col < (int)block_width) {
            modifier += y_diff_sse[row * (int)block_width + col];
            ++y_index;
          }
        }
      }

      assert(y_index > 0);

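      // Fold the co-located chroma SSEs into the luma modifier; they count as
      // two more samples in the average.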
      modifier += u_diff_sse[uv_r * uv_block_width + uv_c];
      modifier += v_diff_sse[uv_r * uv_block_width + uv_c];

      y_index += 2;

      modifier =
          mod_index(modifier, y_index, rounding, strength, filter_weight);

      y_count[k] += modifier;
      y_accumulator[k] += modifier * pixel_value;

      ++k;

      // Process chroma component
      if (!(i & ss_y) && !(j & ss_x)) {
        const int u_pixel_value = u_pred[uv_r * uv_buf_stride + uv_c];
        const int v_pixel_value = v_pred[uv_r * uv_buf_stride + uv_c];

        // non-local mean approach
        int cr_index = 0;
        int u_mod = 0, v_mod = 0;
        int y_diff = 0;

        for (idy = -1; idy <= 1; ++idy) {
          for (idx = -1; idx <= 1; ++idx) {
            const int row = uv_r + idy;
            const int col = uv_c + idx;

            if (row >= 0 && row < (int)uv_block_height && col >= 0 &&
                col < (int)uv_block_width) {
              u_mod += u_diff_sse[row * uv_block_width + col];
              v_mod += v_diff_sse[row * uv_block_width + col];
              ++cr_index;
            }
          }
        }

        assert(cr_index > 0);

        for (idy = 0; idy < 1 + ss_y; ++idy) {
          for (idx = 0; idx < 1 + ss_x; ++idx) {
            const int row = (uv_r << ss_y) + idy;
            const int col = (uv_c << ss_x) + idx;
            y_diff += y_diff_sse[row * (int)block_width + col];
            ++cr_index;
          }
        }

        u_mod += y_diff;
        v_mod += y_diff;

        u_mod = mod_index(u_mod, cr_index, rounding, strength, filter_weight);
        v_mod = mod_index(v_mod, cr_index, rounding, strength, filter_weight);

        u_count[m] += u_mod;
        u_accumulator[m] += u_mod * u_pixel_value;
        v_count[m] += v_mod;
        v_accumulator[m] += v_mod * v_pixel_value;

        ++m;
      }  // Complete YUV pixel
    }
  }
}

#if CONFIG_VP9_HIGHBITDEPTH
void vp9_highbd_apply_temporal_filter_c(
    const uint16_t *y_src, int y_src_stride, const uint16_t *y_pre,
    int y_pre_stride, const uint16_t *u_src, const uint16_t *v_src,
    int uv_src_stride, const uint16_t *u_pre, const uint16_t *v_pre,
    int uv_pre_stride, unsigned int block_width, unsigned int block_height,
    int ss_x, int ss_y, int strength, const int *const blk_fw, int use_32x32,
    uint32_t *y_accum, uint16_t *y_count, uint32_t *u_accum, uint16_t *u_count,
    uint32_t *v_accum, uint16_t *v_count) {
  const int uv_block_width = block_width >> ss_x;
  const int uv_block_height = block_height >> ss_y;
  const int y_diff_stride = BW;
  const int uv_diff_stride = BW;

  DECLARE_ALIGNED(16, uint32_t, y_diff_sse[BLK_PELS]);
  DECLARE_ALIGNED(16, uint32_t, u_diff_sse[BLK_PELS]);
  DECLARE_ALIGNED(16, uint32_t, v_diff_sse[BLK_PELS]);

  const int rounding = (1 << strength) >> 1;

  // Loop variables
  int row, col;
  int uv_row, uv_col;
  int row_step, col_step;

  memset(y_diff_sse, 0, BLK_PELS * sizeof(uint32_t));
  memset(u_diff_sse, 0, BLK_PELS * sizeof(uint32_t));
  memset(v_diff_sse, 0, BLK_PELS * sizeof(uint32_t));

  // Get the square diffs
  for (row = 0; row < (int)block_height; row++) {
    for (col = 0; col < (int)block_width; col++) {
      const int diff =
          y_src[row * y_src_stride + col] - y_pre[row * y_pre_stride + col];
      y_diff_sse[row * y_diff_stride + col] = diff * diff;
    }
  }

  for (row = 0; row < uv_block_height; row++) {
    for (col = 0; col < uv_block_width; col++) {
      const int u_diff =
          u_src[row * uv_src_stride + col] - u_pre[row * uv_pre_stride + col];
      const int v_diff =
          v_src[row * uv_src_stride + col] - v_pre[row * uv_pre_stride + col];
      u_diff_sse[row * uv_diff_stride + col] = u_diff * u_diff;
      v_diff_sse[row * uv_diff_stride + col] = v_diff * v_diff;
    }
  }

  // Apply the filter to luma
  for (row = 0; row < (int)block_height; row++) {
    for (col = 0; col < (int)block_width; col++) {
      const int filter_weight = get_filter_weight(
          row, col, block_height, block_width, blk_fw, use_32x32);

      // First we get the modifier for the current y pixel
      const int y_pixel = y_pre[row * y_pre_stride + col];
      int y_num_used = 0;
      int y_mod = 0;

      // Sum the neighboring 3x3 y pixels
      for (row_step = -1; row_step <= 1; row_step++) {
        for (col_step = -1; col_step <= 1; col_step++) {
          const int sub_row = row + row_step;
          const int sub_col = col + col_step;

          if (sub_row >= 0 && sub_row < (int)block_height && sub_col >= 0 &&
              sub_col < (int)block_width) {
            y_mod += y_diff_sse[sub_row * y_diff_stride + sub_col];
            y_num_used++;
          }
        }
      }

      // Sum the corresponding uv pixels to the current y modifier
      // Note we are rounding down instead of rounding to the nearest pixel.
      uv_row = row >> ss_y;
      uv_col = col >> ss_x;
      y_mod += u_diff_sse[uv_row * uv_diff_stride + uv_col];
      y_mod += v_diff_sse[uv_row * uv_diff_stride + uv_col];

      y_num_used += 2;

      // Set the modifier
      y_mod = highbd_mod_index(y_mod, y_num_used, rounding, strength,
                               filter_weight);

      // Accumulate the result
      y_count[row * block_width + col] += y_mod;
      y_accum[row * block_width + col] += y_mod * y_pixel;
    }
  }

  // Apply the filter to chroma
  for (uv_row = 0; uv_row < uv_block_height; uv_row++) {
    for (uv_col = 0; uv_col < uv_block_width; uv_col++) {
      const int y_row = uv_row << ss_y;
      const int y_col = uv_col << ss_x;
      const int filter_weight = get_filter_weight(
          uv_row, uv_col, uv_block_height, uv_block_width, blk_fw, use_32x32);

      const int u_pixel = u_pre[uv_row * uv_pre_stride + uv_col];
      const int v_pixel = v_pre[uv_row * uv_pre_stride + uv_col];

      int uv_num_used = 0;
      int u_mod = 0, v_mod = 0;

      // Sum the neighboring 3x3 chroma pixels into the chroma modifier
      for (row_step = -1; row_step <= 1; row_step++) {
        for (col_step = -1; col_step <= 1; col_step++) {
          const int sub_row = uv_row + row_step;
          const int sub_col = uv_col + col_step;

          if (sub_row >= 0 && sub_row < uv_block_height && sub_col >= 0 &&
              sub_col < uv_block_width) {
            u_mod += u_diff_sse[sub_row * uv_diff_stride + sub_col];
            v_mod += v_diff_sse[sub_row * uv_diff_stride + sub_col];
            uv_num_used++;
          }
        }
      }

      // Sum all the luma pixels associated with the current chroma pixel
      for (row_step = 0; row_step < 1 + ss_y; row_step++) {
        for (col_step = 0; col_step < 1 + ss_x; col_step++) {
          const int sub_row = y_row + row_step;
          const int sub_col = y_col + col_step;
          const int y_diff = y_diff_sse[sub_row * y_diff_stride + sub_col];

          u_mod += y_diff;
          v_mod += y_diff;
          uv_num_used++;
        }
      }

      // Set the modifier
      u_mod = highbd_mod_index(u_mod, uv_num_used, rounding, strength,
                               filter_weight);
      v_mod = highbd_mod_index(v_mod, uv_num_used, rounding, strength,
                               filter_weight);

      // Accumulate the result
      u_count[uv_row * uv_block_width + uv_col] += u_mod;
      u_accum[uv_row * uv_block_width + uv_col] += u_mod * u_pixel;
      v_count[uv_row * uv_block_width + uv_col] += v_mod;
      v_accum[uv_row * uv_block_width + uv_col] += v_mod * v_pixel;
    }
  }
}
#endif  // CONFIG_VP9_HIGHBITDEPTH

static uint32_t temporal_filter_find_matching_mb_c(
    VP9_COMP *cpi, ThreadData *td, uint8_t *arf_frame_buf,
    uint8_t *frame_ptr_buf, int stride, MV *ref_mv, MV *blk_mvs,
    int *blk_bestsme) {
  MACROBLOCK *const x = &td->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  MV_SPEED_FEATURES *const mv_sf = &cpi->sf.mv;
  const SEARCH_METHODS search_method = MESH;
  const SEARCH_METHODS search_method_16 = cpi->sf.temporal_filter_search_method;
  int step_param;
  int sadpb = x->sadperbit16;
  uint32_t bestsme = UINT_MAX;
  uint32_t distortion;
  uint32_t sse;
  int cost_list[5];
  const MvLimits tmp_mv_limits = x->mv_limits;

  MV best_ref_mv1 = { 0, 0 };
  MV best_ref_mv1_full; /* full-pixel value of best_ref_mv1 */

  // Save input state
  struct buf_2d src = x->plane[0].src;
  struct buf_2d pre = xd->plane[0].pre[0];
  int i, j, k = 0;

  best_ref_mv1_full.col = best_ref_mv1.col >> 3;
  best_ref_mv1_full.row = best_ref_mv1.row >> 3;

  // Setup frame pointers
  x->plane[0].src.buf = arf_frame_buf;
  x->plane[0].src.stride = stride;
  xd->plane[0].pre[0].buf = frame_ptr_buf;
  xd->plane[0].pre[0].stride = stride;

  step_param = mv_sf->reduce_first_step_size;
  step_param = VPXMIN(step_param, MAX_MVSEARCH_STEPS - 2);

  vp9_set_mv_search_range(&x->mv_limits, &best_ref_mv1);

  vp9_full_pixel_search(cpi, x, TF_BLOCK, &best_ref_mv1_full, step_param,
                        search_method, sadpb, cond_cost_list(cpi, cost_list),
                        &best_ref_mv1, ref_mv, 0, 0);

  /* restore UMV window */
  x->mv_limits = tmp_mv_limits;

  // find_fractional_mv_step parameters: best_ref_mv1 is for mv rate cost
  // calculation. The start full mv and the search result are stored in
  // ref_mv.
  bestsme = cpi->find_fractional_mv_step(
      x, ref_mv, &best_ref_mv1, cpi->common.allow_high_precision_mv,
      x->errorperbit, &cpi->fn_ptr[TF_BLOCK], 0, mv_sf->subpel_search_level,
      cond_cost_list(cpi, cost_list), NULL, NULL, &distortion, &sse, NULL, BW,
      BH, USE_8_TAPS_SHARP);

  // Do motion search on the four 16x16 sub-blocks.
  best_ref_mv1.row = ref_mv->row;
  best_ref_mv1.col = ref_mv->col;
  best_ref_mv1_full.col = best_ref_mv1.col >> 3;
  best_ref_mv1_full.row = best_ref_mv1.row >> 3;

  for (i = 0; i < BH; i += SUB_BH) {
    for (j = 0; j < BW; j += SUB_BW) {
      // Setup frame pointers
      x->plane[0].src.buf = arf_frame_buf + i * stride + j;
      x->plane[0].src.stride = stride;
      xd->plane[0].pre[0].buf = frame_ptr_buf + i * stride + j;
      xd->plane[0].pre[0].stride = stride;

      vp9_set_mv_search_range(&x->mv_limits, &best_ref_mv1);
      vp9_full_pixel_search(cpi, x, TF_SUB_BLOCK, &best_ref_mv1_full,
                            step_param, search_method_16, sadpb,
                            cond_cost_list(cpi, cost_list), &best_ref_mv1,
                            &blk_mvs[k], 0, 0);
      /* restore UMV window */
      x->mv_limits = tmp_mv_limits;

      blk_bestsme[k] = cpi->find_fractional_mv_step(
          x, &blk_mvs[k], &best_ref_mv1, cpi->common.allow_high_precision_mv,
          x->errorperbit, &cpi->fn_ptr[TF_SUB_BLOCK], 0,
          mv_sf->subpel_search_level, cond_cost_list(cpi, cost_list), NULL,
          NULL, &distortion, &sse, NULL, SUB_BW, SUB_BH, USE_8_TAPS_SHARP);
      k++;
    }
  }

  // Restore input state
  x->plane[0].src = src;
  xd->plane[0].pre[0] = pre;

  return bestsme;
}

void vp9_temporal_filter_iterate_row_c(VP9_COMP *cpi, ThreadData *td,
                                       int mb_row, int mb_col_start,
                                       int mb_col_end) {
  ARNRFilterData *arnr_filter_data = &cpi->arnr_filter_data;
  YV12_BUFFER_CONFIG **frames = arnr_filter_data->frames;
  int frame_count = arnr_filter_data->frame_count;
  int alt_ref_index = arnr_filter_data->alt_ref_index;
  int strength = arnr_filter_data->strength;
  struct scale_factors *scale = &arnr_filter_data->sf;
  int byte;
  int frame;
  int mb_col;
  int mb_cols = (frames[alt_ref_index]->y_crop_width + BW - 1) >> BW_LOG2;
  int mb_rows = (frames[alt_ref_index]->y_crop_height + BH - 1) >> BH_LOG2;
  DECLARE_ALIGNED(16, uint32_t, accumulator[BLK_PELS * 3]);
  DECLARE_ALIGNED(16, uint16_t, count[BLK_PELS * 3]);
  MACROBLOCKD *mbd = &td->mb.e_mbd;
  YV12_BUFFER_CONFIG *f = frames[alt_ref_index];
  YV12_BUFFER_CONFIG *dst = arnr_filter_data->dst;
  uint8_t *dst1, *dst2;
#if CONFIG_VP9_HIGHBITDEPTH
  DECLARE_ALIGNED(16, uint16_t, predictor16[BLK_PELS * 3]);
  DECLARE_ALIGNED(16, uint8_t, predictor8[BLK_PELS * 3]);
  uint8_t *predictor;
#else
  DECLARE_ALIGNED(16, uint8_t, predictor[BLK_PELS * 3]);
#endif
  const int mb_uv_height = BH >> mbd->plane[1].subsampling_y;
  const int mb_uv_width = BW >> mbd->plane[1].subsampling_x;
  // Addition of the tile col level offsets
  int mb_y_offset = mb_row * BH * (f->y_stride) + BW * mb_col_start;
  int mb_uv_offset =
      mb_row * mb_uv_height * f->uv_stride + mb_uv_width * mb_col_start;

#if CONFIG_VP9_HIGHBITDEPTH
  if (mbd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
    predictor = CONVERT_TO_BYTEPTR(predictor16);
  } else {
    predictor = predictor8;
  }
#endif

  // Source frames are extended to 16 pixels. This is different than
  // L/A/G reference frames that have a border of 32 (VP9ENCBORDERINPIXELS)
  // A 6/8/12 tap filter is used for motion search and prediction. So the
  // largest Y mv on a border would then be 16 - TF_INTERP_EXTEND. The UV
  // blocks are half the size of the Y and therefore only extended by 8.
  // The largest mv that a UV block can support is 8 - TF_INTERP_EXTEND.
  // A UV mv is half of a Y mv. (16 - TF_INTERP_EXTEND) >> 1 is greater than
  // 8 - TF_INTERP_EXTEND. To keep the mv in play for both Y and UV planes,
  // the max that it can be on a border is therefore 16 - (2 * TF_INTERP_EXTEND
  // + 1).
  td->mb.mv_limits.row_min = -((mb_row * BH) + (17 - 2 * TF_INTERP_EXTEND));
  td->mb.mv_limits.row_max =
      ((mb_rows - 1 - mb_row) * BH) + (17 - 2 * TF_INTERP_EXTEND);

  for (mb_col = mb_col_start; mb_col < mb_col_end; mb_col++) {
    int i, j, k;
    int stride;
    MV ref_mv;

    vp9_zero_array(accumulator, BLK_PELS * 3);
    vp9_zero_array(count, BLK_PELS * 3);

    td->mb.mv_limits.col_min = -((mb_col * BW) + (17 - 2 * TF_INTERP_EXTEND));
    td->mb.mv_limits.col_max =
        ((mb_cols - 1 - mb_col) * BW) + (17 - 2 * TF_INTERP_EXTEND);

    if (cpi->oxcf.content == VP9E_CONTENT_FILM) {
      unsigned int src_variance;
      struct buf_2d src;

      src.buf = f->y_buffer + mb_y_offset;
      src.stride = f->y_stride;

#if CONFIG_VP9_HIGHBITDEPTH
      if (mbd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
        src_variance =
            vp9_high_get_sby_perpixel_variance(cpi, &src, TF_BLOCK, mbd->bd);
      } else {
        src_variance = vp9_get_sby_perpixel_variance(cpi, &src, TF_BLOCK);
      }
#else
      src_variance = vp9_get_sby_perpixel_variance(cpi, &src, TF_BLOCK);
#endif  // CONFIG_VP9_HIGHBITDEPTH

      if (src_variance <= 2) {
        strength = VPXMAX(0, arnr_filter_data->strength - 2);
      }
    }

    for (frame = 0; frame < frame_count; frame++) {
      // MVs for 4 16x16 sub blocks.
      MV blk_mvs[4];
      // Filter weights for 4 16x16 sub blocks.
      int blk_fw[4] = { 0, 0, 0, 0 };
      int use_32x32 = 0;

      if (frames[frame] == NULL) continue;

      ref_mv.row = 0;
      ref_mv.col = 0;
      blk_mvs[0] = kZeroMv;
      blk_mvs[1] = kZeroMv;
      blk_mvs[2] = kZeroMv;
      blk_mvs[3] = kZeroMv;

      if (frame == alt_ref_index) {
        blk_fw[0] = blk_fw[1] = blk_fw[2] = blk_fw[3] = 2;
        use_32x32 = 1;
      } else {
        const int thresh_low = 10000;
        const int thresh_high = 20000;
        int blk_bestsme[4] = { INT_MAX, INT_MAX, INT_MAX, INT_MAX };

        // Find best match in this frame by MC
        int err = temporal_filter_find_matching_mb_c(
            cpi, td, frames[alt_ref_index]->y_buffer + mb_y_offset,
            frames[frame]->y_buffer + mb_y_offset, frames[frame]->y_stride,
            &ref_mv, blk_mvs, blk_bestsme);

        int err16 =
            blk_bestsme[0] + blk_bestsme[1] + blk_bestsme[2] + blk_bestsme[3];
        int max_err = INT_MIN, min_err = INT_MAX;
        for (k = 0; k < 4; k++) {
          if (min_err > blk_bestsme[k]) min_err = blk_bestsme[k];
          if (max_err < blk_bestsme[k]) max_err = blk_bestsme[k];
        }

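        // Use the single 32x32 predictor when its error is close to the
        // summed 16x16 errors (within 16/15, or 16/14 when the sub-block
        // errors are tightly clustered).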
        if (((err * 15 < (err16 << 4)) && max_err - min_err < 10000) ||
            ((err * 14 < (err16 << 4)) && max_err - min_err < 5000)) {
          use_32x32 = 1;
          // Assign a higher weight to the matching MB if its error score is
          // lower. Without MC, the default behavior is to weight all MBs
          // equally.
          blk_fw[0] = err < (thresh_low << THR_SHIFT)    ? 2
                      : err < (thresh_high << THR_SHIFT) ? 1
                                                         : 0;
          blk_fw[1] = blk_fw[2] = blk_fw[3] = blk_fw[0];
        } else {
          use_32x32 = 0;
          for (k = 0; k < 4; k++)
            blk_fw[k] = blk_bestsme[k] < thresh_low    ? 2
                        : blk_bestsme[k] < thresh_high ? 1
                                                       : 0;
        }

        for (k = 0; k < 4; k++) {
          switch (abs(frame - alt_ref_index)) {
            case 1: blk_fw[k] = VPXMIN(blk_fw[k], 2); break;
            case 2:
            case 3: blk_fw[k] = VPXMIN(blk_fw[k], 1); break;
            default: break;
          }
        }
      }

      if (blk_fw[0] | blk_fw[1] | blk_fw[2] | blk_fw[3]) {
        // Construct the predictors
        temporal_filter_predictors_mb_c(
            mbd, frames[frame]->y_buffer + mb_y_offset,
            frames[frame]->u_buffer + mb_uv_offset,
            frames[frame]->v_buffer + mb_uv_offset, frames[frame]->y_stride,
            mb_uv_width, mb_uv_height, ref_mv.row, ref_mv.col, predictor, scale,
            mb_col * BW, mb_row * BH, blk_mvs, use_32x32);

#if CONFIG_VP9_HIGHBITDEPTH
        if (mbd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
          int adj_strength = strength + 2 * (mbd->bd - 8);
          // Apply the filter (YUV)
          vp9_highbd_apply_temporal_filter(
              CONVERT_TO_SHORTPTR(f->y_buffer + mb_y_offset), f->y_stride,
              CONVERT_TO_SHORTPTR(predictor), BW,
              CONVERT_TO_SHORTPTR(f->u_buffer + mb_uv_offset),
              CONVERT_TO_SHORTPTR(f->v_buffer + mb_uv_offset), f->uv_stride,
              CONVERT_TO_SHORTPTR(predictor + BLK_PELS),
              CONVERT_TO_SHORTPTR(predictor + (BLK_PELS << 1)), mb_uv_width, BW,
              BH, mbd->plane[1].subsampling_x, mbd->plane[1].subsampling_y,
              adj_strength, blk_fw, use_32x32, accumulator, count,
              accumulator + BLK_PELS, count + BLK_PELS,
              accumulator + (BLK_PELS << 1), count + (BLK_PELS << 1));
        } else {
          // Apply the filter (YUV)
          vp9_apply_temporal_filter(
              f->y_buffer + mb_y_offset, f->y_stride, predictor, BW,
              f->u_buffer + mb_uv_offset, f->v_buffer + mb_uv_offset,
              f->uv_stride, predictor + BLK_PELS, predictor + (BLK_PELS << 1),
              mb_uv_width, BW, BH, mbd->plane[1].subsampling_x,
              mbd->plane[1].subsampling_y, strength, blk_fw, use_32x32,
              accumulator, count, accumulator + BLK_PELS, count + BLK_PELS,
              accumulator + (BLK_PELS << 1), count + (BLK_PELS << 1));
        }
#else
        // Apply the filter (YUV)
        vp9_apply_temporal_filter(
            f->y_buffer + mb_y_offset, f->y_stride, predictor, BW,
            f->u_buffer + mb_uv_offset, f->v_buffer + mb_uv_offset,
            f->uv_stride, predictor + BLK_PELS, predictor + (BLK_PELS << 1),
            mb_uv_width, BW, BH, mbd->plane[1].subsampling_x,
            mbd->plane[1].subsampling_y, strength, blk_fw, use_32x32,
            accumulator, count, accumulator + BLK_PELS, count + BLK_PELS,
            accumulator + (BLK_PELS << 1), count + (BLK_PELS << 1));
#endif  // CONFIG_VP9_HIGHBITDEPTH
      }
    }

#if CONFIG_VP9_HIGHBITDEPTH
    if (mbd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
      uint16_t *dst1_16;
      uint16_t *dst2_16;
      // Normalize filter output to produce AltRef frame
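      // pval = (accumulator + count / 2) / count, computed as a multiply by
      // fixed_divide[count] ((1 << 19) / count) followed by a shift.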
      dst1 = dst->y_buffer;
      dst1_16 = CONVERT_TO_SHORTPTR(dst1);
      stride = dst->y_stride;
      byte = mb_y_offset;
      for (i = 0, k = 0; i < BH; i++) {
        for (j = 0; j < BW; j++, k++) {
          unsigned int pval = accumulator[k] + (count[k] >> 1);
          pval *= fixed_divide[count[k]];
          pval >>= 19;

          dst1_16[byte] = (uint16_t)pval;

          // move to next pixel
          byte++;
        }

        byte += stride - BW;
      }

      dst1 = dst->u_buffer;
      dst2 = dst->v_buffer;
      dst1_16 = CONVERT_TO_SHORTPTR(dst1);
      dst2_16 = CONVERT_TO_SHORTPTR(dst2);
      stride = dst->uv_stride;
      byte = mb_uv_offset;
      for (i = 0, k = BLK_PELS; i < mb_uv_height; i++) {
        for (j = 0; j < mb_uv_width; j++, k++) {
          int m = k + BLK_PELS;

          // U
          unsigned int pval = accumulator[k] + (count[k] >> 1);
          pval *= fixed_divide[count[k]];
          pval >>= 19;
          dst1_16[byte] = (uint16_t)pval;

          // V
          pval = accumulator[m] + (count[m] >> 1);
          pval *= fixed_divide[count[m]];
          pval >>= 19;
          dst2_16[byte] = (uint16_t)pval;

          // move to next pixel
          byte++;
        }

        byte += stride - mb_uv_width;
      }
    } else {
      // Normalize filter output to produce AltRef frame
      dst1 = dst->y_buffer;
      stride = dst->y_stride;
      byte = mb_y_offset;
      for (i = 0, k = 0; i < BH; i++) {
        for (j = 0; j < BW; j++, k++) {
          unsigned int pval = accumulator[k] + (count[k] >> 1);
          pval *= fixed_divide[count[k]];
          pval >>= 19;

          dst1[byte] = (uint8_t)pval;

          // move to next pixel
          byte++;
        }
        byte += stride - BW;
      }

      dst1 = dst->u_buffer;
      dst2 = dst->v_buffer;
      stride = dst->uv_stride;
      byte = mb_uv_offset;
      for (i = 0, k = BLK_PELS; i < mb_uv_height; i++) {
        for (j = 0; j < mb_uv_width; j++, k++) {
          int m = k + BLK_PELS;

          // U
          unsigned int pval = accumulator[k] + (count[k] >> 1);
          pval *= fixed_divide[count[k]];
          pval >>= 19;
          dst1[byte] = (uint8_t)pval;

          // V
          pval = accumulator[m] + (count[m] >> 1);
          pval *= fixed_divide[count[m]];
          pval >>= 19;
          dst2[byte] = (uint8_t)pval;

          // move to next pixel
          byte++;
        }
        byte += stride - mb_uv_width;
      }
    }
#else
    // Normalize filter output to produce AltRef frame
    dst1 = dst->y_buffer;
    stride = dst->y_stride;
    byte = mb_y_offset;
    for (i = 0, k = 0; i < BH; i++) {
      for (j = 0; j < BW; j++, k++) {
        unsigned int pval = accumulator[k] + (count[k] >> 1);
        pval *= fixed_divide[count[k]];
        pval >>= 19;

        dst1[byte] = (uint8_t)pval;

        // move to next pixel
        byte++;
      }
      byte += stride - BW;
    }

    dst1 = dst->u_buffer;
    dst2 = dst->v_buffer;
    stride = dst->uv_stride;
    byte = mb_uv_offset;
    for (i = 0, k = BLK_PELS; i < mb_uv_height; i++) {
      for (j = 0; j < mb_uv_width; j++, k++) {
        int m = k + BLK_PELS;

        // U
        unsigned int pval = accumulator[k] + (count[k] >> 1);
        pval *= fixed_divide[count[k]];
        pval >>= 19;
        dst1[byte] = (uint8_t)pval;

        // V
        pval = accumulator[m] + (count[m] >> 1);
        pval *= fixed_divide[count[m]];
        pval >>= 19;
        dst2[byte] = (uint8_t)pval;

        // move to next pixel
        byte++;
      }
      byte += stride - mb_uv_width;
    }
#endif  // CONFIG_VP9_HIGHBITDEPTH
    mb_y_offset += BW;
    mb_uv_offset += mb_uv_width;
  }
}

static void temporal_filter_iterate_tile_c(VP9_COMP *cpi, int tile_row,
                                           int tile_col) {
  VP9_COMMON *const cm = &cpi->common;
  const int tile_cols = 1 << cm->log2_tile_cols;
  TileInfo *tile_info =
      &cpi->tile_data[tile_row * tile_cols + tile_col].tile_info;
  const int mb_row_start = (tile_info->mi_row_start) >> TF_SHIFT;
  const int mb_row_end = (tile_info->mi_row_end + TF_ROUND) >> TF_SHIFT;
  const int mb_col_start = (tile_info->mi_col_start) >> TF_SHIFT;
  const int mb_col_end = (tile_info->mi_col_end + TF_ROUND) >> TF_SHIFT;
  int mb_row;

  for (mb_row = mb_row_start; mb_row < mb_row_end; mb_row++) {
    vp9_temporal_filter_iterate_row_c(cpi, &cpi->td, mb_row, mb_col_start,
                                      mb_col_end);
  }
}

static void temporal_filter_iterate_c(VP9_COMP *cpi) {
  VP9_COMMON *const cm = &cpi->common;
  const int tile_cols = 1 << cm->log2_tile_cols;
  const int tile_rows = 1 << cm->log2_tile_rows;
  int tile_row, tile_col;
  vp9_init_tile_data(cpi);

  for (tile_row = 0; tile_row < tile_rows; ++tile_row) {
    for (tile_col = 0; tile_col < tile_cols; ++tile_col) {
      temporal_filter_iterate_tile_c(cpi, tile_row, tile_col);
    }
  }
}

// Apply buffer limits and context specific adjustments to arnr filter.
static void adjust_arnr_filter(VP9_COMP *cpi, int distance, int group_boost,
                               int *arnr_frames, int *frames_backward,
                               int *frames_forward, int *arnr_strength) {
  const VP9EncoderConfig *const oxcf = &cpi->oxcf;

  int max_fwd =
      VPXMAX((int)vp9_lookahead_depth(cpi->lookahead) - distance - 1, 0);
  int max_bwd = VPXMAX(distance, 0);
  int frames = VPXMAX(oxcf->arnr_max_frames, 1);
  int q, base_strength, strength;

  // Context dependent two pass adjustment to strength.
  if (oxcf->pass == 2) {
    base_strength = oxcf->arnr_strength + cpi->twopass.arnr_strength_adjustment;
    // Clip to allowed range.
    base_strength = VPXMIN(6, VPXMAX(0, base_strength));
  } else {
    base_strength = oxcf->arnr_strength;
  }

  // Adjust the strength based on active max q.
  if (cpi->common.current_video_frame > 1)
    q = ((int)vp9_convert_qindex_to_q(cpi->rc.avg_frame_qindex[INTER_FRAME],
                                      cpi->common.bit_depth));
  else
    q = ((int)vp9_convert_qindex_to_q(cpi->rc.avg_frame_qindex[KEY_FRAME],
                                      cpi->common.bit_depth));
  if (q > 16) {
    strength = base_strength;
  } else {
    strength = base_strength - ((16 - q) / 2);
    if (strength < 0) strength = 0;
  }

  // Adjust number of frames in filter and strength based on gf boost level.
  frames = VPXMIN(frames, group_boost / 150);

  if (strength > group_boost / 300) {
    strength = group_boost / 300;
  }

  if (VPXMIN(max_fwd, max_bwd) >= frames / 2) {
    // Handle the even/odd case.
    *frames_backward = frames / 2;
    *frames_forward = (frames - 1) / 2;
  } else {
    if (max_fwd < frames / 2) {
      *frames_forward = max_fwd;
      *frames_backward = VPXMIN(frames - 1 - *frames_forward, max_bwd);
    } else {
      *frames_backward = max_bwd;
      *frames_forward = VPXMIN(frames - 1 - *frames_backward, max_fwd);
    }
  }

  // Set the baseline active filter size.
  frames = *frames_backward + 1 + *frames_forward;

  // Adjustments for second level arf in multi arf case.
  // Leave commented out place holder for possible filtering adjustment with
  // new multi-layer arf code.
  // if (cpi->oxcf.pass == 2 && cpi->multi_arf_allowed)
  //   if (gf_group->rf_level[gf_group->index] != GF_ARF_STD) strength >>= 1;

  // TODO(jingning): Skip temporal filtering for intermediate frames that will
  // be used as show_existing_frame. Need to further explore the possibility to
  // apply certain filter.
  if (frames <= 1) {
    frames = 1;
    *frames_backward = 0;
    *frames_forward = 0;
  }

  *arnr_frames = frames;
  *arnr_strength = strength;
}

void vp9_temporal_filter(VP9_COMP *cpi, int distance) {
  VP9_COMMON *const cm = &cpi->common;
  RATE_CONTROL *const rc = &cpi->rc;
  MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
  ARNRFilterData *arnr_filter_data = &cpi->arnr_filter_data;
  int frame;
  int frames_to_blur;
  int start_frame;
  int strength;
  int frames_to_blur_backward;
  int frames_to_blur_forward;
  struct scale_factors *sf = &arnr_filter_data->sf;
  YV12_BUFFER_CONFIG **frames = arnr_filter_data->frames;
  int rdmult;

  // Apply context specific adjustments to the arnr filter parameters.
  adjust_arnr_filter(cpi, distance, rc->gfu_boost, &frames_to_blur,
                     &frames_to_blur_backward, &frames_to_blur_forward,
                     &strength);
  start_frame = distance + frames_to_blur_forward;

  arnr_filter_data->strength = strength;
  arnr_filter_data->frame_count = frames_to_blur;
  arnr_filter_data->alt_ref_index = frames_to_blur_backward;
  arnr_filter_data->dst = &cpi->tf_buffer;

  // Setup frame pointers, NULL indicates frame not included in filter.
  for (frame = 0; frame < frames_to_blur; ++frame) {
    const int which_buffer = start_frame - frame;
    struct lookahead_entry *buf =
        vp9_lookahead_peek(cpi->lookahead, which_buffer);
    frames[frames_to_blur - 1 - frame] = &buf->img;
  }

  YV12_BUFFER_CONFIG *f = frames[arnr_filter_data->alt_ref_index];
  xd->cur_buf = f;
  xd->plane[1].subsampling_y = f->subsampling_y;
  xd->plane[1].subsampling_x = f->subsampling_x;

  if (frames_to_blur > 0) {
    // Setup scaling factors. Scaling on each of the arnr frames is not
    // supported.
    if (cpi->use_svc) {
      // In spatial svc the scaling factors might be less than 1/2.
      // So we will use non-normative scaling.
      int frame_used = 0;
#if CONFIG_VP9_HIGHBITDEPTH
      vp9_setup_scale_factors_for_frame(
          sf, get_frame_new_buffer(cm)->y_crop_width,
          get_frame_new_buffer(cm)->y_crop_height,
          get_frame_new_buffer(cm)->y_crop_width,
          get_frame_new_buffer(cm)->y_crop_height, cm->use_highbitdepth);
#else
      vp9_setup_scale_factors_for_frame(
          sf, get_frame_new_buffer(cm)->y_crop_width,
          get_frame_new_buffer(cm)->y_crop_height,
          get_frame_new_buffer(cm)->y_crop_width,
          get_frame_new_buffer(cm)->y_crop_height);
#endif  // CONFIG_VP9_HIGHBITDEPTH

      for (frame = 0; frame < frames_to_blur; ++frame) {
        if (cm->mi_cols * MI_SIZE != frames[frame]->y_width ||
            cm->mi_rows * MI_SIZE != frames[frame]->y_height) {
          if (vpx_realloc_frame_buffer(&cpi->svc.scaled_frames[frame_used],
                                       cm->width, cm->height, cm->subsampling_x,
                                       cm->subsampling_y,
#if CONFIG_VP9_HIGHBITDEPTH
                                       cm->use_highbitdepth,
#endif
                                       VP9_ENC_BORDER_IN_PIXELS,
                                       cm->byte_alignment, NULL, NULL, NULL)) {
            vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
                               "Failed to reallocate alt_ref_buffer");
          }
          frames[frame] = vp9_scale_if_required(
              cm, frames[frame], &cpi->svc.scaled_frames[frame_used], 0,
              EIGHTTAP, 0);
          ++frame_used;
        }
      }
      cm->mi = cm->mip + cm->mi_stride + 1;
      xd->mi = cm->mi_grid_visible;
      xd->mi[0] = cm->mi;
    } else {
      // ARF is produced at the native frame size and resized when coded.
#if CONFIG_VP9_HIGHBITDEPTH
      vp9_setup_scale_factors_for_frame(
          sf, frames[0]->y_crop_width, frames[0]->y_crop_height,
          frames[0]->y_crop_width, frames[0]->y_crop_height,
          cm->use_highbitdepth);
#else
      vp9_setup_scale_factors_for_frame(
          sf, frames[0]->y_crop_width, frames[0]->y_crop_height,
          frames[0]->y_crop_width, frames[0]->y_crop_height);
#endif  // CONFIG_VP9_HIGHBITDEPTH
    }
  }

  // Initialize errorperbit and sadperbit.
  rdmult = vp9_compute_rd_mult_based_on_qindex(cpi, ARNR_FILT_QINDEX);
  set_error_per_bit(&cpi->td.mb, rdmult);
  vp9_initialize_me_consts(cpi, &cpi->td.mb, ARNR_FILT_QINDEX);

  if (!cpi->row_mt)
    temporal_filter_iterate_c(cpi);
  else
    vp9_temporal_filter_row_mt(cpi);
}