xref: /aosp_15_r20/external/libvpx/vp8/encoder/pickinter.c (revision fb1b10ab9aebc7c7068eedab379b749d7e3900be)
1 /*
2  *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
3  *
4  *  Use of this source code is governed by a BSD-style license
5  *  that can be found in the LICENSE file in the root of the source
6  *  tree. An additional intellectual property rights grant can be found
7  *  in the file PATENTS.  All contributing project authors may
8  *  be found in the AUTHORS file in the root of the source tree.
9  */
10 
11 #include <assert.h>
12 #include <limits.h>
13 #include "vpx_config.h"
14 #include "./vpx_dsp_rtcd.h"
15 #include "onyx_int.h"
16 #include "modecosts.h"
17 #include "encodeintra.h"
18 #include "vp8/common/common.h"
19 #include "vp8/common/entropymode.h"
20 #include "pickinter.h"
21 #include "vp8/common/findnearmv.h"
22 #include "encodemb.h"
23 #include "vp8/common/reconinter.h"
24 #include "vp8/common/reconintra.h"
25 #include "vp8/common/reconintra4x4.h"
26 #include "vpx_dsp/variance.h"
27 #include "mcomp.h"
28 #include "vp8/common/vp8_skin_detection.h"
29 #include "rdopt.h"
30 #include "vpx_dsp/vpx_dsp_common.h"
31 #include "vpx_mem/vpx_mem.h"
32 #if CONFIG_TEMPORAL_DENOISING
33 #include "denoising.h"
34 #endif
35 
36 #ifdef SPEEDSTATS
37 extern unsigned int cnt_pm;
38 #endif
39 
40 extern const int vp8_ref_frame_order[MAX_MODES];
41 extern const MB_PREDICTION_MODE vp8_mode_order[MAX_MODES];
42 
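/* Returns the maximum absolute difference between the corner sample at row
 * |offsetx|, column |offsety| and its three neighbours in the (sgnx, sgny)
 * direction; used as a simple corner-gradient measure. */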
43 static int macroblock_corner_grad(unsigned char *signal, int stride,
44                                   int offsetx, int offsety, int sgnx,
45                                   int sgny) {
46   int y1 = signal[offsetx * stride + offsety];
47   int y2 = signal[offsetx * stride + offsety + sgny];
48   int y3 = signal[(offsetx + sgnx) * stride + offsety];
49   int y4 = signal[(offsetx + sgnx) * stride + offsety + sgny];
50   return VPXMAX(VPXMAX(abs(y1 - y2), abs(y1 - y3)), abs(y1 - y4));
51 }
52 
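/* Returns 1 if this macroblock (for the given channel) is a dot-artifact
 * candidate: it has used ZEROMV on LAST for many consecutive base layer
 * frames and shows a strong corner gradient in the last reference while the
 * source is flat at the same corner. */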
53 static int check_dot_artifact_candidate(VP8_COMP *cpi, MACROBLOCK *x,
54                                         unsigned char *target_last, int stride,
55                                         unsigned char *last_ref, int mb_row,
56                                         int mb_col, int channel) {
57   int threshold1 = 6;
58   int threshold2 = 3;
59   unsigned int max_num = (cpi->common.MBs) / 10;
60   int grad_last = 0;
61   int grad_source = 0;
62   int index = mb_row * cpi->common.mb_cols + mb_col;
63   // Threshold for #consecutive (base layer) frames using zero_last mode.
64   int num_frames = 30;
65   int shift = 15;
66   if (channel > 0) {
67     shift = 7;
68   }
69   if (cpi->oxcf.number_of_layers > 1) {
70     num_frames = 20;
71   }
72   x->zero_last_dot_suppress = 0;
73   // Blocks on base layer frames that have been using ZEROMV_LAST repeatedly
74   // (i.e., at least |x| consecutive frames) are candidates for increasing the
75   // rd adjustment for zero_last mode.
76   // Only allow this for at most |max_num| blocks per frame.
77   // Don't allow this for screen content input.
78   if (cpi->current_layer == 0 &&
79       cpi->consec_zero_last_mvbias[index] > num_frames &&
80       x->mbs_zero_last_dot_suppress < max_num &&
81       !cpi->oxcf.screen_content_mode) {
82     // If this block is checked here, label it so we don't check it again until
83   // ~|x| frames later.
84     x->zero_last_dot_suppress = 1;
85     // Dot artifact is noticeable as strong gradient at corners of macroblock,
86     // for flat areas. As a simple detector for now, we look for a high
87     // corner gradient on last ref, and a smaller gradient on source.
88     // Check 4 corners, return if any satisfy condition.
89     // Top-left:
90     grad_last = macroblock_corner_grad(last_ref, stride, 0, 0, 1, 1);
91     grad_source = macroblock_corner_grad(target_last, stride, 0, 0, 1, 1);
92     if (grad_last >= threshold1 && grad_source <= threshold2) {
93       x->mbs_zero_last_dot_suppress++;
94       return 1;
95     }
96     // Top-right:
97     grad_last = macroblock_corner_grad(last_ref, stride, 0, shift, 1, -1);
98     grad_source = macroblock_corner_grad(target_last, stride, 0, shift, 1, -1);
99     if (grad_last >= threshold1 && grad_source <= threshold2) {
100       x->mbs_zero_last_dot_suppress++;
101       return 1;
102     }
103     // Bottom-left:
104     grad_last = macroblock_corner_grad(last_ref, stride, shift, 0, -1, 1);
105     grad_source = macroblock_corner_grad(target_last, stride, shift, 0, -1, 1);
106     if (grad_last >= threshold1 && grad_source <= threshold2) {
107       x->mbs_zero_last_dot_suppress++;
108       return 1;
109     }
110     // Bottom-right:
111     grad_last = macroblock_corner_grad(last_ref, stride, shift, shift, -1, -1);
112     grad_source =
113         macroblock_corner_grad(target_last, stride, shift, shift, -1, -1);
114     if (grad_last >= threshold1 && grad_source <= threshold2) {
115       x->mbs_zero_last_dot_suppress++;
116       return 1;
117     }
118     return 0;
119   }
120   return 0;
121 }
122 
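/* Stand-in for the fractional MV search when sub-pixel refinement is skipped:
 * converts the full-pel result to 1/8-pel units (multiply by 8, clamped to
 * short range) without refining it further. */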
123 int vp8_skip_fractional_mv_step(MACROBLOCK *mb, BLOCK *b, BLOCKD *d,
124                                 int_mv *bestmv, int_mv *ref_mv,
125                                 int error_per_bit,
126                                 const vp8_variance_fn_ptr_t *vfp,
127                                 int *mvcost[2], int *distortion,
128                                 unsigned int *sse) {
129   (void)b;
130   (void)d;
131   (void)ref_mv;
132   (void)error_per_bit;
133   (void)vfp;
134   (void)mb;
135   (void)mvcost;
136   (void)distortion;
137   (void)sse;
138   bestmv->as_mv.row = clamp(bestmv->as_mv.row * 8, SHRT_MIN, SHRT_MAX);
139   bestmv->as_mv.col = clamp(bestmv->as_mv.col * 8, SHRT_MIN, SHRT_MAX);
140   return 0;
141 }
142 
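/* Returns the 16x16 luma prediction error (variance) for candidate MV
 * |this_mv|, using the sub-pixel variance function when the MV has a
 * fractional part; *sse receives the corresponding sum of squared error. */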
143 int vp8_get_inter_mbpred_error(MACROBLOCK *mb, const vp8_variance_fn_ptr_t *vfp,
144                                unsigned int *sse, int_mv this_mv) {
145   BLOCK *b = &mb->block[0];
146   BLOCKD *d = &mb->e_mbd.block[0];
147   unsigned char *what = (*(b->base_src) + b->src);
148   int what_stride = b->src_stride;
149   int pre_stride = mb->e_mbd.pre.y_stride;
150   unsigned char *in_what = mb->e_mbd.pre.y_buffer + d->offset;
151   int in_what_stride = pre_stride;
152   int xoffset = this_mv.as_mv.col & 7;
153   int yoffset = this_mv.as_mv.row & 7;
154 
155   in_what += (this_mv.as_mv.row >> 3) * pre_stride + (this_mv.as_mv.col >> 3);
156 
157   if (xoffset | yoffset) {
158     return vfp->svf(in_what, in_what_stride, xoffset, yoffset, what,
159                     what_stride, sse);
160   } else {
161     return vfp->vf(what, what_stride, in_what, in_what_stride, sse);
162   }
163 }
164 
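/* Sum of squared error between the 4x4 source block and its predictor
 * (predictor stride is 16). */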
165 static int get_prediction_error(BLOCK *be, BLOCKD *b) {
166   unsigned char *sptr;
167   unsigned char *dptr;
168   sptr = (*(be->base_src) + be->src);
169   dptr = b->predictor;
170 
171   return vpx_get4x4sse_cs(sptr, be->src_stride, dptr, 16);
172 }
173 
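/* Evaluates a reduced set of 4x4 intra modes (B_DC_PRED through B_HE_PRED)
 * for subblock |ib| using raw prediction error plus mode cost, then encodes
 * the subblock with the winning mode. Returns the best rd cost. */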
174 static int pick_intra4x4block(MACROBLOCK *x, int ib,
175                               B_PREDICTION_MODE *best_mode,
176                               const int *mode_costs, int *bestrate,
177                               int *bestdistortion) {
178   BLOCKD *b = &x->e_mbd.block[ib];
179   BLOCK *be = &x->block[ib];
180   int dst_stride = x->e_mbd.dst.y_stride;
181   unsigned char *dst = x->e_mbd.dst.y_buffer + b->offset;
182   B_PREDICTION_MODE mode;
183   int best_rd = INT_MAX;
184   int rate;
185   int distortion;
186 
187   unsigned char *Above = dst - dst_stride;
188   unsigned char *yleft = dst - 1;
189   unsigned char top_left = Above[-1];
190 
191   for (mode = B_DC_PRED; mode <= B_HE_PRED; ++mode) {
192     int this_rd;
193 
194     rate = mode_costs[mode];
195 
196     vp8_intra4x4_predict(Above, yleft, dst_stride, mode, b->predictor, 16,
197                          top_left);
198     distortion = get_prediction_error(be, b);
199     this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
200 
201     if (this_rd < best_rd) {
202       *bestrate = rate;
203       *bestdistortion = distortion;
204       best_rd = this_rd;
205       *best_mode = mode;
206     }
207   }
208 
209   b->bmi.as_mode = *best_mode;
210   vp8_encode_intra4x4block(x, ib);
211   return best_rd;
212 }
213 
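/* Picks a 4x4 intra mode for each of the 16 luma subblocks, accumulating rate
 * and prediction error; bails out (returning INT_MAX) as soon as the
 * accumulated distortion exceeds the best-so-far value passed in via
 * *best_dist. */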
214 static int pick_intra4x4mby_modes(MACROBLOCK *mb, int *Rate, int *best_dist) {
215   MACROBLOCKD *const xd = &mb->e_mbd;
216   int i;
217   int cost = mb->mbmode_cost[xd->frame_type][B_PRED];
218   int error;
219   int distortion = 0;
220   const int *bmode_costs;
221 
222   intra_prediction_down_copy(xd, xd->dst.y_buffer - xd->dst.y_stride + 16);
223 
224   bmode_costs = mb->inter_bmode_costs;
225 
226   for (i = 0; i < 16; ++i) {
227     MODE_INFO *const mic = xd->mode_info_context;
228     const int mis = xd->mode_info_stride;
229 
230     B_PREDICTION_MODE best_mode = B_MODE_COUNT;
231     int r = 0, d = 0;
232 
233     if (mb->e_mbd.frame_type == KEY_FRAME) {
234       const B_PREDICTION_MODE A = above_block_mode(mic, i, mis);
235       const B_PREDICTION_MODE L = left_block_mode(mic, i);
236 
237       bmode_costs = mb->bmode_costs[A][L];
238     }
239 
240     pick_intra4x4block(mb, i, &best_mode, bmode_costs, &r, &d);
241 
242     cost += r;
243     distortion += d;
244     assert(best_mode != B_MODE_COUNT);
245     mic->bmi[i].as_mode = best_mode;
246 
247     /* Break out case where we have already exceeded best so far value
248      * that was passed in
249      */
250     if (distortion > *best_dist) break;
251   }
252 
253   *Rate = cost;
254 
255   if (i == 16) {
256     *best_dist = distortion;
257     error = RDCOST(mb->rdmult, mb->rddiv, cost, distortion);
258   } else {
259     *best_dist = INT_MAX;
260     error = INT_MAX;
261   }
262 
263   return error;
264 }
265 
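/* Selects the 8x8 chroma intra mode by accumulating the squared error of the
 * DC, V, H and TM predictors against the U and V source blocks and keeping
 * the mode with the smallest total error. */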
266 static void pick_intra_mbuv_mode(MACROBLOCK *mb) {
267   MACROBLOCKD *x = &mb->e_mbd;
268   unsigned char *uabove_row = x->dst.u_buffer - x->dst.uv_stride;
269   unsigned char *vabove_row = x->dst.v_buffer - x->dst.uv_stride;
270   unsigned char *usrc_ptr = (mb->block[16].src + *mb->block[16].base_src);
271   unsigned char *vsrc_ptr = (mb->block[20].src + *mb->block[20].base_src);
272   int uvsrc_stride = mb->block[16].src_stride;
273   unsigned char uleft_col[8];
274   unsigned char vleft_col[8];
275   unsigned char utop_left = uabove_row[-1];
276   unsigned char vtop_left = vabove_row[-1];
277   int i, j;
278   int expected_udc;
279   int expected_vdc;
280   int shift;
281   int Uaverage = 0;
282   int Vaverage = 0;
283   int diff;
284   int pred_error[4] = { 0, 0, 0, 0 }, best_error = INT_MAX;
285   MB_PREDICTION_MODE best_mode = MB_MODE_COUNT;
286 
287   for (i = 0; i < 8; ++i) {
288     uleft_col[i] = x->dst.u_buffer[i * x->dst.uv_stride - 1];
289     vleft_col[i] = x->dst.v_buffer[i * x->dst.uv_stride - 1];
290   }
291 
292   if (!x->up_available && !x->left_available) {
293     expected_udc = 128;
294     expected_vdc = 128;
295   } else {
296     shift = 2;
297 
298     if (x->up_available) {
299       for (i = 0; i < 8; ++i) {
300         Uaverage += uabove_row[i];
301         Vaverage += vabove_row[i];
302       }
303 
304       shift++;
305     }
306 
307     if (x->left_available) {
308       for (i = 0; i < 8; ++i) {
309         Uaverage += uleft_col[i];
310         Vaverage += vleft_col[i];
311       }
312 
313       shift++;
314     }
315 
316     expected_udc = (Uaverage + (1 << (shift - 1))) >> shift;
317     expected_vdc = (Vaverage + (1 << (shift - 1))) >> shift;
318   }
319 
320   for (i = 0; i < 8; ++i) {
321     for (j = 0; j < 8; ++j) {
322       int predu = uleft_col[i] + uabove_row[j] - utop_left;
323       int predv = vleft_col[i] + vabove_row[j] - vtop_left;
324       int u_p, v_p;
325 
326       u_p = usrc_ptr[j];
327       v_p = vsrc_ptr[j];
328 
329       if (predu < 0) predu = 0;
330 
331       if (predu > 255) predu = 255;
332 
333       if (predv < 0) predv = 0;
334 
335       if (predv > 255) predv = 255;
336 
337       diff = u_p - expected_udc;
338       pred_error[DC_PRED] += diff * diff;
339       diff = v_p - expected_vdc;
340       pred_error[DC_PRED] += diff * diff;
341 
342       diff = u_p - uabove_row[j];
343       pred_error[V_PRED] += diff * diff;
344       diff = v_p - vabove_row[j];
345       pred_error[V_PRED] += diff * diff;
346 
347       diff = u_p - uleft_col[i];
348       pred_error[H_PRED] += diff * diff;
349       diff = v_p - vleft_col[i];
350       pred_error[H_PRED] += diff * diff;
351 
352       diff = u_p - predu;
353       pred_error[TM_PRED] += diff * diff;
354       diff = v_p - predv;
355       pred_error[TM_PRED] += diff * diff;
356     }
357 
358     usrc_ptr += uvsrc_stride;
359     vsrc_ptr += uvsrc_stride;
360 
361     if (i == 3) {
362       usrc_ptr = (mb->block[18].src + *mb->block[18].base_src);
363       vsrc_ptr = (mb->block[22].src + *mb->block[22].base_src);
364     }
365   }
366 
367   for (i = DC_PRED; i <= TM_PRED; ++i) {
368     if (best_error > pred_error[i]) {
369       best_error = pred_error[i];
370       best_mode = (MB_PREDICTION_MODE)i;
371     }
372   }
373 
374   assert(best_mode != MB_MODE_COUNT);
375   mb->e_mbd.mode_info_context->mbmi.uv_mode = best_mode;
376 }
377 
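/* For NEWMV macroblocks, accumulates the MV residual (relative to
 * |best_ref_mv|, in 2-unit bins) into the row/column MVcount histograms,
 * after bounds-checking the bin index. */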
378 static void update_mvcount(MACROBLOCK *x, int_mv *best_ref_mv) {
379   MACROBLOCKD *xd = &x->e_mbd;
380   /* Split MV modes currently not supported when RD is not enabled,
381    * therefore, only need to modify MVcount in NEWMV mode. */
382   if (xd->mode_info_context->mbmi.mode == NEWMV) {
383     const int row_val =
384         ((xd->mode_info_context->mbmi.mv.as_mv.row - best_ref_mv->as_mv.row) >>
385          1);
386     const int row_idx = mv_max + row_val;
387     const int col_val =
388         ((xd->mode_info_context->mbmi.mv.as_mv.col - best_ref_mv->as_mv.col) >>
389          1);
390     const int col_idx = mv_max + col_val;
391     if (row_idx >= 0 && row_idx < MVvals && col_idx >= 0 && col_idx < MVvals) {
392       x->MVcount[0][row_idx]++;
393       x->MVcount[1][col_idx]++;
394     }
395   }
396 }
397 
398 #if CONFIG_MULTI_RES_ENCODING
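/* Maps this macroblock to the corresponding macroblock of the
 * lower-resolution encode and reads back its reference frame, mode,
 * dissimilarity value and motion vector (scaled by the down-sampling factor
 * and clamped). */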
399 static void get_lower_res_motion_info(VP8_COMP *cpi, MACROBLOCKD *xd,
400                                       int *dissim, int *parent_ref_frame,
401                                       MB_PREDICTION_MODE *parent_mode,
402                                       int_mv *parent_ref_mv, int mb_row,
403                                       int mb_col) {
404   LOWER_RES_MB_INFO *store_mode_info =
405       ((LOWER_RES_FRAME_INFO *)cpi->oxcf.mr_low_res_mode_info)->mb_info;
406   unsigned int parent_mb_index;
407 
408   /* Consider different down_sampling_factor.  */
409   {
410     /* TODO: Removed the loop that supports special down_sampling_factor
411      * such as 2, 4, 8. Will revisit it if needed.
412      * Should also try using a look-up table to see if it helps
413      * performance. */
414     int parent_mb_row, parent_mb_col;
415 
416     parent_mb_row = mb_row * cpi->oxcf.mr_down_sampling_factor.den /
417                     cpi->oxcf.mr_down_sampling_factor.num;
418     parent_mb_col = mb_col * cpi->oxcf.mr_down_sampling_factor.den /
419                     cpi->oxcf.mr_down_sampling_factor.num;
420     parent_mb_index = parent_mb_row * cpi->mr_low_res_mb_cols + parent_mb_col;
421   }
422 
423   /* Read lower-resolution mode & motion result from memory.*/
424   *parent_ref_frame = store_mode_info[parent_mb_index].ref_frame;
425   *parent_mode = store_mode_info[parent_mb_index].mode;
426   *dissim = store_mode_info[parent_mb_index].dissim;
427 
428   /* For highest-resolution encoder, adjust dissim value. Lower its quality
429    * for good performance. */
430   if (cpi->oxcf.mr_encoder_id == (cpi->oxcf.mr_total_resolutions - 1))
431     *dissim >>= 1;
432 
433   if (*parent_ref_frame != INTRA_FRAME) {
434     /* Consider different down_sampling_factor.
435      * The result can be rounded to be more precise, but it takes more time.
436      */
437     (*parent_ref_mv).as_mv.row = store_mode_info[parent_mb_index].mv.as_mv.row *
438                                  cpi->oxcf.mr_down_sampling_factor.num /
439                                  cpi->oxcf.mr_down_sampling_factor.den;
440     (*parent_ref_mv).as_mv.col = store_mode_info[parent_mb_index].mv.as_mv.col *
441                                  cpi->oxcf.mr_down_sampling_factor.num /
442                                  cpi->oxcf.mr_down_sampling_factor.den;
443 
444     vp8_clamp_mv2(parent_ref_mv, xd);
445   }
446 }
447 #endif
448 
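/* If the 16x16 luma SSE is below a threshold derived from the dequant value
 * (or x->encode_breakout, whichever is larger), also check the chroma SSE and
 * set x->skip when twice the chroma SSE is still under the breakout value. */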
449 static void check_for_encode_breakout(unsigned int sse, MACROBLOCK *x) {
450   MACROBLOCKD *xd = &x->e_mbd;
451 
452   unsigned int threshold =
453       (xd->block[0].dequant[1] * xd->block[0].dequant[1] >> 4);
454 
455   if (threshold < x->encode_breakout) threshold = x->encode_breakout;
456 
457   if (sse < threshold) {
458     /* Check u and v to make sure skip is ok */
459     unsigned int sse2 = 0;
460 
461     sse2 = VP8_UVSSE(x);
462 
463     if (sse2 * 2 < x->encode_breakout) {
464       x->skip = 1;
465     } else {
466       x->skip = 0;
467     }
468   }
469 }
470 
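/* Computes the rd cost of the current inter mode: refreshes the 16x16
 * prediction error unless a NEWMV sub-pixel search already produced it,
 * applies the ZEROMV/LAST rd adjustment, and runs the encode-breakout check
 * (which may set x->skip). Returns INT_MAX for inactive macroblocks. */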
471 static int evaluate_inter_mode(unsigned int *sse, int rate2, int *distortion2,
472                                VP8_COMP *cpi, MACROBLOCK *x, int rd_adj) {
473   MB_PREDICTION_MODE this_mode = x->e_mbd.mode_info_context->mbmi.mode;
474   int_mv mv = x->e_mbd.mode_info_context->mbmi.mv;
475   int this_rd;
476   int denoise_aggressive = 0;
477   /* Exit early and don't compute the distortion if this macroblock
478    * is marked inactive. */
479   if (cpi->active_map_enabled && x->active_ptr[0] == 0) {
480     *sse = 0;
481     *distortion2 = 0;
482     x->skip = 1;
483     return INT_MAX;
484   }
485 
486   if ((this_mode != NEWMV) || !(cpi->sf.half_pixel_search) ||
487       cpi->common.full_pixel == 1) {
488     *distortion2 =
489         vp8_get_inter_mbpred_error(x, &cpi->fn_ptr[BLOCK_16X16], sse, mv);
490   }
491 
492   this_rd = RDCOST(x->rdmult, x->rddiv, rate2, *distortion2);
493 
494 #if CONFIG_TEMPORAL_DENOISING
495   if (cpi->oxcf.noise_sensitivity > 0) {
496     denoise_aggressive =
497         (cpi->denoiser.denoiser_mode == kDenoiserOnYUVAggressive) ? 1 : 0;
498   }
499 #endif
500 
501   // Adjust rd for ZEROMV and LAST, if LAST is the closest reference frame.
502   // TODO: Also add a condition on the distance of the closest reference to the current frame.
503   if (!cpi->oxcf.screen_content_mode && this_mode == ZEROMV &&
504       x->e_mbd.mode_info_context->mbmi.ref_frame == LAST_FRAME &&
505       (denoise_aggressive || (cpi->closest_reference_frame == LAST_FRAME))) {
506     // No adjustment if block is considered to be skin area.
507     if (x->is_skin) rd_adj = 100;
508 
509     this_rd = (int)(((int64_t)this_rd) * rd_adj / 100);
510   }
511 
512   check_for_encode_breakout(*sse, x);
513   return this_rd;
514 }
515 
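/* When lf_zeromv_pct is above 40, inspect the left, above-left and above
 * neighbours; if they are inter coded with near-zero motion, lower
 * *rd_adjustment (to 90 or 80) so that ZEROMV is favoured in
 * evaluate_inter_mode(). */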
516 static void calculate_zeromv_rd_adjustment(VP8_COMP *cpi, MACROBLOCK *x,
517                                            int *rd_adjustment) {
518   MODE_INFO *mic = x->e_mbd.mode_info_context;
519   int_mv mv_l, mv_a, mv_al;
520   int local_motion_check = 0;
521 
522   if (cpi->lf_zeromv_pct > 40) {
523     /* left mb */
524     mic -= 1;
525     mv_l = mic->mbmi.mv;
526 
527     if (mic->mbmi.ref_frame != INTRA_FRAME) {
528       if (abs(mv_l.as_mv.row) < 8 && abs(mv_l.as_mv.col) < 8) {
529         local_motion_check++;
530       }
531     }
532 
533     /* above-left mb */
534     mic -= x->e_mbd.mode_info_stride;
535     mv_al = mic->mbmi.mv;
536 
537     if (mic->mbmi.ref_frame != INTRA_FRAME) {
538       if (abs(mv_al.as_mv.row) < 8 && abs(mv_al.as_mv.col) < 8) {
539         local_motion_check++;
540       }
541     }
542 
543     /* above mb */
544     mic += 1;
545     mv_a = mic->mbmi.mv;
546 
547     if (mic->mbmi.ref_frame != INTRA_FRAME) {
548       if (abs(mv_a.as_mv.row) < 8 && abs(mv_a.as_mv.col) < 8) {
549         local_motion_check++;
550       }
551     }
552 
553     if (((!x->e_mbd.mb_to_top_edge || !x->e_mbd.mb_to_left_edge) &&
554          local_motion_check > 0) ||
555         local_motion_check > 2) {
556       *rd_adjustment = 80;
557     } else if (local_motion_check > 0) {
558       *rd_adjustment = 90;
559     }
560   }
561 }
562 
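/* Real-time ("pick") inter mode selection for one macroblock: walks the
 * mode/reference combinations in vp8_mode_order using variance-based rd
 * estimates, adaptive per-mode test frequencies and thresholds, optional
 * lower-resolution mv reuse (CONFIG_MULTI_RES_ENCODING) and temporal
 * denoiser bookkeeping (CONFIG_TEMPORAL_DENOISING), and leaves the chosen
 * mode in mode_info_context->mbmi. */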
563 void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
564                          int recon_uvoffset, int *returnrate,
565                          int *returndistortion, int *returnintra, int mb_row,
566                          int mb_col) {
567   BLOCK *b = &x->block[0];
568   BLOCKD *d = &x->e_mbd.block[0];
569   MACROBLOCKD *xd = &x->e_mbd;
570   MB_MODE_INFO best_mbmode;
571 
572   int_mv best_ref_mv_sb[2] = { { 0 }, { 0 } };
573   int_mv mode_mv_sb[2][MB_MODE_COUNT];
574   int_mv best_ref_mv;
575   int_mv *mode_mv;
576   MB_PREDICTION_MODE this_mode;
577   int num00;
578   int mdcounts[4];
579   int best_rd = INT_MAX;
580   int rd_adjustment = 100;
581   int best_intra_rd = INT_MAX;
582   int mode_index;
583   int rate;
584   int rate2;
585   int distortion2;
586   int bestsme = INT_MAX;
587   int best_mode_index = 0;
588   unsigned int sse = UINT_MAX, best_rd_sse = UINT_MAX;
589 #if CONFIG_TEMPORAL_DENOISING
590   unsigned int zero_mv_sse = UINT_MAX, best_sse = UINT_MAX;
591 #endif
592 
593   int sf_improved_mv_pred = cpi->sf.improved_mv_pred;
594 
595 #if CONFIG_MULTI_RES_ENCODING
596   int dissim = INT_MAX;
597   int parent_ref_frame = 0;
598   int_mv parent_ref_mv;
599   MB_PREDICTION_MODE parent_mode = 0;
600   int parent_ref_valid = 0;
601 #endif
602 
603   int_mv mvp;
604 
605   int near_sadidx[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
606   int saddone = 0;
607   /* Search range obtained from mv_pred(). It uses step_param levels (0-7). */
608   int sr = 0;
609 
610   unsigned char *plane[4][3] = { { 0, 0 } };
611   int ref_frame_map[4];
612   int sign_bias = 0;
613   int dot_artifact_candidate = 0;
614   get_predictor_pointers(cpi, plane, recon_yoffset, recon_uvoffset);
615 
616   // If the current frame is using LAST as a reference, check for
617   // biasing the mode selection for dot artifacts.
618   if (cpi->ref_frame_flags & VP8_LAST_FRAME) {
619     unsigned char *target_y = x->src.y_buffer;
620     unsigned char *target_u = x->block[16].src + *x->block[16].base_src;
621     unsigned char *target_v = x->block[20].src + *x->block[20].base_src;
622     int stride = x->src.y_stride;
623     int stride_uv = x->block[16].src_stride;
624 #if CONFIG_TEMPORAL_DENOISING
625     if (cpi->oxcf.noise_sensitivity) {
626       const int uv_denoise = (cpi->oxcf.noise_sensitivity >= 2) ? 1 : 0;
627       target_y =
628           cpi->denoiser.yv12_running_avg[LAST_FRAME].y_buffer + recon_yoffset;
629       stride = cpi->denoiser.yv12_running_avg[LAST_FRAME].y_stride;
630       if (uv_denoise) {
631         target_u = cpi->denoiser.yv12_running_avg[LAST_FRAME].u_buffer +
632                    recon_uvoffset;
633         target_v = cpi->denoiser.yv12_running_avg[LAST_FRAME].v_buffer +
634                    recon_uvoffset;
635         stride_uv = cpi->denoiser.yv12_running_avg[LAST_FRAME].uv_stride;
636       }
637     }
638 #endif
639     assert(plane[LAST_FRAME][0] != NULL);
640     dot_artifact_candidate = check_dot_artifact_candidate(
641         cpi, x, target_y, stride, plane[LAST_FRAME][0], mb_row, mb_col, 0);
642     // If not found in Y channel, check UV channel.
643     if (!dot_artifact_candidate) {
644       assert(plane[LAST_FRAME][1] != NULL);
645       dot_artifact_candidate = check_dot_artifact_candidate(
646           cpi, x, target_u, stride_uv, plane[LAST_FRAME][1], mb_row, mb_col, 1);
647       if (!dot_artifact_candidate) {
648         assert(plane[LAST_FRAME][2] != NULL);
649         dot_artifact_candidate = check_dot_artifact_candidate(
650             cpi, x, target_v, stride_uv, plane[LAST_FRAME][2], mb_row, mb_col,
651             2);
652       }
653     }
654   }
655 
656 #if CONFIG_MULTI_RES_ENCODING
657   // |parent_ref_valid| will be set here if we can potentially reuse the mv
658   // for this higher-resolution (|cpi->oxcf.mr_encoder_id| > 0) frame.
659   // |parent_ref_valid| may be reset depending on |parent_ref_frame| for
660   // the current macroblock below.
661   parent_ref_valid = cpi->oxcf.mr_encoder_id && cpi->mr_low_res_mv_avail;
662   if (parent_ref_valid) {
663     int parent_ref_flag;
664 
665     get_lower_res_motion_info(cpi, xd, &dissim, &parent_ref_frame, &parent_mode,
666                               &parent_ref_mv, mb_row, mb_col);
667 
668     /* TODO(jkoleszar): The references available (ref_frame_flags) to the
669      * lower res encoder should match those available to this encoder, but
670      * there seems to be a situation where this mismatch can happen in the
671      * case of frame dropping and temporal layers. For example,
672      * GOLD being disallowed in ref_frame_flags, but being returned as
673      * parent_ref_frame.
674      *
675      * In this event, take the conservative approach of disabling the
676      * lower res info for this MB.
677      */
678 
679     parent_ref_flag = 0;
680     // Note availability for mv reuse is only based on last and golden.
681     if (parent_ref_frame == LAST_FRAME)
682       parent_ref_flag = (cpi->ref_frame_flags & VP8_LAST_FRAME);
683     else if (parent_ref_frame == GOLDEN_FRAME)
684       parent_ref_flag = (cpi->ref_frame_flags & VP8_GOLD_FRAME);
685 
686     // assert(!parent_ref_frame || parent_ref_flag);
687 
688     // If |parent_ref_frame| did not match either last or golden then
689     // shut off mv reuse.
690     if (parent_ref_frame && !parent_ref_flag) parent_ref_valid = 0;
691 
692     // Don't do mv reuse since we want to allow for another mode besides
693     // ZEROMV_LAST to remove dot artifact.
694     if (dot_artifact_candidate) parent_ref_valid = 0;
695   }
696 #endif
697 
698   // Check if current macroblock is in skin area.
699   x->is_skin = 0;
700   if (!cpi->oxcf.screen_content_mode) {
701     int block_index = mb_row * cpi->common.mb_cols + mb_col;
702     x->is_skin = cpi->skin_map[block_index];
703   }
704 #if CONFIG_TEMPORAL_DENOISING
705   if (cpi->oxcf.noise_sensitivity) {
706     // Under aggressive denoising mode, should we use the skin map to reduce
707     // the denoiser and ZEROMV bias? Will need to revisit the accuracy of this
708     // detection for very noisy input. For now keep this as is (i.e., don't
709     // turn it off).
710     // if (cpi->denoiser.denoiser_mode == kDenoiserOnYUVAggressive)
711     //   x->is_skin = 0;
712   }
713 #endif
714 
715   mode_mv = mode_mv_sb[sign_bias];
716   best_ref_mv.as_int = 0;
717   memset(mode_mv_sb, 0, sizeof(mode_mv_sb));
718   memset(&best_mbmode, 0, sizeof(best_mbmode));
719 
720 /* Setup search priorities */
721 #if CONFIG_MULTI_RES_ENCODING
722   if (parent_ref_valid && parent_ref_frame && dissim < 8) {
723     ref_frame_map[0] = -1;
724     ref_frame_map[1] = parent_ref_frame;
725     ref_frame_map[2] = -1;
726     ref_frame_map[3] = -1;
727   } else
728 #endif
729     get_reference_search_order(cpi, ref_frame_map);
730 
731   /* Check to see if there is at least 1 valid reference frame that we need
732    * to calculate near_mvs.
733    */
734   if (ref_frame_map[1] > 0) {
735     sign_bias = vp8_find_near_mvs_bias(
736         &x->e_mbd, x->e_mbd.mode_info_context, mode_mv_sb, best_ref_mv_sb,
737         mdcounts, ref_frame_map[1], cpi->common.ref_frame_sign_bias);
738 
739     mode_mv = mode_mv_sb[sign_bias];
740     best_ref_mv.as_int = best_ref_mv_sb[sign_bias].as_int;
741   }
742 
743   /* Count of the number of MBs tested so far this frame */
744   x->mbs_tested_so_far++;
745 
746   *returnintra = INT_MAX;
747   x->skip = 0;
748 
749   x->e_mbd.mode_info_context->mbmi.ref_frame = INTRA_FRAME;
750 
751   /* If the frame has a big static background and the current MB is in a low
752    *  motion area, its mode decision is biased to ZEROMV mode.
753    *  No adjustment if cpu_used is <= -12 (i.e., cpi->Speed >= 12).
754    *  At such speed settings, ZEROMV is already heavily favored.
755    */
756   if (cpi->Speed < 12) {
757     calculate_zeromv_rd_adjustment(cpi, x, &rd_adjustment);
758   }
759 
760 #if CONFIG_TEMPORAL_DENOISING
761   if (cpi->oxcf.noise_sensitivity) {
762     rd_adjustment = (int)(rd_adjustment *
763                           cpi->denoiser.denoise_pars.pickmode_mv_bias / 100);
764   }
765 #endif
766 
767   if (dot_artifact_candidate) {
768     // Bias against ZEROMV_LAST mode.
769     rd_adjustment = 150;
770   }
771 
772   /* if we encode a new mv this is important
773    * find the best new motion vector
774    */
775   for (mode_index = 0; mode_index < MAX_MODES; ++mode_index) {
776     int frame_cost;
777     int this_rd = INT_MAX;
778     int this_ref_frame = ref_frame_map[vp8_ref_frame_order[mode_index]];
779 
780     if (best_rd <= x->rd_threshes[mode_index]) continue;
781 
782     if (this_ref_frame < 0) continue;
783 
784     x->e_mbd.mode_info_context->mbmi.ref_frame = this_ref_frame;
785 
786     /* everything but intra */
787     if (x->e_mbd.mode_info_context->mbmi.ref_frame) {
788       x->e_mbd.pre.y_buffer = plane[this_ref_frame][0];
789       x->e_mbd.pre.u_buffer = plane[this_ref_frame][1];
790       x->e_mbd.pre.v_buffer = plane[this_ref_frame][2];
791 
792       if (sign_bias != cpi->common.ref_frame_sign_bias[this_ref_frame]) {
793         sign_bias = cpi->common.ref_frame_sign_bias[this_ref_frame];
794         mode_mv = mode_mv_sb[sign_bias];
795         best_ref_mv.as_int = best_ref_mv_sb[sign_bias].as_int;
796       }
797 
798 #if CONFIG_MULTI_RES_ENCODING
799       if (parent_ref_valid) {
800         if (vp8_mode_order[mode_index] == NEARESTMV &&
801             mode_mv[NEARESTMV].as_int == 0)
802           continue;
803         if (vp8_mode_order[mode_index] == NEARMV && mode_mv[NEARMV].as_int == 0)
804           continue;
805 
806         if (vp8_mode_order[mode_index] == NEWMV && parent_mode == ZEROMV &&
807             best_ref_mv.as_int == 0)
808           continue;
809         else if (vp8_mode_order[mode_index] == NEWMV && dissim == 0 &&
810                  best_ref_mv.as_int == parent_ref_mv.as_int)
811           continue;
812       }
813 #endif
814     }
815 
816     /* Check to see if the testing frequency for this mode is at its max
817      * If so then prevent it from being tested and increase the threshold
818      * for its testing */
819     if (x->mode_test_hit_counts[mode_index] &&
820         (cpi->mode_check_freq[mode_index] > 1)) {
821       if (x->mbs_tested_so_far <= (cpi->mode_check_freq[mode_index] *
822                                    x->mode_test_hit_counts[mode_index])) {
823         /* Increase the threshold for coding this mode to make it less
824          * likely to be chosen */
825         x->rd_thresh_mult[mode_index] += 4;
826 
827         if (x->rd_thresh_mult[mode_index] > MAX_THRESHMULT) {
828           x->rd_thresh_mult[mode_index] = MAX_THRESHMULT;
829         }
830 
831         x->rd_threshes[mode_index] =
832             (cpi->rd_baseline_thresh[mode_index] >> 7) *
833             x->rd_thresh_mult[mode_index];
834         continue;
835       }
836     }
837 
838     /* We have now reached the point where we are going to test the current
839      * mode so increment the counter for the number of times it has been
840      * tested */
841     x->mode_test_hit_counts[mode_index]++;
842 
843     rate2 = 0;
844     distortion2 = 0;
845 
846     this_mode = vp8_mode_order[mode_index];
847 
848     x->e_mbd.mode_info_context->mbmi.mode = this_mode;
849     x->e_mbd.mode_info_context->mbmi.uv_mode = DC_PRED;
850 
851     /* Work out the cost associated with selecting the reference frame */
852     frame_cost = x->ref_frame_cost[x->e_mbd.mode_info_context->mbmi.ref_frame];
853     rate2 += frame_cost;
854 
855     /* Only consider ZEROMV/ALTREF_FRAME for alt ref frame,
856      * unless ARNR filtering is enabled in which case we want
857      * an unfiltered alternative */
858     if (cpi->is_src_frame_alt_ref && (cpi->oxcf.arnr_max_frames == 0)) {
859       if (this_mode != ZEROMV ||
860           x->e_mbd.mode_info_context->mbmi.ref_frame != ALTREF_FRAME) {
861         continue;
862       }
863     }
864 
865     switch (this_mode) {
866       case B_PRED:
867         /* Pass best so far to pick_intra4x4mby_modes to use as breakout */
868         distortion2 = best_rd_sse;
869         pick_intra4x4mby_modes(x, &rate, &distortion2);
870 
871         if (distortion2 == INT_MAX) {
872           this_rd = INT_MAX;
873         } else {
874           rate2 += rate;
875           distortion2 = vpx_variance16x16(*(b->base_src), b->src_stride,
876                                           x->e_mbd.predictor, 16, &sse);
877           this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
878 
879           if (this_rd < best_intra_rd) {
880             best_intra_rd = this_rd;
881             *returnintra = distortion2;
882           }
883         }
884 
885         break;
886 
887       case SPLITMV:
888 
889         /* Split MV modes currently not supported when RD is not enabled. */
890         break;
891 
892       case DC_PRED:
893       case V_PRED:
894       case H_PRED:
895       case TM_PRED:
896         vp8_build_intra_predictors_mby_s(
897             xd, xd->dst.y_buffer - xd->dst.y_stride, xd->dst.y_buffer - 1,
898             xd->dst.y_stride, xd->predictor, 16);
899         distortion2 = vpx_variance16x16(*(b->base_src), b->src_stride,
900                                         x->e_mbd.predictor, 16, &sse);
901         rate2 += x->mbmode_cost[x->e_mbd.frame_type]
902                                [x->e_mbd.mode_info_context->mbmi.mode];
903         this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
904 
905         if (this_rd < best_intra_rd) {
906           best_intra_rd = this_rd;
907           *returnintra = distortion2;
908         }
909         break;
910 
911       case NEWMV: {
912         int thissme;
913         int step_param;
914         int further_steps;
915         int n = 0;
916         int sadpb = x->sadperbit16;
917         int_mv mvp_full;
918 
919         int col_min = ((best_ref_mv.as_mv.col + 7) >> 3) - MAX_FULL_PEL_VAL;
920         int row_min = ((best_ref_mv.as_mv.row + 7) >> 3) - MAX_FULL_PEL_VAL;
921         int col_max = (best_ref_mv.as_mv.col >> 3) + MAX_FULL_PEL_VAL;
922         int row_max = (best_ref_mv.as_mv.row >> 3) + MAX_FULL_PEL_VAL;
923 
924         int tmp_col_min = x->mv_col_min;
925         int tmp_col_max = x->mv_col_max;
926         int tmp_row_min = x->mv_row_min;
927         int tmp_row_max = x->mv_row_max;
928 
929         int speed_adjust = (cpi->Speed > 5) ? ((cpi->Speed >= 8) ? 3 : 2) : 1;
930 
931         /* Further step/diamond searches as necessary */
932         step_param = cpi->sf.first_step + speed_adjust;
933 
934 #if CONFIG_MULTI_RES_ENCODING
935         /* If the lower-res frame is not available for mv reuse (because of
936            frame dropping or a different temporal layer pattern), then the
937            higher-resolution encoder does its motion search without any prior
938            knowledge. Also, since last frame motion info is not stored, we
939            cannot use improved_mv_pred. */
940         if (cpi->oxcf.mr_encoder_id) sf_improved_mv_pred = 0;
941 
942         // Only use parent MV as predictor if this candidate reference frame
943         // (|this_ref_frame|) is equal to |parent_ref_frame|.
944         if (parent_ref_valid && (parent_ref_frame == this_ref_frame)) {
945           /* Use parent MV as predictor. Adjust search range
946            * accordingly.
947            */
948           mvp.as_int = parent_ref_mv.as_int;
949           mvp_full.as_mv.col = parent_ref_mv.as_mv.col >> 3;
950           mvp_full.as_mv.row = parent_ref_mv.as_mv.row >> 3;
951 
952           if (dissim <= 32)
953             step_param += 3;
954           else if (dissim <= 128)
955             step_param += 2;
956           else
957             step_param += 1;
958         } else
959 #endif
960         {
961           if (sf_improved_mv_pred) {
962             if (!saddone) {
963               vp8_cal_sad(cpi, xd, x, recon_yoffset, &near_sadidx[0]);
964               saddone = 1;
965             }
966 
967             vp8_mv_pred(cpi, &x->e_mbd, x->e_mbd.mode_info_context, &mvp,
968                         x->e_mbd.mode_info_context->mbmi.ref_frame,
969                         cpi->common.ref_frame_sign_bias, &sr, &near_sadidx[0]);
970 
971             sr += speed_adjust;
972             /* adjust search range according to sr from mv prediction */
973             if (sr > step_param) step_param = sr;
974 
975             mvp_full.as_mv.col = mvp.as_mv.col >> 3;
976             mvp_full.as_mv.row = mvp.as_mv.row >> 3;
977           } else {
978             mvp.as_int = best_ref_mv.as_int;
979             mvp_full.as_mv.col = best_ref_mv.as_mv.col >> 3;
980             mvp_full.as_mv.row = best_ref_mv.as_mv.row >> 3;
981           }
982         }
983 
984 #if CONFIG_MULTI_RES_ENCODING
985         if (parent_ref_valid && (parent_ref_frame == this_ref_frame) &&
986             dissim <= 2 &&
987             VPXMAX(abs(best_ref_mv.as_mv.row - parent_ref_mv.as_mv.row),
988                    abs(best_ref_mv.as_mv.col - parent_ref_mv.as_mv.col)) <= 4) {
989           d->bmi.mv.as_int = mvp_full.as_int;
990           mode_mv[NEWMV].as_int = mvp_full.as_int;
991 
992           cpi->find_fractional_mv_step(
993               x, b, d, &d->bmi.mv, &best_ref_mv, x->errorperbit,
994               &cpi->fn_ptr[BLOCK_16X16], cpi->mb.mvcost, &distortion2, &sse);
995         } else
996 #endif
997         {
998           /* Get intersection of UMV window and valid MV window to
999            * reduce # of checks in diamond search. */
1000           if (x->mv_col_min < col_min) x->mv_col_min = col_min;
1001           if (x->mv_col_max > col_max) x->mv_col_max = col_max;
1002           if (x->mv_row_min < row_min) x->mv_row_min = row_min;
1003           if (x->mv_row_max > row_max) x->mv_row_max = row_max;
1004 
1005           further_steps =
1006               (cpi->Speed >= 8)
1007                   ? 0
1008                   : (cpi->sf.max_step_search_steps - 1 - step_param);
1009 
1010           if (cpi->sf.search_method == HEX) {
1011 #if CONFIG_MULTI_RES_ENCODING
1012             /* TODO: In higher-res pick_inter_mode, step_param is used to
1013              * modify the hex search range. Here, set step_param to 0 so as
1014              * not to change the behavior of the lowest-resolution encoder.
1015              * Will improve it later.
1016              */
1017             /* Set step_param to 0 to ensure large-range motion search
1018              * when mv reuse is not valid (i.e. |parent_ref_valid| = 0),
1019              * or if this candidate reference frame (|this_ref_frame|) is
1020              * not equal to |parent_ref_frame|.
1021              */
1022             if (!parent_ref_valid || (parent_ref_frame != this_ref_frame))
1023               step_param = 0;
1024 #endif
1025             bestsme = vp8_hex_search(x, b, d, &mvp_full, &d->bmi.mv, step_param,
1026                                      sadpb, &cpi->fn_ptr[BLOCK_16X16],
1027                                      x->mvsadcost, &best_ref_mv);
1028             mode_mv[NEWMV].as_int = d->bmi.mv.as_int;
1029           } else {
1030             bestsme = cpi->diamond_search_sad(
1031                 x, b, d, &mvp_full, &d->bmi.mv, step_param, sadpb, &num00,
1032                 &cpi->fn_ptr[BLOCK_16X16], x->mvcost, &best_ref_mv);
1033             mode_mv[NEWMV].as_int = d->bmi.mv.as_int;
1034 
1035             /* Further step/diamond searches as necessary */
1036             n = num00;
1037             num00 = 0;
1038 
1039             while (n < further_steps) {
1040               n++;
1041 
1042               if (num00) {
1043                 num00--;
1044               } else {
1045                 thissme = cpi->diamond_search_sad(
1046                     x, b, d, &mvp_full, &d->bmi.mv, step_param + n, sadpb,
1047                     &num00, &cpi->fn_ptr[BLOCK_16X16], x->mvcost, &best_ref_mv);
1048                 if (thissme < bestsme) {
1049                   bestsme = thissme;
1050                   mode_mv[NEWMV].as_int = d->bmi.mv.as_int;
1051                 } else {
1052                   d->bmi.mv.as_int = mode_mv[NEWMV].as_int;
1053                 }
1054               }
1055             }
1056           }
1057 
1058           x->mv_col_min = tmp_col_min;
1059           x->mv_col_max = tmp_col_max;
1060           x->mv_row_min = tmp_row_min;
1061           x->mv_row_max = tmp_row_max;
1062 
1063           if (bestsme < INT_MAX) {
1064             cpi->find_fractional_mv_step(
1065                 x, b, d, &d->bmi.mv, &best_ref_mv, x->errorperbit,
1066                 &cpi->fn_ptr[BLOCK_16X16], cpi->mb.mvcost, &distortion2, &sse);
1067           }
1068         }
1069 
1070         mode_mv[NEWMV].as_int = d->bmi.mv.as_int;
1071         // The clamp below is not necessary from the perspective
1072         // of VP8 bitstream, but is added to improve ChromeCast
1073         // mirroring's robustness. Please do not remove.
1074         vp8_clamp_mv2(&mode_mv[this_mode], xd);
1075         /* mv cost; */
1076         rate2 +=
1077             vp8_mv_bit_cost(&mode_mv[NEWMV], &best_ref_mv, cpi->mb.mvcost, 128);
1078       }
1079         // fall through
1080 
1081       case NEARESTMV:
1082       case NEARMV:
1083         if (mode_mv[this_mode].as_int == 0) continue;
1084         // fall through
1085 
1086       case ZEROMV:
1087 
1088         /* Trap vectors that reach beyond the UMV borders
1089          * Note that ALL New MV, Nearest MV, Near MV and Zero MV code drops
1090          * through to this point because of the lack of break statements
1091          * in the previous two cases.
1092          */
1093         if (((mode_mv[this_mode].as_mv.row >> 3) < x->mv_row_min) ||
1094             ((mode_mv[this_mode].as_mv.row >> 3) > x->mv_row_max) ||
1095             ((mode_mv[this_mode].as_mv.col >> 3) < x->mv_col_min) ||
1096             ((mode_mv[this_mode].as_mv.col >> 3) > x->mv_col_max)) {
1097           continue;
1098         }
1099 
1100         rate2 += vp8_cost_mv_ref(this_mode, mdcounts);
1101         x->e_mbd.mode_info_context->mbmi.mv.as_int = mode_mv[this_mode].as_int;
1102         this_rd = evaluate_inter_mode(&sse, rate2, &distortion2, cpi, x,
1103                                       rd_adjustment);
1104 
1105         break;
1106       default: break;
1107     }
1108 
1109 #if CONFIG_TEMPORAL_DENOISING
1110     if (cpi->oxcf.noise_sensitivity) {
1111       /* Store for later use by denoiser. */
1112       // Don't denoise with GOLDEN or ALTREF if they are old reference
1113       // frames (more than MAX_GF_ARF_DENOISE_RANGE frames in the past).
1114       int skip_old_reference = ((this_ref_frame != LAST_FRAME) &&
1115                                 (cpi->common.current_video_frame -
1116                                      cpi->current_ref_frames[this_ref_frame] >
1117                                  MAX_GF_ARF_DENOISE_RANGE))
1118                                    ? 1
1119                                    : 0;
1120       if (this_mode == ZEROMV && sse < zero_mv_sse && !skip_old_reference) {
1121         zero_mv_sse = sse;
1122         x->best_zeromv_reference_frame =
1123             x->e_mbd.mode_info_context->mbmi.ref_frame;
1124       }
1125 
1126       // Store the best NEWMV in x for later use in the denoiser.
1127       if (x->e_mbd.mode_info_context->mbmi.mode == NEWMV && sse < best_sse &&
1128           !skip_old_reference) {
1129         best_sse = sse;
1130         x->best_sse_inter_mode = NEWMV;
1131         x->best_sse_mv = x->e_mbd.mode_info_context->mbmi.mv;
1132         x->need_to_clamp_best_mvs =
1133             x->e_mbd.mode_info_context->mbmi.need_to_clamp_mvs;
1134         x->best_reference_frame = x->e_mbd.mode_info_context->mbmi.ref_frame;
1135       }
1136     }
1137 #endif
1138 
1139     if (this_rd < best_rd || x->skip) {
1140       /* Note index of best mode */
1141       best_mode_index = mode_index;
1142 
1143       *returnrate = rate2;
1144       *returndistortion = distortion2;
1145       best_rd_sse = sse;
1146       best_rd = this_rd;
1147       memcpy(&best_mbmode, &x->e_mbd.mode_info_context->mbmi,
1148              sizeof(MB_MODE_INFO));
1149 
1150       /* Testing this mode gave rise to an improvement in best error
1151        * score. Lower threshold a bit for next time
1152        */
1153       x->rd_thresh_mult[mode_index] =
1154           (x->rd_thresh_mult[mode_index] >= (MIN_THRESHMULT + 2))
1155               ? x->rd_thresh_mult[mode_index] - 2
1156               : MIN_THRESHMULT;
1157       x->rd_threshes[mode_index] = (cpi->rd_baseline_thresh[mode_index] >> 7) *
1158                                    x->rd_thresh_mult[mode_index];
1159     }
1160 
1161     /* If the mode did not help improve the best error case then raise the
1162      * threshold for testing that mode next time around.
1163      */
1164     else {
1165       x->rd_thresh_mult[mode_index] += 4;
1166 
1167       if (x->rd_thresh_mult[mode_index] > MAX_THRESHMULT) {
1168         x->rd_thresh_mult[mode_index] = MAX_THRESHMULT;
1169       }
1170 
1171       x->rd_threshes[mode_index] = (cpi->rd_baseline_thresh[mode_index] >> 7) *
1172                                    x->rd_thresh_mult[mode_index];
1173     }
1174 
1175     if (x->skip) break;
1176   }
1177 
1178   /* Reduce the activation RD thresholds for the best choice mode */
1179   if ((cpi->rd_baseline_thresh[best_mode_index] > 0) &&
1180       (cpi->rd_baseline_thresh[best_mode_index] < (INT_MAX >> 2))) {
1181     int best_adjustment = (x->rd_thresh_mult[best_mode_index] >> 3);
1182 
1183     x->rd_thresh_mult[best_mode_index] =
1184         (x->rd_thresh_mult[best_mode_index] >=
1185          (MIN_THRESHMULT + best_adjustment))
1186             ? x->rd_thresh_mult[best_mode_index] - best_adjustment
1187             : MIN_THRESHMULT;
1188     x->rd_threshes[best_mode_index] =
1189         (cpi->rd_baseline_thresh[best_mode_index] >> 7) *
1190         x->rd_thresh_mult[best_mode_index];
1191   }
1192 
1193   {
1194     int this_rdbin = (*returndistortion >> 7);
1195 
1196     if (this_rdbin >= 1024) {
1197       this_rdbin = 1023;
1198     }
1199 
1200     x->error_bins[this_rdbin]++;
1201   }
1202 
1203 #if CONFIG_TEMPORAL_DENOISING
1204   if (cpi->oxcf.noise_sensitivity) {
1205     int block_index = mb_row * cpi->common.mb_cols + mb_col;
1206     int reevaluate = 0;
1207     int is_noisy = 0;
1208     if (x->best_sse_inter_mode == DC_PRED) {
1209       /* No best MV found. */
1210       x->best_sse_inter_mode = best_mbmode.mode;
1211       x->best_sse_mv = best_mbmode.mv;
1212       x->need_to_clamp_best_mvs = best_mbmode.need_to_clamp_mvs;
1213       x->best_reference_frame = best_mbmode.ref_frame;
1214       best_sse = best_rd_sse;
1215     }
1216     // For non-skin blocks that have selected ZEROMV for this current frame,
1217     // and have been selecting ZEROMV_LAST (on the base layer frame) at
1218     // least |x~20| consecutive past frames in a row, label the block for
1219     // possible increase in denoising strength. We also condition this
1220     // labeling on there being significant denoising in the scene.
1221     if (cpi->oxcf.noise_sensitivity == 4) {
1222       if (cpi->denoiser.nmse_source_diff >
1223           70 * cpi->denoiser.threshold_aggressive_mode / 100) {
1224         is_noisy = 1;
1225       }
1226     } else {
1227       if (cpi->mse_source_denoised > 1000) is_noisy = 1;
1228     }
1229     x->increase_denoising = 0;
1230     if (!x->is_skin && x->best_sse_inter_mode == ZEROMV &&
1231         (x->best_reference_frame == LAST_FRAME ||
1232          x->best_reference_frame == cpi->closest_reference_frame) &&
1233         cpi->consec_zero_last[block_index] >= 20 && is_noisy) {
1234       x->increase_denoising = 1;
1235     }
1236     x->denoise_zeromv = 0;
1237     vp8_denoiser_denoise_mb(&cpi->denoiser, x, best_sse, zero_mv_sse,
1238                             recon_yoffset, recon_uvoffset, &cpi->common.lf_info,
1239                             mb_row, mb_col, block_index,
1240                             cpi->consec_zero_last_mvbias[block_index]);
1241 
1242     // Reevaluate ZEROMV after denoising: for large noise content
1243     // (i.e., cpi->mse_source_denoised is above threshold), do this for all
1244     // blocks that did not pick ZEROMV as best mode but are using ZEROMV
1245     // for denoising. Otherwise, always re-evaluate for blocks that picked
1246     // INTRA mode as best mode.
1247     // Avoid blocks that have been biased against ZERO_LAST
1248     // (i.e., dot artifact candidate blocks).
1249     reevaluate = (best_mbmode.ref_frame == INTRA_FRAME) ||
1250                  (best_mbmode.mode != ZEROMV && x->denoise_zeromv &&
1251                   cpi->mse_source_denoised > 2000);
1252     if (!dot_artifact_candidate && reevaluate &&
1253         x->best_zeromv_reference_frame != INTRA_FRAME) {
1254       int this_rd = 0;
1255       int this_ref_frame = x->best_zeromv_reference_frame;
1256       rd_adjustment = 100;
1257       rate2 =
1258           x->ref_frame_cost[this_ref_frame] + vp8_cost_mv_ref(ZEROMV, mdcounts);
1259       distortion2 = 0;
1260 
1261       /* set up the proper prediction buffers for the frame */
1262       x->e_mbd.mode_info_context->mbmi.ref_frame = this_ref_frame;
1263       x->e_mbd.pre.y_buffer = plane[this_ref_frame][0];
1264       x->e_mbd.pre.u_buffer = plane[this_ref_frame][1];
1265       x->e_mbd.pre.v_buffer = plane[this_ref_frame][2];
1266 
1267       x->e_mbd.mode_info_context->mbmi.mode = ZEROMV;
1268       x->e_mbd.mode_info_context->mbmi.uv_mode = DC_PRED;
1269       x->e_mbd.mode_info_context->mbmi.mv.as_int = 0;
1270       this_rd =
1271           evaluate_inter_mode(&sse, rate2, &distortion2, cpi, x, rd_adjustment);
1272 
1273       if (this_rd < best_rd) {
1274         memcpy(&best_mbmode, &x->e_mbd.mode_info_context->mbmi,
1275                sizeof(MB_MODE_INFO));
1276       }
1277     }
1278   }
1279 #endif
1280 
1281   if (cpi->is_src_frame_alt_ref &&
1282       (best_mbmode.mode != ZEROMV || best_mbmode.ref_frame != ALTREF_FRAME)) {
1283     x->e_mbd.mode_info_context->mbmi.mode = ZEROMV;
1284     x->e_mbd.mode_info_context->mbmi.ref_frame = ALTREF_FRAME;
1285     x->e_mbd.mode_info_context->mbmi.mv.as_int = 0;
1286     x->e_mbd.mode_info_context->mbmi.uv_mode = DC_PRED;
1287     x->e_mbd.mode_info_context->mbmi.mb_skip_coeff =
1288         (cpi->common.mb_no_coeff_skip);
1289     x->e_mbd.mode_info_context->mbmi.partitioning = 0;
1290 
1291     return;
1292   }
1293 
1294   /* set to the best mb mode, this copy can be skipped if x->skip since it
1295    * already has the right content */
1296   if (!x->skip) {
1297     memcpy(&x->e_mbd.mode_info_context->mbmi, &best_mbmode,
1298            sizeof(MB_MODE_INFO));
1299   }
1300 
1301   if (best_mbmode.mode <= B_PRED) {
1302     /* set mode_info_context->mbmi.uv_mode */
1303     pick_intra_mbuv_mode(x);
1304   }
1305 
1306   if (sign_bias !=
1307       cpi->common.ref_frame_sign_bias[xd->mode_info_context->mbmi.ref_frame]) {
1308     best_ref_mv.as_int = best_ref_mv_sb[!sign_bias].as_int;
1309   }
1310 
1311   update_mvcount(x, &best_ref_mv);
1312 }
1313 
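/* Intra mode selection: picks the chroma mode, then the best 16x16 luma mode
 * (DC/V/H/TM) by variance-based rd, and finally compares that against the
 * 4x4 B_PRED alternative. */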
1314 void vp8_pick_intra_mode(MACROBLOCK *x, int *rate) {
1315   int error4x4, error16x16 = INT_MAX;
1316   int rate_, best_rate = 0, distortion, best_sse;
1317   MB_PREDICTION_MODE mode, best_mode = DC_PRED;
1318   int this_rd;
1319   unsigned int sse;
1320   BLOCK *b = &x->block[0];
1321   MACROBLOCKD *xd = &x->e_mbd;
1322 
1323   xd->mode_info_context->mbmi.ref_frame = INTRA_FRAME;
1324 
1325   pick_intra_mbuv_mode(x);
1326 
1327   for (mode = DC_PRED; mode <= TM_PRED; ++mode) {
1328     xd->mode_info_context->mbmi.mode = mode;
1329     vp8_build_intra_predictors_mby_s(xd, xd->dst.y_buffer - xd->dst.y_stride,
1330                                      xd->dst.y_buffer - 1, xd->dst.y_stride,
1331                                      xd->predictor, 16);
1332     distortion = vpx_variance16x16(*(b->base_src), b->src_stride, xd->predictor,
1333                                    16, &sse);
1334     rate_ = x->mbmode_cost[xd->frame_type][mode];
1335     this_rd = RDCOST(x->rdmult, x->rddiv, rate_, distortion);
1336 
1337     if (error16x16 > this_rd) {
1338       error16x16 = this_rd;
1339       best_mode = mode;
1340       best_sse = sse;
1341       best_rate = rate_;
1342     }
1343   }
1344   xd->mode_info_context->mbmi.mode = best_mode;
1345 
1346   error4x4 = pick_intra4x4mby_modes(x, &rate_, &best_sse);
1347   if (error4x4 < error16x16) {
1348     xd->mode_info_context->mbmi.mode = B_PRED;
1349     best_rate = rate_;
1350   }
1351 
1352   *rate = best_rate;
1353 }
1354