1 /*
2  *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
3  *
4  *  Use of this source code is governed by a BSD-style license
5  *  that can be found in the LICENSE file in the root of the source
6  *  tree. An additional intellectual property rights grant can be found
7  *  in the file PATENTS.  All contributing project authors may
8  *  be found in the AUTHORS file in the root of the source tree.
9  */
10 #include <limits.h>
11 #include <stdio.h>
12 
13 #include "vpx_config.h"
14 
15 #include "vp8/common/common.h"
16 #include "vp8/common/entropymode.h"
17 #include "vp8/common/extend.h"
18 #include "vp8/common/invtrans.h"
19 #include "vp8/common/quant_common.h"
20 #include "vp8/common/reconinter.h"
21 #include "vp8/common/setupintrarecon.h"
22 #include "vp8/common/threading.h"
23 #include "vp8/encoder/bitstream.h"
24 #include "vp8/encoder/encodeframe.h"
25 #include "vp8/encoder/encodeintra.h"
26 #include "vp8/encoder/encodemb.h"
27 #include "vp8/encoder/onyx_int.h"
28 #include "vp8/encoder/pickinter.h"
29 #include "vp8/encoder/rdopt.h"
30 #include "vp8_rtcd.h"
31 #include "vpx/internal/vpx_codec_internal.h"
32 #include "vpx_dsp_rtcd.h"
33 #include "vpx_mem/vpx_mem.h"
34 #include "vpx_ports/vpx_timer.h"
35 
36 #if CONFIG_MULTITHREAD
37 #include "vp8/encoder/ethreading.h"
38 #endif
39 
40 extern void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t);
41 static void adjust_act_zbin(VP8_COMP *cpi, MACROBLOCK *x);
42 
43 #ifdef MODE_STATS
44 unsigned int inter_y_modes[10] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
45 unsigned int inter_uv_modes[4] = { 0, 0, 0, 0 };
46 unsigned int inter_b_modes[15] = {
47   0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
48 };
49 unsigned int y_modes[5] = { 0, 0, 0, 0, 0 };
50 unsigned int uv_modes[4] = { 0, 0, 0, 0 };
51 unsigned int b_modes[14] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
52 #endif
53 
54 /* activity_avg must be positive, or flat regions could get a zero weight
55  *  (infinite lambda), which confounds analysis.
56  * This also avoids the need for divide by zero checks in
57  *  vp8_activity_masking().
58  */
59 #define VP8_ACTIVITY_AVG_MIN (64)
60 
61 /* This is used as a reference when computing the source variance for the
62  *  purposes of activity masking.
63  * Eventually this should be replaced by custom no-reference routines,
64  *  which will be faster.
65  */
66 static const unsigned char VP8_VAR_OFFS[16] = { 128, 128, 128, 128, 128, 128,
67                                                 128, 128, 128, 128, 128, 128,
68                                                 128, 128, 128, 128 };
69 
70 /* Original activity measure from Tim T's code. */
71 static unsigned int tt_activity_measure(MACROBLOCK *x) {
72   unsigned int act;
73   unsigned int sse;
74   /* TODO: This could also be done over smaller areas (8x8), but that would
75    *  require extensive changes elsewhere, as lambda is assumed to be fixed
76    *  over an entire MB in most of the code.
77    * Another option is to compute four 8x8 variances, and pick a single
78    *  lambda using a non-linear combination (e.g., the smallest, or second
79    *  smallest, etc.).
80    */
81   act = vpx_variance16x16(x->src.y_buffer, x->src.y_stride, VP8_VAR_OFFS, 0,
82                           &sse);
83   act = act << 4;
84 
85   /* If the region is flat, lower the activity some more. */
86   if (act < 8 << 12) act = act < 5 << 12 ? act : 5 << 12;
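  /* 8 << 12 is 32768 and 5 << 12 is 20480, so activities in the range
   * [20480, 32768) are pulled down to 20480; values outside that range
   * pass through unchanged.
   */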
87 
88   return act;
89 }
90 
91 /* Measure the activity of the current macroblock.
92  * What is measured here is still TBD, so it is abstracted behind this function.
93  */
94 #define ALT_ACT_MEASURE 1
95 static unsigned int mb_activity_measure(MACROBLOCK *x, int mb_row, int mb_col) {
96   unsigned int mb_activity;
97 
98   if (ALT_ACT_MEASURE) {
99     int use_dc_pred = (mb_col || mb_row) && (!mb_col || !mb_row);
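    /* use_dc_pred is true only for macroblocks along the top row or left
     * column of the frame (excluding the top-left corner), i.e. where
     * exactly one of mb_row and mb_col is zero.
     */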
100 
101     /* Or use an alternative. */
102     mb_activity = vp8_encode_intra(x, use_dc_pred);
103   } else {
104     /* Original activity measure from Tim T's code. */
105     mb_activity = tt_activity_measure(x);
106   }
107 
108   if (mb_activity < VP8_ACTIVITY_AVG_MIN) mb_activity = VP8_ACTIVITY_AVG_MIN;
109 
110   return mb_activity;
111 }
112 
113 /* Calculate an "average" mb activity value for the frame */
114 #define ACT_MEDIAN 0
115 static void calc_av_activity(VP8_COMP *cpi, int64_t activity_sum) {
116 #if ACT_MEDIAN
117   /* Find median: Simple n^2 algorithm for experimentation */
118   {
119     unsigned int median;
120     unsigned int i, j;
121     unsigned int *sortlist;
122     unsigned int tmp;
123 
124     /* Create a list to sort to */
125     CHECK_MEM_ERROR(&cpi->common.error, sortlist,
126                     vpx_calloc(sizeof(unsigned int), cpi->common.MBs));
127 
128     /* Copy map to sort list */
129     memcpy(sortlist, cpi->mb_activity_map,
130            sizeof(unsigned int) * cpi->common.MBs);
131 
132     /* Ripple each value down to its correct position */
133     for (i = 1; i < cpi->common.MBs; ++i) {
134       for (j = i; j > 0; j--) {
135         if (sortlist[j] < sortlist[j - 1]) {
136           /* Swap values */
137           tmp = sortlist[j - 1];
138           sortlist[j - 1] = sortlist[j];
139           sortlist[j] = tmp;
140         } else
141           break;
142       }
143     }
144 
145     /* Even number of MBs, so estimate the median as the mean of the two middle values. */
146     median = (1 + sortlist[cpi->common.MBs >> 1] +
147               sortlist[(cpi->common.MBs >> 1) + 1]) >>
148              1;
149 
150     cpi->activity_avg = median;
151 
152     vpx_free(sortlist);
153   }
154 #else
155   /* Simple mean for now */
156   cpi->activity_avg = (unsigned int)(activity_sum / cpi->common.MBs);
157 #endif
158 
159   if (cpi->activity_avg < VP8_ACTIVITY_AVG_MIN) {
160     cpi->activity_avg = VP8_ACTIVITY_AVG_MIN;
161   }
162 
163   /* Experimental code: return fixed value normalized for several clips */
164   if (ALT_ACT_MEASURE) cpi->activity_avg = 100000;
165 }
166 
167 #define USE_ACT_INDEX 0
168 #define OUTPUT_NORM_ACT_STATS 0
169 
170 #if USE_ACT_INDEX
171 /* Calculate an activity index for each mb */
172 static void calc_activity_index(VP8_COMP *cpi, MACROBLOCK *x) {
173   VP8_COMMON *const cm = &cpi->common;
174   int mb_row, mb_col;
175 
176   int64_t act;
177   int64_t a;
178   int64_t b;
179 
180 #if OUTPUT_NORM_ACT_STATS
181   FILE *f = fopen("norm_act.stt", "a");
182   fprintf(f, "\n%12d\n", cpi->activity_avg);
183 #endif
184 
185   /* Reset pointers to start of activity map */
186   x->mb_activity_ptr = cpi->mb_activity_map;
187 
188   /* Calculate normalized mb activity number. */
189   for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) {
190     /* for each macroblock col in image */
191     for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) {
192       /* Read activity from the map */
193       act = *(x->mb_activity_ptr);
194 
195       /* Calculate a normalized activity number */
196       a = act + 4 * cpi->activity_avg;
197       b = 4 * act + cpi->activity_avg;
198 
199       if (b >= a)
200         *(x->activity_ptr) = (int)((b + (a >> 1)) / a) - 1;
201       else
202         *(x->activity_ptr) = 1 - (int)((a + (b >> 1)) / b);
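      /* For example, act equal to activity_avg gives a == b and an index of
       * 0; an MB four times as active maps to +1, and one a quarter as
       * active maps to -1.
       */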
203 
204 #if OUTPUT_NORM_ACT_STATS
205       fprintf(f, " %6d", *(x->mb_activity_ptr));
206 #endif
207       /* Increment activity map pointers */
208       x->mb_activity_ptr++;
209     }
210 
211 #if OUTPUT_NORM_ACT_STATS
212     fprintf(f, "\n");
213 #endif
214   }
215 
216 #if OUTPUT_NORM_ACT_STATS
217   fclose(f);
218 #endif
219 }
220 #endif
221 
222 /* Loop through all MBs: note the activity of each, compute the average
223  * activity for the frame, and calculate a normalized activity for each MB.
224  */
225 static void build_activity_map(VP8_COMP *cpi) {
226   MACROBLOCK *const x = &cpi->mb;
227   MACROBLOCKD *xd = &x->e_mbd;
228   VP8_COMMON *const cm = &cpi->common;
229 
230 #if ALT_ACT_MEASURE
231   YV12_BUFFER_CONFIG *new_yv12 = &cm->yv12_fb[cm->new_fb_idx];
232   int recon_yoffset;
233   int recon_y_stride = new_yv12->y_stride;
234 #endif
235 
236   int mb_row, mb_col;
237   unsigned int mb_activity;
238   int64_t activity_sum = 0;
239 
240   /* for each macroblock row in image */
241   for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) {
242 #if ALT_ACT_MEASURE
243     /* reset above block coeffs */
244     xd->up_available = (mb_row != 0);
245     recon_yoffset = (mb_row * recon_y_stride * 16);
246 #endif
247     /* for each macroblock col in image */
248     for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) {
249 #if ALT_ACT_MEASURE
250       xd->dst.y_buffer = new_yv12->y_buffer + recon_yoffset;
251       xd->left_available = (mb_col != 0);
252       recon_yoffset += 16;
253 #endif
254       /* Copy current mb to a buffer */
255       vp8_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
256 
257       /* measure activity */
258       mb_activity = mb_activity_measure(x, mb_row, mb_col);
259 
260       /* Keep frame sum */
261       activity_sum += mb_activity;
262 
263       /* Store MB level activity details. */
264       *x->mb_activity_ptr = mb_activity;
265 
266       /* Increment activity map pointer */
267       x->mb_activity_ptr++;
268 
269       /* adjust to the next column of source macroblocks */
270       x->src.y_buffer += 16;
271     }
272 
273     /* adjust to the next row of mbs */
274     x->src.y_buffer += 16 * x->src.y_stride - 16 * cm->mb_cols;
275 
276 #if ALT_ACT_MEASURE
277     /* extend the recon for intra prediction */
278     vp8_extend_mb_row(new_yv12, xd->dst.y_buffer + 16, xd->dst.u_buffer + 8,
279                       xd->dst.v_buffer + 8);
280 #endif
281   }
282 
283   /* Calculate an "average" MB activity */
284   calc_av_activity(cpi, activity_sum);
285 
286 #if USE_ACT_INDEX
287   /* Calculate an activity index number of each mb */
288   calc_activity_index(cpi, x);
289 #endif
290 }
291 
292 /* Macroblock activity masking */
293 void vp8_activity_masking(VP8_COMP *cpi, MACROBLOCK *x) {
294 #if USE_ACT_INDEX
295   x->rdmult += *(x->mb_activity_ptr) * (x->rdmult >> 2);
296   x->errorperbit = x->rdmult * 100 / (110 * x->rddiv);
297   x->errorperbit += (x->errorperbit == 0);
298 #else
299   int64_t a;
300   int64_t b;
301   int64_t act = *(x->mb_activity_ptr);
302 
303   /* Apply the masking to the RD multiplier. */
304   a = act + (2 * cpi->activity_avg);
305   b = (2 * act) + cpi->activity_avg;
306 
307   x->rdmult = (unsigned int)(((int64_t)x->rdmult * b + (a >> 1)) / a);
308   x->errorperbit = x->rdmult * 100 / (110 * x->rddiv);
309   x->errorperbit += (x->errorperbit == 0);
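  /* With this weighting, act == cpi->activity_avg leaves rdmult unchanged
   * (b == a); very busy MBs approach a 2x multiplier and very flat ones
   * approach 0.5x. The comparison on the line above evaluates to 1 only
   * when errorperbit would otherwise be 0, keeping it strictly positive.
   */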
310 #endif
311 
312   /* Activity based Zbin adjustment */
313   adjust_act_zbin(cpi, x);
314 }
315 
316 static void encode_mb_row(VP8_COMP *cpi, VP8_COMMON *cm, int mb_row,
317                           MACROBLOCK *x, MACROBLOCKD *xd, TOKENEXTRA **tp,
318                           int *segment_counts, int *totalrate) {
319   int recon_yoffset, recon_uvoffset;
320   int mb_col;
321   int ref_fb_idx = cm->lst_fb_idx;
322   int dst_fb_idx = cm->new_fb_idx;
323   int recon_y_stride = cm->yv12_fb[ref_fb_idx].y_stride;
324   int recon_uv_stride = cm->yv12_fb[ref_fb_idx].uv_stride;
325   int map_index = (mb_row * cpi->common.mb_cols);
326 
327 #if (CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
328   const int num_part = (1 << cm->multi_token_partition);
329   TOKENEXTRA *tp_start = cpi->tok;
330   vp8_writer *w;
331 #endif
332 
333 #if CONFIG_MULTITHREAD
334   const int nsync = cpi->mt_sync_range;
335   vpx_atomic_int rightmost_col = VPX_ATOMIC_INIT(cm->mb_cols + nsync);
336   const vpx_atomic_int *last_row_current_mb_col;
337   vpx_atomic_int *current_mb_col = NULL;
338 
339   if (vpx_atomic_load_acquire(&cpi->b_multi_threaded) != 0) {
340     current_mb_col = &cpi->mt_current_mb_col[mb_row];
341   }
342   if (vpx_atomic_load_acquire(&cpi->b_multi_threaded) != 0 && mb_row != 0) {
343     last_row_current_mb_col = &cpi->mt_current_mb_col[mb_row - 1];
344   } else {
345     last_row_current_mb_col = &rightmost_col;
346   }
347 #endif
348 
349 #if (CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
350   if (num_part > 1)
351     w = &cpi->bc[1 + (mb_row % num_part)];
352   else
353     w = &cpi->bc[1];
354 #endif
355 
356   /* reset above block coeffs */
357   xd->above_context = cm->above_context;
358 
359   xd->up_available = (mb_row != 0);
360   recon_yoffset = (mb_row * recon_y_stride * 16);
361   recon_uvoffset = (mb_row * recon_uv_stride * 8);
362 
363   cpi->tplist[mb_row].start = *tp;
364   /* printf("Main mb_row = %d\n", mb_row); */
365 
366   /* Distance of Mb to the top & bottom edges, specified in 1/8th pel
367    * units as they are always compared to values that are in 1/8th pel
368    */
369   xd->mb_to_top_edge = -((mb_row * 16) << 3);
370   xd->mb_to_bottom_edge = ((cm->mb_rows - 1 - mb_row) * 16) << 3;
371 
372   /* Set up limit values for vertical motion vector components
373    * to prevent them extending beyond the UMV borders
374    */
375   x->mv_row_min = -((mb_row * 16) + (VP8BORDERINPIXELS - 16));
376   x->mv_row_max = ((cm->mb_rows - 1 - mb_row) * 16) + (VP8BORDERINPIXELS - 16);
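  /* The UMV border is VP8BORDERINPIXELS wide; allowing the vector to reach
   * only (VP8BORDERINPIXELS - 16) beyond the frame edge means a full 16x16
   * prediction block fetched at the limit still lies inside the padded
   * reconstruction buffer.
   */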
377 
378   /* Set the mb activity pointer to the start of the row. */
379   x->mb_activity_ptr = &cpi->mb_activity_map[map_index];
380 
381   /* for each macroblock col in image */
382   for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) {
383 #if (CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
384     *tp = cpi->tok;
385 #endif
386     /* Distance of Mb to the left & right edges, specified in
387      * 1/8th pel units as they are always compared to values
388      * that are in 1/8th pel units
389      */
390     xd->mb_to_left_edge = -((mb_col * 16) << 3);
391     xd->mb_to_right_edge = ((cm->mb_cols - 1 - mb_col) * 16) << 3;
392 
393     /* Set up limit values for horizontal motion vector components
394      * to prevent them extending beyond the UMV borders
395      */
396     x->mv_col_min = -((mb_col * 16) + (VP8BORDERINPIXELS - 16));
397     x->mv_col_max =
398         ((cm->mb_cols - 1 - mb_col) * 16) + (VP8BORDERINPIXELS - 16);
399 
400     xd->dst.y_buffer = cm->yv12_fb[dst_fb_idx].y_buffer + recon_yoffset;
401     xd->dst.u_buffer = cm->yv12_fb[dst_fb_idx].u_buffer + recon_uvoffset;
402     xd->dst.v_buffer = cm->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset;
403     xd->left_available = (mb_col != 0);
404 
405     x->rddiv = cpi->RDDIV;
406     x->rdmult = cpi->RDMULT;
407 
408     /* Copy current mb to a buffer */
409     vp8_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
410 
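    /* Row-level synchronisation for the multi-threaded encoder: every nsync
     * columns this row publishes its progress (so the row below can start),
     * and waits until the row above has advanced far enough that the above
     * and above-right context needed here is complete.
     */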
411 #if CONFIG_MULTITHREAD
412     if (vpx_atomic_load_acquire(&cpi->b_multi_threaded) != 0) {
413       if (((mb_col - 1) % nsync) == 0) {
414         vpx_atomic_store_release(current_mb_col, mb_col - 1);
415       }
416 
417       if (mb_row && !(mb_col & (nsync - 1))) {
418         vp8_atomic_spin_wait(mb_col, last_row_current_mb_col, nsync);
419       }
420     }
421 #endif
422 
423     if (cpi->oxcf.tuning == VP8_TUNE_SSIM) vp8_activity_masking(cpi, x);
424 
425     /* Is segmentation enabled */
426     /* MB level adjustment to quantizer */
427     if (xd->segmentation_enabled) {
428       /* Code to set segment id in xd->mbmi.segment_id for current MB
429        * (with range checking)
430        */
431       if (cpi->segmentation_map[map_index + mb_col] <= 3) {
432         xd->mode_info_context->mbmi.segment_id =
433             cpi->segmentation_map[map_index + mb_col];
434       } else {
435         xd->mode_info_context->mbmi.segment_id = 0;
436       }
437 
438       vp8cx_mb_init_quantizer(cpi, x, 1);
439     } else {
440       /* Set to Segment 0 by default */
441       xd->mode_info_context->mbmi.segment_id = 0;
442     }
443 
444     x->active_ptr = cpi->active_map + map_index + mb_col;
445 
446     if (cm->frame_type == KEY_FRAME) {
447       const int intra_rate_cost = vp8cx_encode_intra_macroblock(cpi, x, tp);
448       if (INT_MAX - *totalrate > intra_rate_cost)
449         *totalrate += intra_rate_cost;
450       else
451         *totalrate = INT_MAX;
452 #ifdef MODE_STATS
453       y_modes[xd->mbmi.mode]++;
454 #endif
455     } else {
456       const int inter_rate_cost = vp8cx_encode_inter_macroblock(
457           cpi, x, tp, recon_yoffset, recon_uvoffset, mb_row, mb_col);
458       if (INT_MAX - *totalrate > inter_rate_cost)
459         *totalrate += inter_rate_cost;
460       else
461         *totalrate = INT_MAX;
462 
463 #ifdef MODE_STATS
464       inter_y_modes[xd->mbmi.mode]++;
465 
466       if (xd->mbmi.mode == SPLITMV) {
467         int b;
468 
469         for (b = 0; b < xd->mbmi.partition_count; ++b) {
470           inter_b_modes[x->partition->bmi[b].mode]++;
471         }
472       }
473 
474 #endif
475 
476       // Keep track of how many (consecutive) times a block is coded
477       // as ZEROMV_LASTREF, for base layer frames.
478       // Reset to 0 if it is coded as anything else.
479       if (cpi->current_layer == 0) {
480         if (xd->mode_info_context->mbmi.mode == ZEROMV &&
481             xd->mode_info_context->mbmi.ref_frame == LAST_FRAME) {
482           // Increment, check for wrap-around.
483           if (cpi->consec_zero_last[map_index + mb_col] < 255) {
484             cpi->consec_zero_last[map_index + mb_col] += 1;
485           }
486           if (cpi->consec_zero_last_mvbias[map_index + mb_col] < 255) {
487             cpi->consec_zero_last_mvbias[map_index + mb_col] += 1;
488           }
489         } else {
490           cpi->consec_zero_last[map_index + mb_col] = 0;
491           cpi->consec_zero_last_mvbias[map_index + mb_col] = 0;
492         }
493         if (x->zero_last_dot_suppress) {
494           cpi->consec_zero_last_mvbias[map_index + mb_col] = 0;
495         }
496       }
497 
498       /* Special case code for cyclic refresh.
499        * If cyclic update is enabled, copy xd->mbmi.segment_id (which
500        * may have been updated based on mode during
501        * vp8cx_encode_inter_macroblock()) back into the global
502        * segmentation map.
503        */
504       if ((cpi->current_layer == 0) &&
505           (cpi->cyclic_refresh_mode_enabled && xd->segmentation_enabled)) {
506         cpi->segmentation_map[map_index + mb_col] =
507             xd->mode_info_context->mbmi.segment_id;
508 
509         /* If the block has been refreshed, mark it as clean (the
510          * magnitude of the -ve value influences how long it will be
511          * before we consider another refresh).
512          * Otherwise, if it was coded (last frame, 0,0) and has not
513          * already been refreshed, mark it as a candidate for cleanup
514          * next time (marked 0); else mark it as dirty (1).
515          */
516         if (xd->mode_info_context->mbmi.segment_id) {
517           cpi->cyclic_refresh_map[map_index + mb_col] = -1;
518         } else if ((xd->mode_info_context->mbmi.mode == ZEROMV) &&
519                    (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME)) {
520           if (cpi->cyclic_refresh_map[map_index + mb_col] == 1) {
521             cpi->cyclic_refresh_map[map_index + mb_col] = 0;
522           }
523         } else {
524           cpi->cyclic_refresh_map[map_index + mb_col] = 1;
525         }
526       }
527     }
528 
529     cpi->tplist[mb_row].stop = *tp;
530 
531 #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
532     /* pack tokens for this MB */
533     {
534       int tok_count = *tp - tp_start;
535       vp8_pack_tokens(w, tp_start, tok_count);
536     }
537 #endif
538     /* Increment pointer into gf usage flags structure. */
539     x->gf_active_ptr++;
540 
541     /* Increment the activity mask pointers. */
542     x->mb_activity_ptr++;
543 
544     /* adjust to the next column of macroblocks */
545     x->src.y_buffer += 16;
546     x->src.u_buffer += 8;
547     x->src.v_buffer += 8;
548 
549     recon_yoffset += 16;
550     recon_uvoffset += 8;
551 
552     /* Keep track of segment usage */
553     segment_counts[xd->mode_info_context->mbmi.segment_id]++;
554 
555     /* skip to next mb */
556     xd->mode_info_context++;
557     x->partition_info++;
558     xd->above_context++;
559   }
560 
561   /* extend the recon for intra prediction */
562   vp8_extend_mb_row(&cm->yv12_fb[dst_fb_idx], xd->dst.y_buffer + 16,
563                     xd->dst.u_buffer + 8, xd->dst.v_buffer + 8);
564 
565 #if CONFIG_MULTITHREAD
566   if (vpx_atomic_load_acquire(&cpi->b_multi_threaded) != 0) {
567     vpx_atomic_store_release(current_mb_col,
568                              vpx_atomic_load_acquire(&rightmost_col));
569   }
570 #endif
571 
572   /* this is to account for the border */
573   xd->mode_info_context++;
574   x->partition_info++;
575 }
576 
577 static void init_encode_frame_mb_context(VP8_COMP *cpi) {
578   MACROBLOCK *const x = &cpi->mb;
579   VP8_COMMON *const cm = &cpi->common;
580   MACROBLOCKD *const xd = &x->e_mbd;
581 
582   /* GF active flags data structure */
583   x->gf_active_ptr = (signed char *)cpi->gf_active_flags;
584 
585   /* Activity map pointer */
586   x->mb_activity_ptr = cpi->mb_activity_map;
587 
588   x->act_zbin_adj = 0;
589 
590   x->partition_info = x->pi;
591 
592   xd->mode_info_context = cm->mi;
593   xd->mode_info_stride = cm->mode_info_stride;
594 
595   xd->frame_type = cm->frame_type;
596 
597   /* reset intra mode contexts */
598   if (cm->frame_type == KEY_FRAME) vp8_init_mbmode_probs(cm);
599 
600   /* Copy data over into macro block data structures. */
601   x->src = *cpi->Source;
602   xd->pre = cm->yv12_fb[cm->lst_fb_idx];
603   xd->dst = cm->yv12_fb[cm->new_fb_idx];
604 
605   /* set up frame for intra coded blocks */
606   vp8_setup_intra_recon(&cm->yv12_fb[cm->new_fb_idx]);
607 
608   vp8_build_block_offsets(x);
609 
610   xd->mode_info_context->mbmi.mode = DC_PRED;
611   xd->mode_info_context->mbmi.uv_mode = DC_PRED;
612 
613   xd->left_context = &cm->left_context;
614 
615   x->mvc = cm->fc.mvc;
616 
617   memset(cm->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES) * cm->mb_cols);
618 
619   /* Special case treatment when GF and ARF are not sensible options
620    * for reference
621    */
622   if (cpi->ref_frame_flags == VP8_LAST_FRAME) {
623     vp8_calc_ref_frame_costs(x->ref_frame_cost, cpi->prob_intra_coded, 255,
624                              128);
625   } else if ((cpi->oxcf.number_of_layers > 1) &&
626              (cpi->ref_frame_flags == VP8_GOLD_FRAME)) {
627     vp8_calc_ref_frame_costs(x->ref_frame_cost, cpi->prob_intra_coded, 1, 255);
628   } else if ((cpi->oxcf.number_of_layers > 1) &&
629              (cpi->ref_frame_flags == VP8_ALTR_FRAME)) {
630     vp8_calc_ref_frame_costs(x->ref_frame_cost, cpi->prob_intra_coded, 1, 1);
631   } else {
632     vp8_calc_ref_frame_costs(x->ref_frame_cost, cpi->prob_intra_coded,
633                              cpi->prob_last_coded, cpi->prob_gf_coded);
634   }
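  /* The hard-wired probability pairs above (255/128, 1/255, 1/1) simply bias
   * the reference-frame signalling costs so that the one reference buffer
   * that is actually usable in each case is cheap to code.
   */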
635 
636   xd->fullpixel_mask = ~0;
637   if (cm->full_pixel) xd->fullpixel_mask = ~7;
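  /* Motion vectors carry three fractional (1/8-pel) bits; masking with ~7
   * clears them so that, in full-pixel mode, every vector is snapped to a
   * whole-pel position.
   */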
638 
639   vp8_zero(x->coef_counts);
640   vp8_zero(x->ymode_count);
641   vp8_zero(x->uv_mode_count);
642   x->prediction_error = 0;
643   x->intra_error = 0;
644   vp8_zero(x->count_mb_ref_frame_usage);
645 }
646 
647 #if CONFIG_MULTITHREAD
648 static void sum_coef_counts(MACROBLOCK *x, MACROBLOCK *x_thread) {
649   int i = 0;
650   do {
651     int j = 0;
652     do {
653       int k = 0;
654       do {
655         /* at every context */
656 
657         /* calc probs and branch cts for this frame only */
658         int t = 0; /* token/prob index */
659 
660         do {
661           x->coef_counts[i][j][k][t] += x_thread->coef_counts[i][j][k][t];
662         } while (++t < ENTROPY_NODES);
663       } while (++k < PREV_COEF_CONTEXTS);
664     } while (++j < COEF_BANDS);
665   } while (++i < BLOCK_TYPES);
666 }
667 #endif  // CONFIG_MULTITHREAD
668 
669 void vp8_encode_frame(VP8_COMP *cpi) {
670   int mb_row;
671   MACROBLOCK *const x = &cpi->mb;
672   VP8_COMMON *const cm = &cpi->common;
673   MACROBLOCKD *const xd = &x->e_mbd;
674   TOKENEXTRA *tp = cpi->tok;
675   int segment_counts[MAX_MB_SEGMENTS];
676   int totalrate;
677 #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
678   BOOL_CODER *bc = &cpi->bc[1]; /* bc[0] is for control partition */
679   const int num_part = (1 << cm->multi_token_partition);
680 #endif
681 
682   memset(segment_counts, 0, sizeof(segment_counts));
683   totalrate = 0;
684 
685   if (cpi->compressor_speed == 2) {
686     if (cpi->oxcf.cpu_used < 0) {
687       cpi->Speed = -(cpi->oxcf.cpu_used);
688     } else {
689       vp8_auto_select_speed(cpi);
690     }
691   }
692 
693   /* Functions setup for all frame types so we can use MC in AltRef */
694   if (!cm->use_bilinear_mc_filter) {
695     xd->subpixel_predict = vp8_sixtap_predict4x4;
696     xd->subpixel_predict8x4 = vp8_sixtap_predict8x4;
697     xd->subpixel_predict8x8 = vp8_sixtap_predict8x8;
698     xd->subpixel_predict16x16 = vp8_sixtap_predict16x16;
699   } else {
700     xd->subpixel_predict = vp8_bilinear_predict4x4;
701     xd->subpixel_predict8x4 = vp8_bilinear_predict8x4;
702     xd->subpixel_predict8x8 = vp8_bilinear_predict8x8;
703     xd->subpixel_predict16x16 = vp8_bilinear_predict16x16;
704   }
705 
706   cpi->mb.skip_true_count = 0;
707   cpi->tok_count = 0;
708 
709 #if 0
710     /* Experimental code */
711     cpi->frame_distortion = 0;
712     cpi->last_mb_distortion = 0;
713 #endif
714 
715   xd->mode_info_context = cm->mi;
716 
717   vp8_zero(cpi->mb.MVcount);
718 
719   vp8cx_frame_init_quantizer(cpi);
720 
721   vp8_initialize_rd_consts(cpi, x,
722                            vp8_dc_quant(cm->base_qindex, cm->y1dc_delta_q));
723 
724   vp8cx_initialize_me_consts(cpi, cm->base_qindex);
725 
726   if (cpi->oxcf.tuning == VP8_TUNE_SSIM) {
727     /* Initialize encode frame context. */
728     init_encode_frame_mb_context(cpi);
729 
730     /* Build a frame level activity map */
731     build_activity_map(cpi);
732   }
733 
734   /* re-init encode frame context. */
735   init_encode_frame_mb_context(cpi);
736 
737 #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
738   {
739     int i;
740     for (i = 0; i < num_part; ++i) {
741       vp8_start_encode(&bc[i], cpi->partition_d[i + 1],
742                        cpi->partition_d_end[i + 1]);
743       bc[i].error = &cm->error;
744     }
745   }
746 
747 #endif
748 
749   {
750     struct vpx_usec_timer emr_timer;
751     vpx_usec_timer_start(&emr_timer);
752 
753 #if CONFIG_MULTITHREAD
754     if (vpx_atomic_load_acquire(&cpi->b_multi_threaded)) {
755       int i;
756 
757       vp8cx_init_mbrthread_data(cpi, x, cpi->mb_row_ei,
758                                 cpi->encoding_thread_count);
759 
760       if (cpi->mt_current_mb_col_size != cm->mb_rows) {
761         vpx_free(cpi->mt_current_mb_col);
762         cpi->mt_current_mb_col = NULL;
763         cpi->mt_current_mb_col_size = 0;
764         CHECK_MEM_ERROR(
765             &cpi->common.error, cpi->mt_current_mb_col,
766             vpx_malloc(sizeof(*cpi->mt_current_mb_col) * cm->mb_rows));
767         cpi->mt_current_mb_col_size = cm->mb_rows;
768       }
769       for (i = 0; i < cm->mb_rows; ++i)
770         vpx_atomic_store_release(&cpi->mt_current_mb_col[i], -1);
771 
772       for (i = 0; i < cpi->encoding_thread_count; ++i) {
773         vp8_sem_post(&cpi->h_event_start_encoding[i]);
774       }
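      /* The worker threads started above each take their own interleaved
       * rows; the main thread encodes every (encoding_thread_count + 1)-th
       * row here, and the pointer adjustments below skip past the rows
       * owned by the workers.
       */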
775 
776       for (mb_row = 0; mb_row < cm->mb_rows;
777            mb_row += (cpi->encoding_thread_count + 1)) {
778         vp8_zero(cm->left_context);
779 
780 #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
781         tp = cpi->tok;
782 #else
783         tp = cpi->tok + mb_row * (cm->mb_cols * 16 * 24);
784 #endif
785 
786         encode_mb_row(cpi, cm, mb_row, x, xd, &tp, segment_counts, &totalrate);
787 
788         /* adjust to the next row of mbs */
789         x->src.y_buffer +=
790             16 * x->src.y_stride * (cpi->encoding_thread_count + 1) -
791             16 * cm->mb_cols;
792         x->src.u_buffer +=
793             8 * x->src.uv_stride * (cpi->encoding_thread_count + 1) -
794             8 * cm->mb_cols;
795         x->src.v_buffer +=
796             8 * x->src.uv_stride * (cpi->encoding_thread_count + 1) -
797             8 * cm->mb_cols;
798 
799         xd->mode_info_context +=
800             xd->mode_info_stride * cpi->encoding_thread_count;
801         x->partition_info += xd->mode_info_stride * cpi->encoding_thread_count;
802         x->gf_active_ptr += cm->mb_cols * cpi->encoding_thread_count;
803       }
804       /* Wait for all the threads to finish. */
805       for (i = 0; i < cpi->encoding_thread_count; ++i) {
806         vp8_sem_wait(&cpi->h_event_end_encoding[i]);
807       }
808 
809       for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) {
810         cpi->tok_count += (unsigned int)(cpi->tplist[mb_row].stop -
811                                          cpi->tplist[mb_row].start);
812       }
813 
814       if (xd->segmentation_enabled) {
815         int j;
816 
817         if (xd->segmentation_enabled) {
818           for (i = 0; i < cpi->encoding_thread_count; ++i) {
819             for (j = 0; j < 4; ++j) {
820               segment_counts[j] += cpi->mb_row_ei[i].segment_counts[j];
821             }
822           }
823         }
824       }
825 
826       for (i = 0; i < cpi->encoding_thread_count; ++i) {
827         int mode_count;
828         int c_idx;
829         totalrate += cpi->mb_row_ei[i].totalrate;
830 
831         cpi->mb.skip_true_count += cpi->mb_row_ei[i].mb.skip_true_count;
832 
833         for (mode_count = 0; mode_count < VP8_YMODES; ++mode_count) {
834           cpi->mb.ymode_count[mode_count] +=
835               cpi->mb_row_ei[i].mb.ymode_count[mode_count];
836         }
837 
838         for (mode_count = 0; mode_count < VP8_UV_MODES; ++mode_count) {
839           cpi->mb.uv_mode_count[mode_count] +=
840               cpi->mb_row_ei[i].mb.uv_mode_count[mode_count];
841         }
842 
843         for (c_idx = 0; c_idx < MVvals; ++c_idx) {
844           cpi->mb.MVcount[0][c_idx] += cpi->mb_row_ei[i].mb.MVcount[0][c_idx];
845           cpi->mb.MVcount[1][c_idx] += cpi->mb_row_ei[i].mb.MVcount[1][c_idx];
846         }
847 
848         cpi->mb.prediction_error += cpi->mb_row_ei[i].mb.prediction_error;
849         cpi->mb.intra_error += cpi->mb_row_ei[i].mb.intra_error;
850 
851         for (c_idx = 0; c_idx < MAX_REF_FRAMES; ++c_idx) {
852           cpi->mb.count_mb_ref_frame_usage[c_idx] +=
853               cpi->mb_row_ei[i].mb.count_mb_ref_frame_usage[c_idx];
854         }
855 
856         for (c_idx = 0; c_idx < MAX_ERROR_BINS; ++c_idx) {
857           cpi->mb.error_bins[c_idx] += cpi->mb_row_ei[i].mb.error_bins[c_idx];
858         }
859 
860         /* add up counts for each thread */
861         sum_coef_counts(x, &cpi->mb_row_ei[i].mb);
862       }
863 
864     } else
865 #endif  // CONFIG_MULTITHREAD
866     {
867 
868       /* for each macroblock row in image */
869       for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) {
870         vp8_zero(cm->left_context);
871 
872 #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
873         tp = cpi->tok;
874 #endif
875 
876         encode_mb_row(cpi, cm, mb_row, x, xd, &tp, segment_counts, &totalrate);
877 
878         /* adjust to the next row of mbs */
879         x->src.y_buffer += 16 * x->src.y_stride - 16 * cm->mb_cols;
880         x->src.u_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols;
881         x->src.v_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols;
882       }
883 
884       cpi->tok_count = (unsigned int)(tp - cpi->tok);
885     }
886 
887 #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
888     {
889       int i;
890       for (i = 0; i < num_part; ++i) {
891         vp8_stop_encode(&bc[i]);
892         cpi->partition_sz[i + 1] = bc[i].pos;
893       }
894     }
895 #endif
896 
897     vpx_usec_timer_mark(&emr_timer);
898     cpi->time_encode_mb_row += vpx_usec_timer_elapsed(&emr_timer);
899   }
900 
901   // Work out the segment probabilities if segmentation is enabled
902   // and needs to be updated
903   if (xd->segmentation_enabled && xd->update_mb_segmentation_map) {
904     int tot_count;
905     int i;
906 
907     /* Set to defaults */
908     memset(xd->mb_segment_tree_probs, 255, sizeof(xd->mb_segment_tree_probs));
909 
910     tot_count = segment_counts[0] + segment_counts[1] + segment_counts[2] +
911                 segment_counts[3];
912 
913     if (tot_count) {
914       xd->mb_segment_tree_probs[0] =
915           ((segment_counts[0] + segment_counts[1]) * 255) / tot_count;
916 
917       tot_count = segment_counts[0] + segment_counts[1];
918 
919       if (tot_count > 0) {
920         xd->mb_segment_tree_probs[1] = (segment_counts[0] * 255) / tot_count;
921       }
922 
923       tot_count = segment_counts[2] + segment_counts[3];
924 
925       if (tot_count > 0) {
926         xd->mb_segment_tree_probs[2] = (segment_counts[2] * 255) / tot_count;
927       }
928 
929       /* Zero probabilities not allowed */
930       for (i = 0; i < MB_FEATURE_TREE_PROBS; ++i) {
931         if (xd->mb_segment_tree_probs[i] == 0) xd->mb_segment_tree_probs[i] = 1;
932       }
933     }
934   }
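  /* The three tree probabilities form a binary tree: probs[0] separates
   * segments {0,1} from {2,3}, probs[1] separates 0 from 1, and probs[2]
   * separates 2 from 3. With equal counts in all four segments each node
   * works out to 127 after the integer division.
   */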
935 
936   /* projected_frame_size in units of BYTES */
937   cpi->projected_frame_size = totalrate >> 8;
938 
939   /* Make a note of the percentage MBs coded Intra. */
940   if (cm->frame_type == KEY_FRAME) {
941     cpi->this_frame_percent_intra = 100;
942   } else {
943     int tot_modes;
944 
945     tot_modes = cpi->mb.count_mb_ref_frame_usage[INTRA_FRAME] +
946                 cpi->mb.count_mb_ref_frame_usage[LAST_FRAME] +
947                 cpi->mb.count_mb_ref_frame_usage[GOLDEN_FRAME] +
948                 cpi->mb.count_mb_ref_frame_usage[ALTREF_FRAME];
949 
950     if (tot_modes) {
951       cpi->this_frame_percent_intra =
952           cpi->mb.count_mb_ref_frame_usage[INTRA_FRAME] * 100 / tot_modes;
953     }
954   }
955 
956 #if !CONFIG_REALTIME_ONLY
957   /* Adjust the projected reference frame usage probability numbers to
958    * reflect what we have just seen. This may be useful when we make
959    * multiple iterations of the recode loop rather than continuing to use
960    * values from the previous frame.
961    */
962   if ((cm->frame_type != KEY_FRAME) &&
963       ((cpi->oxcf.number_of_layers > 1) ||
964        (!cm->refresh_alt_ref_frame && !cm->refresh_golden_frame))) {
965     vp8_convert_rfct_to_prob(cpi);
966   }
967 #endif
968 }
969 void vp8_setup_block_ptrs(MACROBLOCK *x) {
970   int r, c;
971   int i;
972 
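  /* src_diff holds the residual for a whole macroblock: 256 luma samples
   * (16 4x4 blocks in raster order), the U plane at offset 256, the V plane
   * at offset 320 and the 16 second-order (Y2) coefficients at offset 384.
   * Each BLOCK below is pointed at its own 4x4 window of that buffer.
   */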
973   for (r = 0; r < 4; ++r) {
974     for (c = 0; c < 4; ++c) {
975       x->block[r * 4 + c].src_diff = x->src_diff + r * 4 * 16 + c * 4;
976     }
977   }
978 
979   for (r = 0; r < 2; ++r) {
980     for (c = 0; c < 2; ++c) {
981       x->block[16 + r * 2 + c].src_diff = x->src_diff + 256 + r * 4 * 8 + c * 4;
982     }
983   }
984 
985   for (r = 0; r < 2; ++r) {
986     for (c = 0; c < 2; ++c) {
987       x->block[20 + r * 2 + c].src_diff = x->src_diff + 320 + r * 4 * 8 + c * 4;
988     }
989   }
990 
991   x->block[24].src_diff = x->src_diff + 384;
992 
993   for (i = 0; i < 25; ++i) {
994     x->block[i].coeff = x->coeff + i * 16;
995   }
996 }
997 
998 void vp8_build_block_offsets(MACROBLOCK *x) {
999   int block = 0;
1000   int br, bc;
1001 
1002   vp8_build_block_doffsets(&x->e_mbd);
1003 
1004   /* y blocks */
1005   x->thismb_ptr = &x->thismb[0];
1006   for (br = 0; br < 4; ++br) {
1007     for (bc = 0; bc < 4; ++bc) {
1008       BLOCK *this_block = &x->block[block];
1009       this_block->base_src = &x->thismb_ptr;
1010       this_block->src_stride = 16;
1011       this_block->src = 4 * br * 16 + 4 * bc;
1012       ++block;
1013     }
1014   }
1015 
1016   /* u blocks */
1017   for (br = 0; br < 2; ++br) {
1018     for (bc = 0; bc < 2; ++bc) {
1019       BLOCK *this_block = &x->block[block];
1020       this_block->base_src = &x->src.u_buffer;
1021       this_block->src_stride = x->src.uv_stride;
1022       this_block->src = 4 * br * this_block->src_stride + 4 * bc;
1023       ++block;
1024     }
1025   }
1026 
1027   /* v blocks */
1028   for (br = 0; br < 2; ++br) {
1029     for (bc = 0; bc < 2; ++bc) {
1030       BLOCK *this_block = &x->block[block];
1031       this_block->base_src = &x->src.v_buffer;
1032       this_block->src_stride = x->src.uv_stride;
1033       this_block->src = 4 * br * this_block->src_stride + 4 * bc;
1034       ++block;
1035     }
1036   }
1037 }
1038 
1039 static void sum_intra_stats(VP8_COMP *cpi, MACROBLOCK *x) {
1040   const MACROBLOCKD *xd = &x->e_mbd;
1041   const MB_PREDICTION_MODE m = xd->mode_info_context->mbmi.mode;
1042   const MB_PREDICTION_MODE uvm = xd->mode_info_context->mbmi.uv_mode;
1043 
1044 #ifdef MODE_STATS
1045   const int is_key = cpi->common.frame_type == KEY_FRAME;
1046 
1047   ++(is_key ? uv_modes : inter_uv_modes)[uvm];
1048 
1049   if (m == B_PRED) {
1050     unsigned int *const bct = is_key ? b_modes : inter_b_modes;
1051 
1052     int b = 0;
1053 
1054     do {
1055       ++bct[xd->block[b].bmi.mode];
1056     } while (++b < 16);
1057   }
1058 
1059 #else
1060   (void)cpi;
1061 #endif
1062 
1063   ++x->ymode_count[m];
1064   ++x->uv_mode_count[uvm];
1065 }
1066 
1067 /* Experimental stub function to create a per MB zbin adjustment based on
1068  * some previously calculated measure of MB activity.
1069  */
1070 static void adjust_act_zbin(VP8_COMP *cpi, MACROBLOCK *x) {
1071 #if USE_ACT_INDEX
1072   x->act_zbin_adj = *(x->mb_activity_ptr);
1073 #else
1074   int64_t a;
1075   int64_t b;
1076   int64_t act = *(x->mb_activity_ptr);
1077 
1078   /* Apply activity masking to the zbin adjustment. */
1079   a = act + 4 * cpi->activity_avg;
1080   b = 4 * act + cpi->activity_avg;
1081 
1082   if (act > cpi->activity_avg) {
1083     x->act_zbin_adj = (int)(((int64_t)b + (a >> 1)) / a) - 1;
1084   } else {
1085     x->act_zbin_adj = 1 - (int)(((int64_t)a + (b >> 1)) / b);
1086   }
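  /* e.g. act equal to cpi->activity_avg gives no adjustment; an MB four
   * times as active gets +1 (a wider zbin, coarser quantization) and one a
   * quarter as active gets -1.
   */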
1087 #endif
1088 }
1089 
1090 int vp8cx_encode_intra_macroblock(VP8_COMP *cpi, MACROBLOCK *x,
1091                                   TOKENEXTRA **t) {
1092   MACROBLOCKD *xd = &x->e_mbd;
1093   int rate;
1094 
1095   if (cpi->sf.RD && cpi->compressor_speed != 2) {
1096     vp8_rd_pick_intra_mode(x, &rate);
1097   } else {
1098     vp8_pick_intra_mode(x, &rate);
1099   }
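  /* The RD path above does a full rate-distortion search over the intra
   * modes; the non-RD path relies on cheaper heuristics and is what the
   * realtime (compressor_speed == 2) configuration uses.
   */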
1100 
1101   if (cpi->oxcf.tuning == VP8_TUNE_SSIM) {
1102     adjust_act_zbin(cpi, x);
1103     vp8_update_zbin_extra(cpi, x);
1104   }
1105 
1106   if (x->e_mbd.mode_info_context->mbmi.mode == B_PRED) {
1107     vp8_encode_intra4x4mby(x);
1108   } else {
1109     vp8_encode_intra16x16mby(x);
1110   }
1111 
1112   vp8_encode_intra16x16mbuv(x);
1113 
1114   sum_intra_stats(cpi, x);
1115 
1116   vp8_tokenize_mb(cpi, x, t);
1117 
1118   if (xd->mode_info_context->mbmi.mode != B_PRED) vp8_inverse_transform_mby(xd);
1119 
1120   vp8_dequant_idct_add_uv_block(xd->qcoeff + 16 * 16, xd->dequant_uv,
1121                                 xd->dst.u_buffer, xd->dst.v_buffer,
1122                                 xd->dst.uv_stride, xd->eobs + 16);
1123   return rate;
1124 }
1125 #ifdef SPEEDSTATS
1126 extern int cnt_pm;
1127 #endif
1128 
1129 extern void vp8_fix_contexts(MACROBLOCKD *x);
1130 
1131 int vp8cx_encode_inter_macroblock(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t,
1132                                   int recon_yoffset, int recon_uvoffset,
1133                                   int mb_row, int mb_col) {
1134   MACROBLOCKD *const xd = &x->e_mbd;
1135   int intra_error = 0;
1136   int rate;
1137   int distortion;
1138 
1139   x->skip = 0;
1140 
1141   if (xd->segmentation_enabled) {
1142     x->encode_breakout =
1143         cpi->segment_encode_breakout[xd->mode_info_context->mbmi.segment_id];
1144   } else {
1145     x->encode_breakout = cpi->oxcf.encode_breakout;
1146   }
1147 
1148 #if CONFIG_TEMPORAL_DENOISING
1149   /* Reset the best sse mode/mv for each macroblock. */
1150   x->best_reference_frame = INTRA_FRAME;
1151   x->best_zeromv_reference_frame = INTRA_FRAME;
1152   x->best_sse_inter_mode = 0;
1153   x->best_sse_mv.as_int = 0;
1154   x->need_to_clamp_best_mvs = 0;
1155 #endif
1156 
1157   if (cpi->sf.RD) {
1158     int zbin_mode_boost_enabled = x->zbin_mode_boost_enabled;
1159 
1160     /* Are we using the fast quantizer for the mode selection? */
1161     if (cpi->sf.use_fastquant_for_pick) {
1162       x->quantize_b = vp8_fast_quantize_b;
1163 
1164       /* the fast quantizer does not use zbin_extra, so
1165        * do not recalculate */
1166       x->zbin_mode_boost_enabled = 0;
1167     }
1168     vp8_rd_pick_inter_mode(cpi, x, recon_yoffset, recon_uvoffset, &rate,
1169                            &distortion, &intra_error, mb_row, mb_col);
1170 
1171     /* switch back to the regular quantizer for the encode */
1172     if (cpi->sf.improved_quant) {
1173       x->quantize_b = vp8_regular_quantize_b;
1174     }
1175 
1176     /* restore cpi->zbin_mode_boost_enabled */
1177     x->zbin_mode_boost_enabled = zbin_mode_boost_enabled;
1178 
1179   } else {
1180     vp8_pick_inter_mode(cpi, x, recon_yoffset, recon_uvoffset, &rate,
1181                         &distortion, &intra_error, mb_row, mb_col);
1182   }
1183 
1184   x->prediction_error += distortion;
1185   x->intra_error += intra_error;
1186 
1187   if (cpi->oxcf.tuning == VP8_TUNE_SSIM) {
1188     /* Adjust the zbin based on this MB rate. */
1189     adjust_act_zbin(cpi, x);
1190   }
1191 
1192 #if 0
1193     /* Experimental RD code */
1194     cpi->frame_distortion += distortion;
1195     cpi->last_mb_distortion = distortion;
1196 #endif
1197 
1198   /* MB level adjustment to quantizer setup */
1199   if (xd->segmentation_enabled) {
1200     /* If cyclic update enabled */
1201     if (cpi->current_layer == 0 && cpi->cyclic_refresh_mode_enabled) {
1202       /* Clear segment_id back to 0 if not coded (last frame 0,0) */
1203       if ((xd->mode_info_context->mbmi.segment_id == 1) &&
1204           ((xd->mode_info_context->mbmi.ref_frame != LAST_FRAME) ||
1205            (xd->mode_info_context->mbmi.mode != ZEROMV))) {
1206         xd->mode_info_context->mbmi.segment_id = 0;
1207 
1208         /* segment_id changed, so update */
1209         vp8cx_mb_init_quantizer(cpi, x, 1);
1210       }
1211     }
1212   }
1213 
1214   {
1215     /* Experimental code.
1216      * Special case for gf and arf zeromv modes, for 1 temporal layer.
1217      * Increase zbin size to suppress noise.
1218      */
1219     x->zbin_mode_boost = 0;
1220     if (x->zbin_mode_boost_enabled) {
1221       if (xd->mode_info_context->mbmi.ref_frame != INTRA_FRAME) {
1222         if (xd->mode_info_context->mbmi.mode == ZEROMV) {
1223           if (xd->mode_info_context->mbmi.ref_frame != LAST_FRAME &&
1224               cpi->oxcf.number_of_layers == 1) {
1225             x->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST;
1226           } else {
1227             x->zbin_mode_boost = LF_ZEROMV_ZBIN_BOOST;
1228           }
1229         } else if (xd->mode_info_context->mbmi.mode == SPLITMV) {
1230           x->zbin_mode_boost = 0;
1231         } else {
1232           x->zbin_mode_boost = MV_ZBIN_BOOST;
1233         }
1234       }
1235     }
1236 
1237     /* The fast quantizer doesn't use zbin_extra, so only recalculate it
1238      * when the regular quantizer is in use. */
1239     if (cpi->sf.improved_quant) vp8_update_zbin_extra(cpi, x);
1240   }
1241 
1242   x->count_mb_ref_frame_usage[xd->mode_info_context->mbmi.ref_frame]++;
1243 
1244   if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) {
1245     vp8_encode_intra16x16mbuv(x);
1246 
1247     if (xd->mode_info_context->mbmi.mode == B_PRED) {
1248       vp8_encode_intra4x4mby(x);
1249     } else {
1250       vp8_encode_intra16x16mby(x);
1251     }
1252 
1253     sum_intra_stats(cpi, x);
1254   } else {
1255     int ref_fb_idx;
1256 
1257     if (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME) {
1258       ref_fb_idx = cpi->common.lst_fb_idx;
1259     } else if (xd->mode_info_context->mbmi.ref_frame == GOLDEN_FRAME) {
1260       ref_fb_idx = cpi->common.gld_fb_idx;
1261     } else {
1262       ref_fb_idx = cpi->common.alt_fb_idx;
1263     }
1264 
1265     xd->pre.y_buffer = cpi->common.yv12_fb[ref_fb_idx].y_buffer + recon_yoffset;
1266     xd->pre.u_buffer =
1267         cpi->common.yv12_fb[ref_fb_idx].u_buffer + recon_uvoffset;
1268     xd->pre.v_buffer =
1269         cpi->common.yv12_fb[ref_fb_idx].v_buffer + recon_uvoffset;
1270 
1271     if (!x->skip) {
1272       vp8_encode_inter16x16(x);
1273     } else {
1274       vp8_build_inter16x16_predictors_mb(xd, xd->dst.y_buffer, xd->dst.u_buffer,
1275                                          xd->dst.v_buffer, xd->dst.y_stride,
1276                                          xd->dst.uv_stride);
1277     }
1278   }
1279 
1280   if (!x->skip) {
1281     vp8_tokenize_mb(cpi, x, t);
1282 
1283     if (xd->mode_info_context->mbmi.mode != B_PRED) {
1284       vp8_inverse_transform_mby(xd);
1285     }
1286 
1287     vp8_dequant_idct_add_uv_block(xd->qcoeff + 16 * 16, xd->dequant_uv,
1288                                   xd->dst.u_buffer, xd->dst.v_buffer,
1289                                   xd->dst.uv_stride, xd->eobs + 16);
1290   } else {
1291     /* always set mb_skip_coeff as it is needed by the loopfilter */
1292     xd->mode_info_context->mbmi.mb_skip_coeff = 1;
1293 
1294     if (cpi->common.mb_no_coeff_skip) {
1295       x->skip_true_count++;
1296       vp8_fix_contexts(xd);
1297     } else {
1298       vp8_stuff_mb(cpi, x, t);
1299     }
1300   }
1301 
1302   return rate;
1303 }
1304