xref: /aosp_15_r20/external/libaom/av1/encoder/tx_search.c (revision 77c1e3ccc04c968bd2bc212e87364f250e820521)
/*
 * Copyright (c) 2020, Alliance for Open Media. All rights reserved.
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#include "av1/common/cfl.h"
#include "av1/common/reconintra.h"
#include "av1/encoder/block.h"
#include "av1/encoder/hybrid_fwd_txfm.h"
#include "av1/common/idct.h"
#include "av1/encoder/model_rd.h"
#include "av1/encoder/random.h"
#include "av1/encoder/rdopt_utils.h"
#include "av1/encoder/sorting_network.h"
#include "av1/encoder/tx_prune_model_weights.h"
#include "av1/encoder/tx_search.h"
#include "av1/encoder/txb_rdopt.h"

#define PROB_THRESH_OFFSET_TX_TYPE 100

struct rdcost_block_args {
  const AV1_COMP *cpi;
  MACROBLOCK *x;
  ENTROPY_CONTEXT t_above[MAX_MIB_SIZE];
  ENTROPY_CONTEXT t_left[MAX_MIB_SIZE];
  RD_STATS rd_stats;
  int64_t current_rd;
  int64_t best_rd;
  int exit_early;
  int incomplete_exit;
  FAST_TX_SEARCH_MODE ftxs_mode;
  int skip_trellis;
};

typedef struct {
  int64_t rd;
  int txb_entropy_ctx;
  TX_TYPE tx_type;
} TxCandidateInfo;

// Thresholds for predict_skip_txfm, stored as origin_threshold * 128 / 100.
// Rows are indexed by bit depth (8, 10, 12) and columns by block size.
static const uint32_t skip_pred_threshold[3][BLOCK_SIZES_ALL] = {
  {
      64, 64, 64, 70, 60, 60, 68, 68, 68, 68, 68,
      68, 68, 68, 68, 68, 64, 64, 70, 70, 68, 68,
  },
  {
      88, 88, 88, 86, 87, 87, 68, 68, 68, 68, 68,
      68, 68, 68, 68, 68, 88, 88, 86, 86, 68, 68,
  },
  {
      90, 93, 93, 90, 93, 93, 74, 74, 74, 74, 74,
      74, 74, 74, 74, 74, 90, 90, 90, 90, 74, 74,
  },
};

// lookup table for predict_skip_txfm
// int max_tx_size = max_txsize_rect_lookup[bsize];
// if (tx_size_high[max_tx_size] > 16 || tx_size_wide[max_tx_size] > 16)
//   max_tx_size = AOMMIN(max_txsize_lookup[bsize], TX_16X16);
static const TX_SIZE max_predict_sf_tx_size[BLOCK_SIZES_ALL] = {
  TX_4X4,   TX_4X8,   TX_8X4,   TX_8X8,   TX_8X16,  TX_16X8,
  TX_16X16, TX_16X16, TX_16X16, TX_16X16, TX_16X16, TX_16X16,
  TX_16X16, TX_16X16, TX_16X16, TX_16X16, TX_4X16,  TX_16X4,
  TX_8X8,   TX_8X8,   TX_16X16, TX_16X16,
};

// look-up table for sqrt of number of pixels in a transform block
// rounded up to the nearest integer.
static const int sqrt_tx_pixels_2d[TX_SIZES_ALL] = { 4,  8,  16, 32, 32, 6,  6,
                                                     12, 12, 23, 23, 32, 32, 8,
                                                     8,  16, 16, 23, 23 };

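// Hashes the luma residual of the whole block with CRC32C and folds the
// block size into the low 5 bits (BLOCK_SIZES_ALL fits in 5 bits), so that
// identical residuals of different block sizes map to different keys.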
static inline uint32_t get_block_residue_hash(MACROBLOCK *x, BLOCK_SIZE bsize) {
  const int rows = block_size_high[bsize];
  const int cols = block_size_wide[bsize];
  const int16_t *diff = x->plane[0].src_diff;
  const uint32_t hash =
      av1_get_crc32c_value(&x->txfm_search_info.mb_rd_record->crc_calculator,
                           (uint8_t *)diff, 2 * rows * cols);
  return (hash << 5) + bsize;
}

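// Searches the circular mb_rd_record buffer for an entry whose residue hash
// matches. Returns the index of the match, or -1 if there is none; the
// lookup is skipped entirely when ref_best_rd is INT64_MAX.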
static inline int32_t find_mb_rd_info(const MB_RD_RECORD *const mb_rd_record,
                                      const int64_t ref_best_rd,
                                      const uint32_t hash) {
  int32_t match_index = -1;
  if (ref_best_rd != INT64_MAX) {
    for (int i = 0; i < mb_rd_record->num; ++i) {
      const int index = (mb_rd_record->index_start + i) % RD_RECORD_BUFFER_LEN;
      // If there is a match in the mb_rd_record, fetch the RD decision and
      // terminate early.
      if (mb_rd_record->mb_rd_info[index].hash_value == hash) {
        match_index = index;
        break;
      }
    }
  }
  return match_index;
}

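// Restores a previously cached RD decision: the tx size, per-block skip
// flags, tx types and rd stats are copied from the matching record into the
// current block's search state.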
static inline void fetch_mb_rd_info(int n4, const MB_RD_INFO *const mb_rd_info,
                                    RD_STATS *const rd_stats,
                                    MACROBLOCK *const x) {
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *const mbmi = xd->mi[0];
  mbmi->tx_size = mb_rd_info->tx_size;
  memcpy(x->txfm_search_info.blk_skip, mb_rd_info->blk_skip,
         sizeof(mb_rd_info->blk_skip[0]) * n4);
  av1_copy(mbmi->inter_tx_size, mb_rd_info->inter_tx_size);
  av1_copy_array(xd->tx_type_map, mb_rd_info->tx_type_map, n4);
  *rd_stats = mb_rd_info->rd_stats;
}

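// Computes the sum of squared residuals over the visible part of the
// transform block. If block_mse_q8 is non-NULL, it also receives the mean
// squared error in Q8 precision, i.e. (256 * sse) / num_visible_pixels.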
int64_t av1_pixel_diff_dist(const MACROBLOCK *x, int plane, int blk_row,
                            int blk_col, const BLOCK_SIZE plane_bsize,
                            const BLOCK_SIZE tx_bsize,
                            unsigned int *block_mse_q8) {
  int visible_rows, visible_cols;
  const MACROBLOCKD *xd = &x->e_mbd;
  get_txb_dimensions(xd, plane, plane_bsize, blk_row, blk_col, tx_bsize, NULL,
                     NULL, &visible_cols, &visible_rows);
  const int diff_stride = block_size_wide[plane_bsize];
  const int16_t *diff = x->plane[plane].src_diff;

  diff += ((blk_row * diff_stride + blk_col) << MI_SIZE_LOG2);
  uint64_t sse =
      aom_sum_squares_2d_i16(diff, diff_stride, visible_cols, visible_rows);
  if (block_mse_q8 != NULL) {
    if (visible_cols > 0 && visible_rows > 0)
      *block_mse_q8 =
          (unsigned int)((256 * sse) / (visible_cols * visible_rows));
    else
      *block_mse_q8 = UINT_MAX;
  }
  return sse;
}

// Computes the residual block's SSE, per-pixel mean (scaled to the transform
// domain), MSE in Q8 precision, and variance over all visible pixels in the
// transform block.
static inline int64_t pixel_diff_stats(
    MACROBLOCK *x, int plane, int blk_row, int blk_col,
    const BLOCK_SIZE plane_bsize, const BLOCK_SIZE tx_bsize,
    unsigned int *block_mse_q8, int64_t *per_px_mean, uint64_t *block_var) {
  int visible_rows, visible_cols;
  const MACROBLOCKD *xd = &x->e_mbd;
  get_txb_dimensions(xd, plane, plane_bsize, blk_row, blk_col, tx_bsize, NULL,
                     NULL, &visible_cols, &visible_rows);
  const int diff_stride = block_size_wide[plane_bsize];
  const int16_t *diff = x->plane[plane].src_diff;

  diff += ((blk_row * diff_stride + blk_col) << MI_SIZE_LOG2);
  uint64_t sse = 0;
  int sum = 0;
  sse = aom_sum_sse_2d_i16(diff, diff_stride, visible_cols, visible_rows, &sum);
  if (visible_cols > 0 && visible_rows > 0) {
    double norm_factor = 1.0 / (visible_cols * visible_rows);
    int sign_sum = sum > 0 ? 1 : -1;
    // Conversion to transform domain
    *per_px_mean = (int64_t)(norm_factor * abs(sum)) << 7;
    *per_px_mean = sign_sum * (*per_px_mean);
    *block_mse_q8 = (unsigned int)(norm_factor * (256 * sse));
    *block_var = (uint64_t)(sse - (uint64_t)(norm_factor * sum * sum));
  } else {
    *block_mse_q8 = UINT_MAX;
  }
  return sse;
}

// Uses simple features on top of DCT coefficients to quickly predict
// whether the optimal RD decision is to skip encoding the residual.
// The SSE value is stored in *dist.
static int predict_skip_txfm(MACROBLOCK *x, BLOCK_SIZE bsize, int64_t *dist,
                             int reduced_tx_set) {
  const TxfmSearchParams *txfm_params = &x->txfm_search_params;
  const int bw = block_size_wide[bsize];
  const int bh = block_size_high[bsize];
  const MACROBLOCKD *xd = &x->e_mbd;
  const int16_t dc_q = av1_dc_quant_QTX(x->qindex, 0, xd->bd);

  *dist = av1_pixel_diff_dist(x, 0, 0, 0, bsize, bsize, NULL);

  const int64_t mse = *dist / bw / bh;
  // Normalized quantizer takes the transform upscaling factor (8 for tx size
  // smaller than 32) into account.
  const int16_t normalized_dc_q = dc_q >> 3;
  const int64_t mse_thresh = (int64_t)normalized_dc_q * normalized_dc_q / 8;
  // For a faster early-skip decision, compare dist (rather than mse) against
  // the threshold so that the quality risk of the skip = 1 decision is lower.
  // Otherwise use mse, since the fwd_txfm coefficient checks below will take
  // care of quality.
  // TODO(any): Use dist to return 0 when skip_txfm_level is 1
  int64_t pred_err = (txfm_params->skip_txfm_level >= 2) ? *dist : mse;
  // Predict not to skip when the error is larger than the threshold.
  if (pred_err > mse_thresh) return 0;
  // Otherwise, for aggressive early skip, return the skip decision right away.
  else if (txfm_params->skip_txfm_level >= 2)
    return 1;

  const int max_tx_size = max_predict_sf_tx_size[bsize];
  const int tx_h = tx_size_high[max_tx_size];
  const int tx_w = tx_size_wide[max_tx_size];
  DECLARE_ALIGNED(32, tran_low_t, coefs[32 * 32]);
  TxfmParam param;
  param.tx_type = DCT_DCT;
  param.tx_size = max_tx_size;
  param.bd = xd->bd;
  param.is_hbd = is_cur_buf_hbd(xd);
  param.lossless = 0;
  param.tx_set_type = av1_get_ext_tx_set_type(
      param.tx_size, is_inter_block(xd->mi[0]), reduced_tx_set);
  const int bd_idx = (xd->bd == 8) ? 0 : ((xd->bd == 10) ? 1 : 2);
  const uint32_t max_qcoef_thresh = skip_pred_threshold[bd_idx][bsize];
  const int16_t *src_diff = x->plane[0].src_diff;
  const int n_coeff = tx_w * tx_h;
  const int16_t ac_q = av1_ac_quant_QTX(x->qindex, 0, xd->bd);
  const uint32_t dc_thresh = max_qcoef_thresh * dc_q;
  const uint32_t ac_thresh = max_qcoef_thresh * ac_q;
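  // The block is predicted skippable only if, for every tx-sized sub-block,
  // the DC and AC coefficients (scaled by 128 via the << 7 below) stay below
  // the corresponding quantizer-scaled thresholds.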
  for (int row = 0; row < bh; row += tx_h) {
    for (int col = 0; col < bw; col += tx_w) {
      av1_fwd_txfm(src_diff + col, coefs, bw, &param);
      // Operating on TX domain, not pixels; we want the QTX quantizers
      const uint32_t dc_coef = (((uint32_t)abs(coefs[0])) << 7);
      if (dc_coef >= dc_thresh) return 0;
      for (int i = 1; i < n_coeff; ++i) {
        const uint32_t ac_coef = (((uint32_t)abs(coefs[i])) << 7);
        if (ac_coef >= ac_thresh) return 0;
      }
    }
    src_diff += tx_h * bw;
  }
  return 1;
}

// Used to set proper context for early termination with skip = 1.
static inline void set_skip_txfm(MACROBLOCK *x, RD_STATS *rd_stats,
                                 BLOCK_SIZE bsize, int64_t dist) {
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *const mbmi = xd->mi[0];
  const int n4 = bsize_to_num_blk(bsize);
  const TX_SIZE tx_size = max_txsize_rect_lookup[bsize];
  memset(xd->tx_type_map, DCT_DCT, sizeof(xd->tx_type_map[0]) * n4);
  memset(mbmi->inter_tx_size, tx_size, sizeof(mbmi->inter_tx_size));
  mbmi->tx_size = tx_size;
  for (int i = 0; i < n4; ++i)
    set_blk_skip(x->txfm_search_info.blk_skip, 0, i, 1);
  rd_stats->skip_txfm = 1;
  if (is_cur_buf_hbd(xd)) dist = ROUND_POWER_OF_TWO(dist, (xd->bd - 8) * 2);
  rd_stats->dist = rd_stats->sse = (dist << 4);
  // Though the decision is to mark the block as skip based on luma stats,
  // it is possible that the block becomes non-skip after chroma rd. In
  // addition, intermediate non-skip costs calculated by the caller function
  // will be incorrect if the rate is set to zero (i.e., if zero_blk_rate is
  // not accounted for). Hence an intermediate rate is populated here to code
  // the luma tx blocks as skip; the caller then sets the final rate based on
  // the final rd decision (i.e., skip vs non-skip). The rate populated here
  // corresponds to coding all the tx blocks in the current block with
  // zero_blk_rate (based on the max tx size possible). E.g., for a 128x128
  // block, the rate would be 4 * zero_blk_rate, where zero_blk_rate
  // corresponds to coding one 64x64 tx block as 'all zeros'.
  ENTROPY_CONTEXT ctxa[MAX_MIB_SIZE];
  ENTROPY_CONTEXT ctxl[MAX_MIB_SIZE];
  av1_get_entropy_contexts(bsize, &xd->plane[0], ctxa, ctxl);
  ENTROPY_CONTEXT *ta = ctxa;
  ENTROPY_CONTEXT *tl = ctxl;
  const TX_SIZE txs_ctx = get_txsize_entropy_ctx(tx_size);
  TXB_CTX txb_ctx;
  get_txb_ctx(bsize, tx_size, 0, ta, tl, &txb_ctx);
  const int zero_blk_rate = x->coeff_costs.coeff_costs[txs_ctx][PLANE_TYPE_Y]
                                .txb_skip_cost[txb_ctx.txb_skip_ctx][1];
  rd_stats->rate = zero_blk_rate *
                   (block_size_wide[bsize] >> tx_size_wide_log2[tx_size]) *
                   (block_size_high[bsize] >> tx_size_high_log2[tx_size]);
}

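// Saves the current RD decision into mb_rd_record, which acts as a FIFO
// ring buffer: while not full, entries are appended; once full, the oldest
// entry (at index_start) is overwritten and index_start advances by one.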
static inline void save_mb_rd_info(int n4, uint32_t hash,
                                   const MACROBLOCK *const x,
                                   const RD_STATS *const rd_stats,
                                   MB_RD_RECORD *mb_rd_record) {
  int index;
  if (mb_rd_record->num < RD_RECORD_BUFFER_LEN) {
    index =
        (mb_rd_record->index_start + mb_rd_record->num) % RD_RECORD_BUFFER_LEN;
    ++mb_rd_record->num;
  } else {
    index = mb_rd_record->index_start;
    mb_rd_record->index_start =
        (mb_rd_record->index_start + 1) % RD_RECORD_BUFFER_LEN;
  }
  MB_RD_INFO *const mb_rd_info = &mb_rd_record->mb_rd_info[index];
  const MACROBLOCKD *const xd = &x->e_mbd;
  const MB_MODE_INFO *const mbmi = xd->mi[0];
  mb_rd_info->hash_value = hash;
  mb_rd_info->tx_size = mbmi->tx_size;
  memcpy(mb_rd_info->blk_skip, x->txfm_search_info.blk_skip,
         sizeof(mb_rd_info->blk_skip[0]) * n4);
  av1_copy(mb_rd_info->inter_tx_size, mbmi->inter_tx_size);
  av1_copy_array(mb_rd_info->tx_type_map, xd->tx_type_map, n4);
  mb_rd_info->rd_stats = *rd_stats;
}

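// Chooses the depth at which the recursive tx-size search starts. Returning
// MAX_VARTX_DEPTH starts the search at the maximum depth, which disables
// further splitting in select_tx_block, so only the largest transform size
// is evaluated.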
static int get_search_init_depth(int mi_width, int mi_height, int is_inter,
                                 const SPEED_FEATURES *sf,
                                 int tx_size_search_method) {
  if (tx_size_search_method == USE_LARGESTALL) return MAX_VARTX_DEPTH;

  if (sf->tx_sf.tx_size_search_lgr_block) {
    if (mi_width > mi_size_wide[BLOCK_64X64] ||
        mi_height > mi_size_high[BLOCK_64X64])
      return MAX_VARTX_DEPTH;
  }

  if (is_inter) {
    return (mi_height != mi_width)
               ? sf->tx_sf.inter_tx_size_search_init_depth_rect
               : sf->tx_sf.inter_tx_size_search_init_depth_sqr;
  } else {
    return (mi_height != mi_width)
               ? sf->tx_sf.intra_tx_size_search_init_depth_rect
               : sf->tx_sf.intra_tx_size_search_init_depth_sqr;
  }
}

static inline void select_tx_block(
    const AV1_COMP *cpi, MACROBLOCK *x, int blk_row, int blk_col, int block,
    TX_SIZE tx_size, int depth, BLOCK_SIZE plane_bsize, ENTROPY_CONTEXT *ta,
    ENTROPY_CONTEXT *tl, TXFM_CONTEXT *tx_above, TXFM_CONTEXT *tx_left,
    RD_STATS *rd_stats, int64_t prev_level_rd, int64_t ref_best_rd,
    int *is_cost_valid, FAST_TX_SEARCH_MODE ftxs_mode);

// NOTE: CONFIG_COLLECT_RD_STATS has 3 possible values
// 0: Do not collect any RD stats
// 1: Collect RD stats for transform units
// 2: Collect RD stats for partition units
#if CONFIG_COLLECT_RD_STATS

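// Splits the block into a 4x4 grid of sub-blocks and measures each
// sub-block's prediction error energy. hordist[] receives the normalized
// per-column energy fractions and verdist[] the per-row fractions; the
// fourth entry of each is only written when need_4th is set.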
static inline void get_energy_distribution_fine(
    const AV1_COMP *cpi, BLOCK_SIZE bsize, const uint8_t *src, int src_stride,
    const uint8_t *dst, int dst_stride, int need_4th, double *hordist,
    double *verdist) {
  const int bw = block_size_wide[bsize];
  const int bh = block_size_high[bsize];
  unsigned int esq[16] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };

  if (bsize < BLOCK_16X16 || (bsize >= BLOCK_4X16 && bsize <= BLOCK_32X8)) {
    // Special cases: calculate 'esq' values manually, as we don't have 'vf'
    // functions for the 16 (very small) sub-blocks of this block.
    const int w_shift = (bw == 4) ? 0 : (bw == 8) ? 1 : (bw == 16) ? 2 : 3;
    const int h_shift = (bh == 4) ? 0 : (bh == 8) ? 1 : (bh == 16) ? 2 : 3;
    assert(bw <= 32);
    assert(bh <= 32);
    assert(((bw - 1) >> w_shift) + (((bh - 1) >> h_shift) << 2) == 15);
    if (cpi->common.seq_params->use_highbitdepth) {
      const uint16_t *src16 = CONVERT_TO_SHORTPTR(src);
      const uint16_t *dst16 = CONVERT_TO_SHORTPTR(dst);
      for (int i = 0; i < bh; ++i)
        for (int j = 0; j < bw; ++j) {
          const int index = (j >> w_shift) + ((i >> h_shift) << 2);
          esq[index] +=
              (src16[j + i * src_stride] - dst16[j + i * dst_stride]) *
              (src16[j + i * src_stride] - dst16[j + i * dst_stride]);
        }
    } else {
      for (int i = 0; i < bh; ++i)
        for (int j = 0; j < bw; ++j) {
          const int index = (j >> w_shift) + ((i >> h_shift) << 2);
          esq[index] += (src[j + i * src_stride] - dst[j + i * dst_stride]) *
                        (src[j + i * src_stride] - dst[j + i * dst_stride]);
        }
    }
  } else {  // Calculate 'esq' values using 'vf' functions on the 16 sub-blocks.
    const int f_index =
        (bsize < BLOCK_SIZES) ? bsize - BLOCK_16X16 : bsize - BLOCK_8X16;
    assert(f_index >= 0 && f_index < BLOCK_SIZES_ALL);
    const BLOCK_SIZE subsize = (BLOCK_SIZE)f_index;
    assert(block_size_wide[bsize] == 4 * block_size_wide[subsize]);
    assert(block_size_high[bsize] == 4 * block_size_high[subsize]);
    cpi->ppi->fn_ptr[subsize].vf(src, src_stride, dst, dst_stride, &esq[0]);
    cpi->ppi->fn_ptr[subsize].vf(src + bw / 4, src_stride, dst + bw / 4,
                                 dst_stride, &esq[1]);
    cpi->ppi->fn_ptr[subsize].vf(src + bw / 2, src_stride, dst + bw / 2,
                                 dst_stride, &esq[2]);
    cpi->ppi->fn_ptr[subsize].vf(src + 3 * bw / 4, src_stride, dst + 3 * bw / 4,
                                 dst_stride, &esq[3]);
    src += bh / 4 * src_stride;
    dst += bh / 4 * dst_stride;

    cpi->ppi->fn_ptr[subsize].vf(src, src_stride, dst, dst_stride, &esq[4]);
    cpi->ppi->fn_ptr[subsize].vf(src + bw / 4, src_stride, dst + bw / 4,
                                 dst_stride, &esq[5]);
    cpi->ppi->fn_ptr[subsize].vf(src + bw / 2, src_stride, dst + bw / 2,
                                 dst_stride, &esq[6]);
    cpi->ppi->fn_ptr[subsize].vf(src + 3 * bw / 4, src_stride, dst + 3 * bw / 4,
                                 dst_stride, &esq[7]);
    src += bh / 4 * src_stride;
    dst += bh / 4 * dst_stride;

    cpi->ppi->fn_ptr[subsize].vf(src, src_stride, dst, dst_stride, &esq[8]);
    cpi->ppi->fn_ptr[subsize].vf(src + bw / 4, src_stride, dst + bw / 4,
                                 dst_stride, &esq[9]);
    cpi->ppi->fn_ptr[subsize].vf(src + bw / 2, src_stride, dst + bw / 2,
                                 dst_stride, &esq[10]);
    cpi->ppi->fn_ptr[subsize].vf(src + 3 * bw / 4, src_stride, dst + 3 * bw / 4,
                                 dst_stride, &esq[11]);
    src += bh / 4 * src_stride;
    dst += bh / 4 * dst_stride;

    cpi->ppi->fn_ptr[subsize].vf(src, src_stride, dst, dst_stride, &esq[12]);
    cpi->ppi->fn_ptr[subsize].vf(src + bw / 4, src_stride, dst + bw / 4,
                                 dst_stride, &esq[13]);
    cpi->ppi->fn_ptr[subsize].vf(src + bw / 2, src_stride, dst + bw / 2,
                                 dst_stride, &esq[14]);
    cpi->ppi->fn_ptr[subsize].vf(src + 3 * bw / 4, src_stride, dst + 3 * bw / 4,
                                 dst_stride, &esq[15]);
  }

  double total = (double)esq[0] + esq[1] + esq[2] + esq[3] + esq[4] + esq[5] +
                 esq[6] + esq[7] + esq[8] + esq[9] + esq[10] + esq[11] +
                 esq[12] + esq[13] + esq[14] + esq[15];
  if (total > 0) {
    const double e_recip = 1.0 / total;
    hordist[0] = ((double)esq[0] + esq[4] + esq[8] + esq[12]) * e_recip;
    hordist[1] = ((double)esq[1] + esq[5] + esq[9] + esq[13]) * e_recip;
    hordist[2] = ((double)esq[2] + esq[6] + esq[10] + esq[14]) * e_recip;
    if (need_4th) {
      hordist[3] = ((double)esq[3] + esq[7] + esq[11] + esq[15]) * e_recip;
    }
    verdist[0] = ((double)esq[0] + esq[1] + esq[2] + esq[3]) * e_recip;
    verdist[1] = ((double)esq[4] + esq[5] + esq[6] + esq[7]) * e_recip;
    verdist[2] = ((double)esq[8] + esq[9] + esq[10] + esq[11]) * e_recip;
    if (need_4th) {
      verdist[3] = ((double)esq[12] + esq[13] + esq[14] + esq[15]) * e_recip;
    }
  } else {
    hordist[0] = verdist[0] = 0.25;
    hordist[1] = verdist[1] = 0.25;
    hordist[2] = verdist[2] = 0.25;
    if (need_4th) {
      hordist[3] = verdist[3] = 0.25;
    }
  }
}

static double get_sse_norm(const int16_t *diff, int stride, int w, int h) {
  double sum = 0.0;
  for (int j = 0; j < h; ++j) {
    for (int i = 0; i < w; ++i) {
      const int err = diff[j * stride + i];
      sum += err * err;
    }
  }
  assert(w > 0 && h > 0);
  return sum / (w * h);
}

static double get_sad_norm(const int16_t *diff, int stride, int w, int h) {
  double sum = 0.0;
  for (int j = 0; j < h; ++j) {
    for (int i = 0; i < w; ++i) {
      sum += abs(diff[j * stride + i]);
    }
  }
  assert(w > 0 && h > 0);
  return sum / (w * h);
}

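// Computes per-quadrant SSE and SAD, each normalized by the quadrant's
// pixel count. When no half-size block type exists, the stats are computed
// directly from src_diff instead of via the fn_ptr variance/SAD helpers.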
static inline void get_2x2_normalized_sses_and_sads(
    const AV1_COMP *const cpi, BLOCK_SIZE tx_bsize, const uint8_t *const src,
    int src_stride, const uint8_t *const dst, int dst_stride,
    const int16_t *const src_diff, int diff_stride, double *const sse_norm_arr,
    double *const sad_norm_arr) {
  const BLOCK_SIZE tx_bsize_half =
      get_partition_subsize(tx_bsize, PARTITION_SPLIT);
  if (tx_bsize_half == BLOCK_INVALID) {  // manually calculate stats
    const int half_width = block_size_wide[tx_bsize] / 2;
    const int half_height = block_size_high[tx_bsize] / 2;
    for (int row = 0; row < 2; ++row) {
      for (int col = 0; col < 2; ++col) {
        const int16_t *const this_src_diff =
            src_diff + row * half_height * diff_stride + col * half_width;
        if (sse_norm_arr) {
          sse_norm_arr[row * 2 + col] =
              get_sse_norm(this_src_diff, diff_stride, half_width, half_height);
        }
        if (sad_norm_arr) {
          sad_norm_arr[row * 2 + col] =
              get_sad_norm(this_src_diff, diff_stride, half_width, half_height);
        }
      }
    }
  } else {  // use function pointers to calculate stats
    const int half_width = block_size_wide[tx_bsize_half];
    const int half_height = block_size_high[tx_bsize_half];
    const int num_samples_half = half_width * half_height;
    for (int row = 0; row < 2; ++row) {
      for (int col = 0; col < 2; ++col) {
        const uint8_t *const this_src =
            src + row * half_height * src_stride + col * half_width;
        const uint8_t *const this_dst =
            dst + row * half_height * dst_stride + col * half_width;

        if (sse_norm_arr) {
          unsigned int this_sse;
          cpi->ppi->fn_ptr[tx_bsize_half].vf(this_src, src_stride, this_dst,
                                             dst_stride, &this_sse);
          sse_norm_arr[row * 2 + col] = (double)this_sse / num_samples_half;
        }

        if (sad_norm_arr) {
          const unsigned int this_sad = cpi->ppi->fn_ptr[tx_bsize_half].sdf(
              this_src, src_stride, this_dst, dst_stride);
          sad_norm_arr[row * 2 + col] = (double)this_sad / num_samples_half;
        }
      }
    }
  }
}

#if CONFIG_COLLECT_RD_STATS == 1
static double get_mean(const int16_t *diff, int stride, int w, int h) {
  double sum = 0.0;
  for (int j = 0; j < h; ++j) {
    for (int i = 0; i < w; ++i) {
      sum += diff[j * stride + i];
    }
  }
  assert(w > 0 && h > 0);
  return sum / (w * h);
}

static inline void PrintTransformUnitStats(
    const AV1_COMP *const cpi, MACROBLOCK *x, const RD_STATS *const rd_stats,
    int blk_row, int blk_col, BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
    TX_TYPE tx_type, int64_t rd) {
  if (rd_stats->rate == INT_MAX || rd_stats->dist == INT64_MAX) return;

  // Generate small sample to restrict output size.
  static unsigned int seed = 21743;
  if (lcg_rand16(&seed) % 256 > 0) return;

  const char output_file[] = "tu_stats.txt";
  FILE *fout = fopen(output_file, "a");
  if (!fout) return;

  const BLOCK_SIZE tx_bsize = txsize_to_bsize[tx_size];
  const MACROBLOCKD *const xd = &x->e_mbd;
  const int plane = 0;
  struct macroblock_plane *const p = &x->plane[plane];
  const struct macroblockd_plane *const pd = &xd->plane[plane];
  const int txw = tx_size_wide[tx_size];
  const int txh = tx_size_high[tx_size];
  const int dequant_shift = (is_cur_buf_hbd(xd)) ? xd->bd - 5 : 3;
  const int q_step = p->dequant_QTX[1] >> dequant_shift;
  const int num_samples = txw * txh;

  const double rate_norm = (double)rd_stats->rate / num_samples;
  const double dist_norm = (double)rd_stats->dist / num_samples;

  fprintf(fout, "%g %g", rate_norm, dist_norm);

  const int src_stride = p->src.stride;
  const uint8_t *const src =
      &p->src.buf[(blk_row * src_stride + blk_col) << MI_SIZE_LOG2];
  const int dst_stride = pd->dst.stride;
  const uint8_t *const dst =
      &pd->dst.buf[(blk_row * dst_stride + blk_col) << MI_SIZE_LOG2];
  unsigned int sse;
  cpi->ppi->fn_ptr[tx_bsize].vf(src, src_stride, dst, dst_stride, &sse);
  const double sse_norm = (double)sse / num_samples;

  const unsigned int sad =
      cpi->ppi->fn_ptr[tx_bsize].sdf(src, src_stride, dst, dst_stride);
  const double sad_norm = (double)sad / num_samples;

  fprintf(fout, " %g %g", sse_norm, sad_norm);

  const int diff_stride = block_size_wide[plane_bsize];
  const int16_t *const src_diff =
      &p->src_diff[(blk_row * diff_stride + blk_col) << MI_SIZE_LOG2];

  double sse_norm_arr[4], sad_norm_arr[4];
  get_2x2_normalized_sses_and_sads(cpi, tx_bsize, src, src_stride, dst,
                                   dst_stride, src_diff, diff_stride,
                                   sse_norm_arr, sad_norm_arr);
  for (int i = 0; i < 4; ++i) {
    fprintf(fout, " %g", sse_norm_arr[i]);
  }
  for (int i = 0; i < 4; ++i) {
    fprintf(fout, " %g", sad_norm_arr[i]);
  }

  const TX_TYPE_1D tx_type_1d_row = htx_tab[tx_type];
  const TX_TYPE_1D tx_type_1d_col = vtx_tab[tx_type];

  fprintf(fout, " %d %d %d %d %d", q_step, tx_size_wide[tx_size],
          tx_size_high[tx_size], tx_type_1d_row, tx_type_1d_col);

  int model_rate;
  int64_t model_dist;
  model_rd_sse_fn[MODELRD_CURVFIT](cpi, x, tx_bsize, plane, sse, num_samples,
                                   &model_rate, &model_dist);
  const double model_rate_norm = (double)model_rate / num_samples;
  const double model_dist_norm = (double)model_dist / num_samples;
  fprintf(fout, " %g %g", model_rate_norm, model_dist_norm);

  const double mean = get_mean(src_diff, diff_stride, txw, txh);
  float hor_corr, vert_corr;
  av1_get_horver_correlation_full(src_diff, diff_stride, txw, txh, &hor_corr,
                                  &vert_corr);
  fprintf(fout, " %g %g %g", mean, hor_corr, vert_corr);

  double hdist[4] = { 0 }, vdist[4] = { 0 };
  get_energy_distribution_fine(cpi, tx_bsize, src, src_stride, dst, dst_stride,
                               1, hdist, vdist);
  fprintf(fout, " %g %g %g %g %g %g %g %g", hdist[0], hdist[1], hdist[2],
          hdist[3], vdist[0], vdist[1], vdist[2], vdist[3]);

  fprintf(fout, " %d %" PRId64, x->rdmult, rd);

  fprintf(fout, "\n");
  fclose(fout);
}
#endif  // CONFIG_COLLECT_RD_STATS == 1

#if CONFIG_COLLECT_RD_STATS >= 2
static int64_t get_sse(const AV1_COMP *cpi, const MACROBLOCK *x) {
  const AV1_COMMON *cm = &cpi->common;
  const int num_planes = av1_num_planes(cm);
  const MACROBLOCKD *xd = &x->e_mbd;
  const MB_MODE_INFO *mbmi = xd->mi[0];
  int64_t total_sse = 0;
  for (int plane = 0; plane < num_planes; ++plane) {
    const struct macroblock_plane *const p = &x->plane[plane];
    const struct macroblockd_plane *const pd = &xd->plane[plane];
    const BLOCK_SIZE bs =
        get_plane_block_size(mbmi->bsize, pd->subsampling_x, pd->subsampling_y);
    unsigned int sse;

    if (plane) continue;

    cpi->ppi->fn_ptr[bs].vf(p->src.buf, p->src.stride, pd->dst.buf,
                            pd->dst.stride, &sse);
    total_sse += sse;
  }
  total_sse <<= 4;
  return total_sse;
}

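// Estimates the rate and distortion of a block from the per-block-size
// linear model fit on previously coded blocks: the distortion estimate is
// the model's mean distortion, and the residue rate is (sse - dist_mean)
// divided by the modeled slope a * sse + b, clamped to [0, INT_MAX / 2].
// Returns 1 only if the model is ready.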
static int get_est_rate_dist(const TileDataEnc *tile_data, BLOCK_SIZE bsize,
                             int64_t sse, int *est_residue_cost,
                             int64_t *est_dist) {
  const InterModeRdModel *md = &tile_data->inter_mode_rd_models[bsize];
  if (md->ready) {
    if (sse < md->dist_mean) {
      *est_residue_cost = 0;
      *est_dist = sse;
    } else {
      *est_dist = (int64_t)round(md->dist_mean);
      const double est_ld = md->a * sse + md->b;
      // Clamp estimated rate cost by INT_MAX / 2.
      // TODO([email protected]): find better solution than clamping.
      if (fabs(est_ld) < 1e-2) {
        *est_residue_cost = INT_MAX / 2;
      } else {
        double est_residue_cost_dbl = ((sse - md->dist_mean) / est_ld);
        if (est_residue_cost_dbl < 0) {
          *est_residue_cost = 0;
        } else {
          *est_residue_cost =
              (int)AOMMIN((int64_t)round(est_residue_cost_dbl), INT_MAX / 2);
        }
      }
      if (*est_residue_cost <= 0) {
        *est_residue_cost = 0;
        *est_dist = sse;
      }
    }
    return 1;
  }
  return 0;
}

static double get_highbd_diff_mean(const uint8_t *src8, int src_stride,
                                   const uint8_t *dst8, int dst_stride, int w,
                                   int h) {
  const uint16_t *src = CONVERT_TO_SHORTPTR(src8);
  const uint16_t *dst = CONVERT_TO_SHORTPTR(dst8);
  double sum = 0.0;
  for (int j = 0; j < h; ++j) {
    for (int i = 0; i < w; ++i) {
      const int diff = src[j * src_stride + i] - dst[j * dst_stride + i];
      sum += diff;
    }
  }
  assert(w > 0 && h > 0);
  return sum / (w * h);
}

static double get_diff_mean(const uint8_t *src, int src_stride,
                            const uint8_t *dst, int dst_stride, int w, int h) {
  double sum = 0.0;
  for (int j = 0; j < h; ++j) {
    for (int i = 0; i < w; ++i) {
      const int diff = src[j * src_stride + i] - dst[j * dst_stride + i];
      sum += diff;
    }
  }
  assert(w > 0 && h > 0);
  return sum / (w * h);
}

static inline void PrintPredictionUnitStats(const AV1_COMP *const cpi,
                                            const TileDataEnc *tile_data,
                                            MACROBLOCK *x,
                                            const RD_STATS *const rd_stats,
                                            BLOCK_SIZE plane_bsize) {
  if (rd_stats->rate == INT_MAX || rd_stats->dist == INT64_MAX) return;

  if (cpi->sf.inter_sf.inter_mode_rd_model_estimation == 1 &&
      (tile_data == NULL ||
       !tile_data->inter_mode_rd_models[plane_bsize].ready))
    return;
  (void)tile_data;
  // Generate small sample to restrict output size.
  static unsigned int seed = 95014;

  if ((lcg_rand16(&seed) % (1 << (14 - num_pels_log2_lookup[plane_bsize]))) !=
      1)
    return;

  const char output_file[] = "pu_stats.txt";
  FILE *fout = fopen(output_file, "a");
  if (!fout) return;

  MACROBLOCKD *const xd = &x->e_mbd;
  const int plane = 0;
  struct macroblock_plane *const p = &x->plane[plane];
  struct macroblockd_plane *pd = &xd->plane[plane];
  const int diff_stride = block_size_wide[plane_bsize];
  int bw, bh;
  get_txb_dimensions(xd, plane, plane_bsize, 0, 0, plane_bsize, NULL, NULL, &bw,
                     &bh);
  const int num_samples = bw * bh;
  const int dequant_shift = (is_cur_buf_hbd(xd)) ? xd->bd - 5 : 3;
  const int q_step = p->dequant_QTX[1] >> dequant_shift;
  const int shift = (xd->bd - 8);

  const double rate_norm = (double)rd_stats->rate / num_samples;
  const double dist_norm = (double)rd_stats->dist / num_samples;
  const double rdcost_norm =
      (double)RDCOST(x->rdmult, rd_stats->rate, rd_stats->dist) / num_samples;

  fprintf(fout, "%g %g %g", rate_norm, dist_norm, rdcost_norm);

  const int src_stride = p->src.stride;
  const uint8_t *const src = p->src.buf;
  const int dst_stride = pd->dst.stride;
  const uint8_t *const dst = pd->dst.buf;
  const int16_t *const src_diff = p->src_diff;

  int64_t sse = calculate_sse(xd, p, pd, bw, bh);
  const double sse_norm = (double)sse / num_samples;

  const unsigned int sad =
      cpi->ppi->fn_ptr[plane_bsize].sdf(src, src_stride, dst, dst_stride);
  const double sad_norm =
      (double)sad / (1 << num_pels_log2_lookup[plane_bsize]);

  fprintf(fout, " %g %g", sse_norm, sad_norm);

  double sse_norm_arr[4], sad_norm_arr[4];
  get_2x2_normalized_sses_and_sads(cpi, plane_bsize, src, src_stride, dst,
                                   dst_stride, src_diff, diff_stride,
                                   sse_norm_arr, sad_norm_arr);
  if (shift) {
    for (int k = 0; k < 4; ++k) sse_norm_arr[k] /= (1 << (2 * shift));
    for (int k = 0; k < 4; ++k) sad_norm_arr[k] /= (1 << shift);
  }
  for (int i = 0; i < 4; ++i) {
    fprintf(fout, " %g", sse_norm_arr[i]);
  }
  for (int i = 0; i < 4; ++i) {
    fprintf(fout, " %g", sad_norm_arr[i]);
  }

  fprintf(fout, " %d %d %d %d", q_step, x->rdmult, bw, bh);

  int model_rate;
  int64_t model_dist;
  model_rd_sse_fn[MODELRD_CURVFIT](cpi, x, plane_bsize, plane, sse, num_samples,
                                   &model_rate, &model_dist);
  const double model_rdcost_norm =
      (double)RDCOST(x->rdmult, model_rate, model_dist) / num_samples;
  const double model_rate_norm = (double)model_rate / num_samples;
  const double model_dist_norm = (double)model_dist / num_samples;
  fprintf(fout, " %g %g %g", model_rate_norm, model_dist_norm,
          model_rdcost_norm);

  double mean;
  if (is_cur_buf_hbd(xd)) {
    mean = get_highbd_diff_mean(p->src.buf, p->src.stride, pd->dst.buf,
                                pd->dst.stride, bw, bh);
  } else {
    mean = get_diff_mean(p->src.buf, p->src.stride, pd->dst.buf, pd->dst.stride,
                         bw, bh);
  }
  mean /= (1 << shift);
  float hor_corr, vert_corr;
  av1_get_horver_correlation_full(src_diff, diff_stride, bw, bh, &hor_corr,
                                  &vert_corr);
  fprintf(fout, " %g %g %g", mean, hor_corr, vert_corr);

  double hdist[4] = { 0 }, vdist[4] = { 0 };
  get_energy_distribution_fine(cpi, plane_bsize, src, src_stride, dst,
                               dst_stride, 1, hdist, vdist);
  fprintf(fout, " %g %g %g %g %g %g %g %g", hdist[0], hdist[1], hdist[2],
          hdist[3], vdist[0], vdist[1], vdist[2], vdist[3]);

  if (cpi->sf.inter_sf.inter_mode_rd_model_estimation == 1) {
    assert(tile_data->inter_mode_rd_models[plane_bsize].ready);
    const int64_t overall_sse = get_sse(cpi, x);
    int est_residue_cost = 0;
    int64_t est_dist = 0;
    get_est_rate_dist(tile_data, plane_bsize, overall_sse, &est_residue_cost,
                      &est_dist);
    const double est_residue_cost_norm = (double)est_residue_cost / num_samples;
    const double est_dist_norm = (double)est_dist / num_samples;
    const double est_rdcost_norm =
        (double)RDCOST(x->rdmult, est_residue_cost, est_dist) / num_samples;
    fprintf(fout, " %g %g %g", est_residue_cost_norm, est_dist_norm,
            est_rdcost_norm);
  }

  fprintf(fout, "\n");
  fclose(fout);
}
#endif  // CONFIG_COLLECT_RD_STATS >= 2
#endif  // CONFIG_COLLECT_RD_STATS

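// Convenience wrapper: derives the tx type and size for the given block and
// applies the inverse transform on top of the current prediction in the
// reconstruction buffer. A zero eob means there is no residual to add back.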
static inline void inverse_transform_block_facade(MACROBLOCK *const x,
                                                  int plane, int block,
                                                  int blk_row, int blk_col,
                                                  int eob, int reduced_tx_set) {
  if (!eob) return;
  struct macroblock_plane *const p = &x->plane[plane];
  MACROBLOCKD *const xd = &x->e_mbd;
  tran_low_t *dqcoeff = p->dqcoeff + BLOCK_OFFSET(block);
  const PLANE_TYPE plane_type = get_plane_type(plane);
  const TX_SIZE tx_size = av1_get_tx_size(plane, xd);
  const TX_TYPE tx_type = av1_get_tx_type(xd, plane_type, blk_row, blk_col,
                                          tx_size, reduced_tx_set);

  struct macroblockd_plane *const pd = &xd->plane[plane];
  const int dst_stride = pd->dst.stride;
  uint8_t *dst = &pd->dst.buf[(blk_row * dst_stride + blk_col) << MI_SIZE_LOG2];
  av1_inverse_transform_block(xd, dqcoeff, plane, tx_type, tx_size, dst,
                              dst_stride, eob, reduced_tx_set);
}

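// For intra blocks that are not the last block of the plane, later blocks
// are predicted from this block's reconstruction, so it must be kept up to
// date: optionally redo forward transform + quantization with the chosen tx
// type, then inverse transform into the reconstruction buffer.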
static inline void recon_intra(const AV1_COMP *cpi, MACROBLOCK *x, int plane,
                               int block, int blk_row, int blk_col,
                               BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
                               const TXB_CTX *const txb_ctx, int skip_trellis,
                               TX_TYPE best_tx_type, int do_quant,
                               int *rate_cost, uint16_t best_eob) {
  const AV1_COMMON *cm = &cpi->common;
  MACROBLOCKD *xd = &x->e_mbd;
  MB_MODE_INFO *mbmi = xd->mi[0];
  const int is_inter = is_inter_block(mbmi);
  if (!is_inter && best_eob &&
      (blk_row + tx_size_high_unit[tx_size] < mi_size_high[plane_bsize] ||
       blk_col + tx_size_wide_unit[tx_size] < mi_size_wide[plane_bsize])) {
    // If the quantized coefficients are already stored in the dqcoeff buffer,
    // we don't need to do the transform and quantization again.
    if (do_quant) {
      TxfmParam txfm_param_intra;
      QUANT_PARAM quant_param_intra;
      av1_setup_xform(cm, x, tx_size, best_tx_type, &txfm_param_intra);
      av1_setup_quant(tx_size, !skip_trellis,
                      skip_trellis
                          ? (USE_B_QUANT_NO_TRELLIS ? AV1_XFORM_QUANT_B
                                                    : AV1_XFORM_QUANT_FP)
                          : AV1_XFORM_QUANT_FP,
                      cpi->oxcf.q_cfg.quant_b_adapt, &quant_param_intra);
      av1_setup_qmatrix(&cm->quant_params, xd, plane, tx_size, best_tx_type,
                        &quant_param_intra);
      av1_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize,
                      &txfm_param_intra, &quant_param_intra);
      if (quant_param_intra.use_optimize_b) {
        av1_optimize_b(cpi, x, plane, block, tx_size, best_tx_type, txb_ctx,
                       rate_cost);
      }
    }

    inverse_transform_block_facade(x, plane, block, blk_row, blk_col,
                                   x->plane[plane].eobs[block],
                                   cm->features.reduced_tx_set_used);

    // This may happen because of a hash collision: the eob stored in the hash
    // table is non-zero, but the real eob is zero. We need to make sure
    // tx_type is DCT_DCT in this case.
    if (plane == 0 && x->plane[plane].eobs[block] == 0 &&
        best_tx_type != DCT_DCT) {
      update_txk_array(xd, blk_row, blk_col, tx_size, DCT_DCT);
    }
  }
}
static unsigned pixel_dist_visible_only(
    const AV1_COMP *const cpi, const MACROBLOCK *x, const uint8_t *src,
    const int src_stride, const uint8_t *dst, const int dst_stride,
    const BLOCK_SIZE tx_bsize, int txb_rows, int txb_cols, int visible_rows,
    int visible_cols) {
  unsigned sse;

  if (txb_rows == visible_rows && txb_cols == visible_cols) {
    cpi->ppi->fn_ptr[tx_bsize].vf(src, src_stride, dst, dst_stride, &sse);
    return sse;
  }

#if CONFIG_AV1_HIGHBITDEPTH
  const MACROBLOCKD *xd = &x->e_mbd;
  if (is_cur_buf_hbd(xd)) {
    uint64_t sse64 = aom_highbd_sse_odd_size(src, src_stride, dst, dst_stride,
                                             visible_cols, visible_rows);
    return (unsigned int)ROUND_POWER_OF_TWO(sse64, (xd->bd - 8) * 2);
  }
#else
  (void)x;
#endif
  sse = aom_sse_odd_size(src, src_stride, dst, dst_stride, visible_cols,
                         visible_rows);
  return sse;
}

// Compute the pixel domain distortion from src and dst on all visible 4x4s
// in the transform block.
static unsigned pixel_dist(const AV1_COMP *const cpi, const MACROBLOCK *x,
                           int plane, const uint8_t *src, const int src_stride,
                           const uint8_t *dst, const int dst_stride,
                           int blk_row, int blk_col,
                           const BLOCK_SIZE plane_bsize,
                           const BLOCK_SIZE tx_bsize) {
  int txb_rows, txb_cols, visible_rows, visible_cols;
  const MACROBLOCKD *xd = &x->e_mbd;

  get_txb_dimensions(xd, plane, plane_bsize, blk_row, blk_col, tx_bsize,
                     &txb_cols, &txb_rows, &visible_cols, &visible_rows);
  assert(visible_rows > 0);
  assert(visible_cols > 0);

  unsigned sse = pixel_dist_visible_only(cpi, x, src, src_stride, dst,
                                         dst_stride, tx_bsize, txb_rows,
                                         txb_cols, visible_rows, visible_cols);

  return sse;
}

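// Pixel-domain distortion: copies the current prediction into a scratch
// buffer, applies the inverse transform on top of it, and returns the SSE
// against the source scaled by 16, matching the x16 distortion scale used
// elsewhere in this file (cf. the << 4 in set_skip_txfm).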
static inline int64_t dist_block_px_domain(const AV1_COMP *cpi, MACROBLOCK *x,
                                           int plane, BLOCK_SIZE plane_bsize,
                                           int block, int blk_row, int blk_col,
                                           TX_SIZE tx_size) {
  MACROBLOCKD *const xd = &x->e_mbd;
  const struct macroblock_plane *const p = &x->plane[plane];
  const uint16_t eob = p->eobs[block];
  const BLOCK_SIZE tx_bsize = txsize_to_bsize[tx_size];
  const int bsw = block_size_wide[tx_bsize];
  const int bsh = block_size_high[tx_bsize];
  const int src_stride = x->plane[plane].src.stride;
  const int dst_stride = xd->plane[plane].dst.stride;
  // Scale the transform block index to pixel unit.
  const int src_idx = (blk_row * src_stride + blk_col) << MI_SIZE_LOG2;
  const int dst_idx = (blk_row * dst_stride + blk_col) << MI_SIZE_LOG2;
  const uint8_t *src = &x->plane[plane].src.buf[src_idx];
  const uint8_t *dst = &xd->plane[plane].dst.buf[dst_idx];
  const tran_low_t *dqcoeff = p->dqcoeff + BLOCK_OFFSET(block);

  assert(cpi != NULL);
  assert(tx_size_wide_log2[0] == tx_size_high_log2[0]);

  uint8_t *recon;
  DECLARE_ALIGNED(16, uint16_t, recon16[MAX_TX_SQUARE]);

#if CONFIG_AV1_HIGHBITDEPTH
  if (is_cur_buf_hbd(xd)) {
    recon = CONVERT_TO_BYTEPTR(recon16);
    aom_highbd_convolve_copy(CONVERT_TO_SHORTPTR(dst), dst_stride,
                             CONVERT_TO_SHORTPTR(recon), MAX_TX_SIZE, bsw, bsh);
  } else {
    recon = (uint8_t *)recon16;
    aom_convolve_copy(dst, dst_stride, recon, MAX_TX_SIZE, bsw, bsh);
  }
#else
  recon = (uint8_t *)recon16;
  aom_convolve_copy(dst, dst_stride, recon, MAX_TX_SIZE, bsw, bsh);
#endif

  const PLANE_TYPE plane_type = get_plane_type(plane);
  TX_TYPE tx_type = av1_get_tx_type(xd, plane_type, blk_row, blk_col, tx_size,
                                    cpi->common.features.reduced_tx_set_used);
  av1_inverse_transform_block(xd, dqcoeff, plane, tx_type, tx_size, recon,
                              MAX_TX_SIZE, eob,
                              cpi->common.features.reduced_tx_set_used);

  return 16 * pixel_dist(cpi, x, plane, src, src_stride, recon, MAX_TX_SIZE,
                         blk_row, blk_col, plane_bsize, tx_bsize);
}

// pruning thresholds for prune_txk_type and prune_txk_type_separ
static const int prune_factors[5] = { 200, 200, 120, 80, 40 };  // scale 1000
static const int mul_factors[5] = { 80, 80, 70, 50, 30 };       // scale 100

// R-D costs are sorted in ascending order.
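// A simple insertion sort that keeps the parallel txk[] array of transform
// types in step with rds[]; len never exceeds TX_TYPES (16), so the
// quadratic cost is negligible.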
static inline void sort_rd(int64_t rds[], int txk[], int len) {
  int i, j, k;

  for (i = 1; i <= len - 1; ++i) {
    for (j = 0; j < i; ++j) {
      if (rds[j] > rds[i]) {
        int64_t temprd;
        int tempi;

        temprd = rds[i];
        tempi = txk[i];

        for (k = i; k > j; k--) {
          rds[k] = rds[k - 1];
          txk[k] = txk[k - 1];
        }

        rds[j] = temprd;
        txk[j] = tempi;
        break;
      }
    }
  }
}

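// Transform-domain error weighted by the quantization matrix:
//   error = sum_i (((coeff[i] - dqcoeff[i]) * qm[scan[i]])^2) >> (2 * AOM_QM_BITS)
// with round-to-nearest, and the analogous weighted sum of squared
// coefficients returned through ssz.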
static inline int64_t av1_block_error_qm(const tran_low_t *coeff,
                                         const tran_low_t *dqcoeff,
                                         intptr_t block_size,
                                         const qm_val_t *qmatrix,
                                         const int16_t *scan, int64_t *ssz) {
  int i;
  int64_t error = 0, sqcoeff = 0;

  for (i = 0; i < block_size; i++) {
    int64_t weight = qmatrix[scan[i]];
    int64_t dd = coeff[i] - dqcoeff[i];
    dd *= weight;
    int64_t cc = coeff[i];
    cc *= weight;
    // The ranges of coeff and dqcoeff are
    //  bd8 : 18 bits (including sign)
    //  bd10: 20 bits (including sign)
    //  bd12: 22 bits (including sign)
    // As AOM_QM_BITS is 5, the intermediate quantities in the calculation
    // below should fit in 54 bits, thus no overflow should happen.
    error += (dd * dd + (1 << (2 * AOM_QM_BITS - 1))) >> (2 * AOM_QM_BITS);
    sqcoeff += (cc * cc + (1 << (2 * AOM_QM_BITS - 1))) >> (2 * AOM_QM_BITS);
  }

  *ssz = sqcoeff;
  return error;
}

static inline void dist_block_tx_domain(MACROBLOCK *x, int plane, int block,
                                        TX_SIZE tx_size,
                                        const qm_val_t *qmatrix,
                                        const int16_t *scan, int64_t *out_dist,
                                        int64_t *out_sse) {
  const struct macroblock_plane *const p = &x->plane[plane];
  // Transform domain distortion computation is more efficient as it does
  // not involve an inverse transform, but it is less accurate.
  const int buffer_length = av1_get_max_eob(tx_size);
  int64_t this_sse;
  // TX-domain results need to shift down to Q2/D10 to match pixel
  // domain distortion values which are in Q2^2
  int shift = (MAX_TX_SCALE - av1_get_tx_scale(tx_size)) * 2;
  const int block_offset = BLOCK_OFFSET(block);
  tran_low_t *const coeff = p->coeff + block_offset;
  tran_low_t *const dqcoeff = p->dqcoeff + block_offset;
#if CONFIG_AV1_HIGHBITDEPTH
  MACROBLOCKD *const xd = &x->e_mbd;
  if (is_cur_buf_hbd(xd)) {
    // TODO(veluca): handle use_qm_dist_metric for HBD too.
    *out_dist = av1_highbd_block_error(coeff, dqcoeff, buffer_length, &this_sse,
                                       xd->bd);
  } else {
#endif
    if (qmatrix == NULL || !x->txfm_search_params.use_qm_dist_metric) {
      *out_dist = av1_block_error(coeff, dqcoeff, buffer_length, &this_sse);
    } else {
      *out_dist = av1_block_error_qm(coeff, dqcoeff, buffer_length, qmatrix,
                                     scan, &this_sse);
    }
#if CONFIG_AV1_HIGHBITDEPTH
  }
#endif

  *out_dist = RIGHT_SIGNED_SHIFT(*out_dist, shift);
  *out_sse = RIGHT_SIGNED_SHIFT(this_sse, shift);
}

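// Separable transform-type pruning. The four horizontal kernels are first
// evaluated with DCT as the vertical kernel (the first row of idx_map), then
// the four vertical kernels with the best horizontal kernel held fixed (a
// column of idx_map). The summed per-direction RD costs rank all 16 2D
// combinations, and the returned bitmask has a bit set for every pruned tx
// type.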
static uint16_t prune_txk_type_separ(
    const AV1_COMP *cpi, MACROBLOCK *x, int plane, int block, TX_SIZE tx_size,
    int blk_row, int blk_col, BLOCK_SIZE plane_bsize, int *txk_map,
    int16_t allowed_tx_mask, int prune_factor, const TXB_CTX *const txb_ctx,
    int reduced_tx_set_used, int64_t ref_best_rd, int num_sel) {
  const AV1_COMMON *cm = &cpi->common;
  MACROBLOCKD *xd = &x->e_mbd;

  int idx;

  int64_t rds_v[4];
  int64_t rds_h[4];
  int idx_v[4] = { 0, 1, 2, 3 };
  int idx_h[4] = { 0, 1, 2, 3 };
  int skip_v[4] = { 0 };
  int skip_h[4] = { 0 };
  const int idx_map[16] = {
    DCT_DCT,      DCT_ADST,      DCT_FLIPADST,      V_DCT,
    ADST_DCT,     ADST_ADST,     ADST_FLIPADST,     V_ADST,
    FLIPADST_DCT, FLIPADST_ADST, FLIPADST_FLIPADST, V_FLIPADST,
    H_DCT,        H_ADST,        H_FLIPADST,        IDTX
  };

  const int sel_pattern_v[16] = {
    0, 0, 1, 1, 0, 2, 1, 2, 2, 0, 3, 1, 3, 2, 3, 3
  };
  const int sel_pattern_h[16] = {
    0, 1, 0, 1, 2, 0, 2, 1, 2, 3, 0, 3, 1, 3, 2, 3
  };

  QUANT_PARAM quant_param;
  TxfmParam txfm_param;
  av1_setup_xform(cm, x, tx_size, DCT_DCT, &txfm_param);
  av1_setup_quant(tx_size, 1, AV1_XFORM_QUANT_B, cpi->oxcf.q_cfg.quant_b_adapt,
                  &quant_param);
  int tx_type;
  // Use EXT_TX_SET_ALL16 so that tx types outside the ext_tx_set of the
  // current block can also be tried. This function should only be called for
  // transform sizes up to 16x16 (see the assert).
  assert(txsize_sqr_up_map[tx_size] <= TX_16X16);
  txfm_param.tx_set_type = EXT_TX_SET_ALL16;

  int rate_cost = 0;
  int64_t dist = 0, sse = 0;
  // Evaluate the four horizontal kernels with DCT as the vertical kernel.
  for (idx = 0; idx < 4; ++idx) {
    tx_type = idx_map[idx];
    txfm_param.tx_type = tx_type;

    av1_setup_qmatrix(&cm->quant_params, xd, plane, tx_size, tx_type,
                      &quant_param);

    av1_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, &txfm_param,
                    &quant_param);

    const SCAN_ORDER *const scan_order =
        get_scan(txfm_param.tx_size, txfm_param.tx_type);
    dist_block_tx_domain(x, plane, block, tx_size, quant_param.qmatrix,
                         scan_order->scan, &dist, &sse);

    rate_cost = av1_cost_coeffs_txb_laplacian(x, plane, block, tx_size, tx_type,
                                              txb_ctx, reduced_tx_set_used, 0);

    rds_h[idx] = RDCOST(x->rdmult, rate_cost, dist);

    if ((rds_h[idx] - (rds_h[idx] >> 2)) > ref_best_rd) {
      skip_h[idx] = 1;
    }
  }
  sort_rd(rds_h, idx_h, 4);
  for (idx = 1; idx < 4; idx++) {
    if (rds_h[idx] > rds_h[0] * 1.2) skip_h[idx_h[idx]] = 1;
  }

  if (skip_h[idx_h[0]]) return (uint16_t)0xFFFF;

  // Evaluate the vertical kernels with the best horizontal kernel held fixed.
1186   rds_v[0] = rds_h[0];
1187   int start_v = 1, end_v = 4;
1188   const int *idx_map_v = idx_map + idx_h[0];
1189 
1190   for (idx = start_v; idx < end_v; ++idx) {
1191     tx_type = idx_map_v[idx_v[idx] * 4];
1192     txfm_param.tx_type = tx_type;
1193 
1194     av1_setup_qmatrix(&cm->quant_params, xd, plane, tx_size, tx_type,
1195                       &quant_param);
1196 
1197     av1_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, &txfm_param,
1198                     &quant_param);
1199 
1200     const SCAN_ORDER *const scan_order =
1201         get_scan(txfm_param.tx_size, txfm_param.tx_type);
1202     dist_block_tx_domain(x, plane, block, tx_size, quant_param.qmatrix,
1203                          scan_order->scan, &dist, &sse);
1204 
1205     rate_cost = av1_cost_coeffs_txb_laplacian(x, plane, block, tx_size, tx_type,
1206                                               txb_ctx, reduced_tx_set_used, 0);
1207 
1208     rds_v[idx] = RDCOST(x->rdmult, rate_cost, dist);
1209 
1210     if ((rds_v[idx] - (rds_v[idx] >> 2)) > ref_best_rd) {
1211       skip_v[idx] = 1;
1212     }
1213   }
1214   sort_rd(rds_v, idx_v, 4);
1215   for (idx = 1; idx < 4; idx++) {
1216     if (rds_v[idx] > rds_v[0] * 1.2) skip_v[idx_v[idx]] = 1;
1217   }
1218 
1219   // Combine rds_h and rds_v to rank and prune the 2D tx candidates.
1220   int i_v, i_h;
1221   int64_t rds[16];
1222   int num_cand = 0, last = TX_TYPES - 1;
1223 
1224   for (int i = 0; i < 16; i++) {
1225     i_v = sel_pattern_v[i];
1226     i_h = sel_pattern_h[i];
1227     tx_type = idx_map[idx_v[i_v] * 4 + idx_h[i_h]];
1228     if (!(allowed_tx_mask & (1 << tx_type)) || skip_h[idx_h[i_h]] ||
1229         skip_v[idx_v[i_v]]) {
1230       txk_map[last] = tx_type;
1231       last--;
1232     } else {
1233       txk_map[num_cand] = tx_type;
1234       rds[num_cand] = rds_v[i_v] + rds_h[i_h];
1235       if (rds[num_cand] == 0) rds[num_cand] = 1;
1236       num_cand++;
1237     }
1238   }
1239   sort_rd(rds, txk_map, num_cand);
1240 
1241   uint16_t prune = (uint16_t)(~(1 << txk_map[0]));
1242   num_sel = AOMMIN(num_sel, num_cand);
1243 
1244   for (int i = 1; i < num_sel; i++) {
1245     int64_t factor = 1800 * (rds[i] - rds[0]) / (rds[0]);
1246     if (factor < (int64_t)prune_factor)
1247       prune &= ~(1 << txk_map[i]);
1248     else
1249       break;
1250   }
1251   return prune;
1252 }
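
// Illustration of the separable pruning above, with example numbers: the 16
// 2D tx types are laid out as idx_map[v * 4 + h], so each 2D candidate is
// scored by the sum of its 1D estimates, rds_v[v] + rds_h[h]. The early-out
//   if ((rd - (rd >> 2)) > ref_best_rd) skip = 1;
// drops a 1D candidate once roughly 0.75 * rd already exceeds ref_best_rd.
// In the final selection, with e.g. rds[0] = 1000 and rds[1] = 1150,
//   factor = 1800 * (1150 - 1000) / 1000 = 270,
// so txk_map[1] stays unpruned only when 270 < prune_factor.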
1253 
1254 static uint16_t prune_txk_type(const AV1_COMP *cpi, MACROBLOCK *x, int plane,
1255                                int block, TX_SIZE tx_size, int blk_row,
1256                                int blk_col, BLOCK_SIZE plane_bsize,
1257                                int *txk_map, uint16_t allowed_tx_mask,
1258                                int prune_factor, const TXB_CTX *const txb_ctx,
1259                                int reduced_tx_set_used) {
1260   const AV1_COMMON *cm = &cpi->common;
1261   MACROBLOCKD *xd = &x->e_mbd;
1262   int tx_type;
1263 
1264   int64_t rds[TX_TYPES];
1265 
1266   int num_cand = 0;
1267   int last = TX_TYPES - 1;
1268 
1269   TxfmParam txfm_param;
1270   QUANT_PARAM quant_param;
1271   av1_setup_xform(cm, x, tx_size, DCT_DCT, &txfm_param);
1272   av1_setup_quant(tx_size, 1, AV1_XFORM_QUANT_B, cpi->oxcf.q_cfg.quant_b_adapt,
1273                   &quant_param);
1274 
1275   for (int idx = 0; idx < TX_TYPES; idx++) {
1276     tx_type = idx;
1277     int rate_cost = 0;
1278     int64_t dist = 0, sse = 0;
1279     if (!(allowed_tx_mask & (1 << tx_type))) {
1280       txk_map[last] = tx_type;
1281       last--;
1282       continue;
1283     }
1284     txfm_param.tx_type = tx_type;
1285 
1286     av1_setup_qmatrix(&cm->quant_params, xd, plane, tx_size, tx_type,
1287                       &quant_param);
1288 
1289     // Apply the forward transform and quantization.
1290     av1_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, &txfm_param,
1291                     &quant_param);
1292     // Estimate the rate cost of the quantized coefficients.
1293     rate_cost = av1_cost_coeffs_txb_laplacian(x, plane, block, tx_size, tx_type,
1294                                               txb_ctx, reduced_tx_set_used, 0);
1295     // Compute distortion in the transform domain.
1296     const SCAN_ORDER *const scan_order =
1297         get_scan(txfm_param.tx_size, txfm_param.tx_type);
1298     dist_block_tx_domain(x, plane, block, tx_size, quant_param.qmatrix,
1299                          scan_order->scan, &dist, &sse);
1300 
1301     txk_map[num_cand] = tx_type;
1302     rds[num_cand] = RDCOST(x->rdmult, rate_cost, dist);
1303     if (rds[num_cand] == 0) rds[num_cand] = 1;
1304     num_cand++;
1305   }
1306 
1307   if (num_cand == 0) return (uint16_t)0xFFFF;
1308 
1309   sort_rd(rds, txk_map, num_cand);
1310   uint16_t prune = (uint16_t)(~(1 << txk_map[0]));
1311 
1312   // prune_factor in (0, 1000] controls aggressiveness: larger values keep more near-best tx types.
1313   int64_t factor = 0;
1314   for (int idx = 1; idx < num_cand; idx++) {
1315     factor = 1000 * (rds[idx] - rds[0]) / rds[0];
1316     if (factor < (int64_t)prune_factor)
1317       prune &= ~(1 << txk_map[idx]);
1318     else
1319       break;
1320   }
1321   return prune;
1322 }
1323 
1324 // These thresholds were calibrated to provide a certain number of TX types
1325 // pruned by the model on average, i.e. selecting a threshold with index i
1326 // will lead to pruning i+1 TX types on average
1327 static const float *prune_2D_adaptive_thresholds[] = {
1328   // TX_4X4
1329   (float[]){ 0.00549f, 0.01306f, 0.02039f, 0.02747f, 0.03406f, 0.04065f,
1330              0.04724f, 0.05383f, 0.06067f, 0.06799f, 0.07605f, 0.08533f,
1331              0.09778f, 0.11780f },
1332   // TX_8X8
1333   (float[]){ 0.00037f, 0.00183f, 0.00525f, 0.01038f, 0.01697f, 0.02502f,
1334              0.03381f, 0.04333f, 0.05286f, 0.06287f, 0.07434f, 0.08850f,
1335              0.10803f, 0.14124f },
1336   // TX_16X16
1337   (float[]){ 0.01404f, 0.02000f, 0.04211f, 0.05164f, 0.05798f, 0.06335f,
1338              0.06897f, 0.07629f, 0.08875f, 0.11169f },
1339   // TX_32X32
1340   NULL,
1341   // TX_64X64
1342   NULL,
1343   // TX_4X8
1344   (float[]){ 0.00183f, 0.00745f, 0.01428f, 0.02185f, 0.02966f, 0.03723f,
1345              0.04456f, 0.05188f, 0.05920f, 0.06702f, 0.07605f, 0.08704f,
1346              0.10168f, 0.12585f },
1347   // TX_8X4
1348   (float[]){ 0.00085f, 0.00476f, 0.01135f, 0.01892f, 0.02698f, 0.03528f,
1349              0.04358f, 0.05164f, 0.05994f, 0.06848f, 0.07849f, 0.09021f,
1350              0.10583f, 0.13123f },
1351   // TX_8X16
1352   (float[]){ 0.00037f, 0.00232f, 0.00671f, 0.01257f, 0.01965f, 0.02722f,
1353              0.03552f, 0.04382f, 0.05237f, 0.06189f, 0.07336f, 0.08728f,
1354              0.10730f, 0.14221f },
1355   // TX_16X8
1356   (float[]){ 0.00061f, 0.00330f, 0.00818f, 0.01453f, 0.02185f, 0.02966f,
1357              0.03772f, 0.04578f, 0.05383f, 0.06262f, 0.07288f, 0.08582f,
1358              0.10339f, 0.13464f },
1359   // TX_16X32
1360   NULL,
1361   // TX_32X16
1362   NULL,
1363   // TX_32X64
1364   NULL,
1365   // TX_64X32
1366   NULL,
1367   // TX_4X16
1368   (float[]){ 0.00232f, 0.00671f, 0.01257f, 0.01941f, 0.02673f, 0.03430f,
1369              0.04211f, 0.04968f, 0.05750f, 0.06580f, 0.07507f, 0.08655f,
1370              0.10242f, 0.12878f },
1371   // TX_16X4
1372   (float[]){ 0.00110f, 0.00525f, 0.01208f, 0.01990f, 0.02795f, 0.03601f,
1373              0.04358f, 0.05115f, 0.05896f, 0.06702f, 0.07629f, 0.08752f,
1374              0.10217f, 0.12610f },
1375   // TX_8X32
1376   NULL,
1377   // TX_32X8
1378   NULL,
1379   // TX_16X64
1380   NULL,
1381   // TX_64X16
1382   NULL,
1383 };
1384 
1385 static inline float get_adaptive_thresholds(
1386     TX_SIZE tx_size, TxSetType tx_set_type,
1387     TX_TYPE_PRUNE_MODE prune_2d_txfm_mode) {
1388   const int prune_aggr_table[5][2] = {
1389     { 4, 1 }, { 6, 3 }, { 9, 6 }, { 9, 6 }, { 12, 9 }
1390   };
1391   int pruning_aggressiveness = 0;
1392   if (tx_set_type == EXT_TX_SET_ALL16)
1393     pruning_aggressiveness =
1394         prune_aggr_table[prune_2d_txfm_mode - TX_TYPE_PRUNE_1][0];
1395   else if (tx_set_type == EXT_TX_SET_DTT9_IDTX_1DDCT)
1396     pruning_aggressiveness =
1397         prune_aggr_table[prune_2d_txfm_mode - TX_TYPE_PRUNE_1][1];
1398 
1399   return prune_2D_adaptive_thresholds[tx_size][pruning_aggressiveness];
1400 }
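
// Example lookup: for TX_8X8 in EXT_TX_SET_ALL16 with
// prune_2d_txfm_mode == TX_TYPE_PRUNE_2, row { 6, 3 } of prune_aggr_table
// selects pruning_aggressiveness = 6, so the returned threshold is
// prune_2D_adaptive_thresholds[TX_8X8][6] == 0.03381f, i.e. about 7 tx types
// are expected to be pruned on average (threshold index i prunes i+1 types).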
1401 
1402 static inline void get_energy_distribution_finer(const int16_t *diff,
1403                                                  int stride, int bw, int bh,
1404                                                  float *hordist,
1405                                                  float *verdist) {
1406   // First compute downscaled block energy values (esq); downscale factors
1407   // are defined by w_shift and h_shift.
1408   unsigned int esq[256];
1409   const int w_shift = bw <= 8 ? 0 : 1;
1410   const int h_shift = bh <= 8 ? 0 : 1;
1411   const int esq_w = bw >> w_shift;
1412   const int esq_h = bh >> h_shift;
1413   const int esq_sz = esq_w * esq_h;
1414   int i, j;
1415   memset(esq, 0, esq_sz * sizeof(esq[0]));
1416   if (w_shift) {
1417     for (i = 0; i < bh; i++) {
1418       unsigned int *cur_esq_row = esq + (i >> h_shift) * esq_w;
1419       const int16_t *cur_diff_row = diff + i * stride;
1420       for (j = 0; j < bw; j += 2) {
1421         cur_esq_row[j >> 1] += (cur_diff_row[j] * cur_diff_row[j] +
1422                                 cur_diff_row[j + 1] * cur_diff_row[j + 1]);
1423       }
1424     }
1425   } else {
1426     for (i = 0; i < bh; i++) {
1427       unsigned int *cur_esq_row = esq + (i >> h_shift) * esq_w;
1428       const int16_t *cur_diff_row = diff + i * stride;
1429       for (j = 0; j < bw; j++) {
1430         cur_esq_row[j] += cur_diff_row[j] * cur_diff_row[j];
1431       }
1432     }
1433   }
1434 
1435   uint64_t total = 0;
1436   for (i = 0; i < esq_sz; i++) total += esq[i];
1437 
1438   // Output hordist and verdist arrays are normalized 1D projections of esq
1439   if (total == 0) {
1440     float hor_val = 1.0f / esq_w;
1441     for (j = 0; j < esq_w - 1; j++) hordist[j] = hor_val;
1442     float ver_val = 1.0f / esq_h;
1443     for (i = 0; i < esq_h - 1; i++) verdist[i] = ver_val;
1444     return;
1445   }
1446 
1447   const float e_recip = 1.0f / (float)total;
1448   memset(hordist, 0, (esq_w - 1) * sizeof(hordist[0]));
1449   memset(verdist, 0, (esq_h - 1) * sizeof(verdist[0]));
1450   const unsigned int *cur_esq_row;
1451   for (i = 0; i < esq_h - 1; i++) {
1452     cur_esq_row = esq + i * esq_w;
1453     for (j = 0; j < esq_w - 1; j++) {
1454       hordist[j] += (float)cur_esq_row[j];
1455       verdist[i] += (float)cur_esq_row[j];
1456     }
1457     verdist[i] += (float)cur_esq_row[j];
1458   }
1459   cur_esq_row = esq + i * esq_w;
1460   for (j = 0; j < esq_w - 1; j++) hordist[j] += (float)cur_esq_row[j];
1461 
1462   for (j = 0; j < esq_w - 1; j++) hordist[j] *= e_recip;
1463   for (i = 0; i < esq_h - 1; i++) verdist[i] *= e_recip;
1464 }
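
// Example: for a 16x8 block, w_shift = 1 and h_shift = 0, so esq is an 8x8
// grid in which each entry sums the squared residuals of a horizontal pixel
// pair. hordist[0..6] then holds the first 7 column sums of esq and
// verdist[0..6] the first 7 row sums, each normalized by the total energy;
// the last column/row is implied since each projection sums to 1.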
1465 
1466 static inline bool check_bit_mask(uint16_t mask, int val) {
1467   return mask & (1 << val);
1468 }
1469 
1470 static inline void set_bit_mask(uint16_t *mask, int val) {
1471   *mask |= (1 << val);
1472 }
1473 
1474 static inline void unset_bit_mask(uint16_t *mask, int val) {
1475   *mask &= ~(1 << val);
1476 }
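
// Usage sketch: the tx type masks are 16 bits wide, one bit per TX_TYPE, e.g.
//   uint16_t mask = 0;
//   set_bit_mask(&mask, ADST_ADST);    // allow ADST_ADST
//   if (check_bit_mask(mask, ADST_ADST)) { /* searched */ }
//   unset_bit_mask(&mask, ADST_ADST);  // prune it again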
1477 
1478 static void prune_tx_2D(MACROBLOCK *x, BLOCK_SIZE bsize, TX_SIZE tx_size,
1479                         int blk_row, int blk_col, TxSetType tx_set_type,
1480                         TX_TYPE_PRUNE_MODE prune_2d_txfm_mode, int *txk_map,
1481                         uint16_t *allowed_tx_mask) {
1482   // This table is used because the search order is different from the enum
1483   // order.
1484   static const int tx_type_table_2D[16] = {
1485     DCT_DCT,      DCT_ADST,      DCT_FLIPADST,      V_DCT,
1486     ADST_DCT,     ADST_ADST,     ADST_FLIPADST,     V_ADST,
1487     FLIPADST_DCT, FLIPADST_ADST, FLIPADST_FLIPADST, V_FLIPADST,
1488     H_DCT,        H_ADST,        H_FLIPADST,        IDTX
1489   };
1490   if (tx_set_type != EXT_TX_SET_ALL16 &&
1491       tx_set_type != EXT_TX_SET_DTT9_IDTX_1DDCT)
1492     return;
1493 #if CONFIG_NN_V2
1494   NN_CONFIG_V2 *nn_config_hor = av1_tx_type_nnconfig_map_hor[tx_size];
1495   NN_CONFIG_V2 *nn_config_ver = av1_tx_type_nnconfig_map_ver[tx_size];
1496 #else
1497   const NN_CONFIG *nn_config_hor = av1_tx_type_nnconfig_map_hor[tx_size];
1498   const NN_CONFIG *nn_config_ver = av1_tx_type_nnconfig_map_ver[tx_size];
1499 #endif
1500   if (!nn_config_hor || !nn_config_ver) return;  // Model not established yet.
1501 
1502   float hfeatures[16], vfeatures[16];
1503   float hscores[4], vscores[4];
1504   float scores_2D_raw[16];
1505   const int bw = tx_size_wide[tx_size];
1506   const int bh = tx_size_high[tx_size];
1507   const int hfeatures_num = bw <= 8 ? bw : bw / 2;
1508   const int vfeatures_num = bh <= 8 ? bh : bh / 2;
1509   assert(hfeatures_num <= 16);
1510   assert(vfeatures_num <= 16);
1511 
1512   const struct macroblock_plane *const p = &x->plane[0];
1513   const int diff_stride = block_size_wide[bsize];
1514   const int16_t *diff = p->src_diff + 4 * blk_row * diff_stride + 4 * blk_col;
1515   get_energy_distribution_finer(diff, diff_stride, bw, bh, hfeatures,
1516                                 vfeatures);
1517 
1518   av1_get_horver_correlation_full(diff, diff_stride, bw, bh,
1519                                   &hfeatures[hfeatures_num - 1],
1520                                   &vfeatures[vfeatures_num - 1]);
1521 
1522 #if CONFIG_NN_V2
1523   av1_nn_predict_v2(hfeatures, nn_config_hor, 0, hscores);
1524   av1_nn_predict_v2(vfeatures, nn_config_ver, 0, vscores);
1525 #else
1526   av1_nn_predict(hfeatures, nn_config_hor, 1, hscores);
1527   av1_nn_predict(vfeatures, nn_config_ver, 1, vscores);
1528 #endif
1529 
1530   for (int i = 0; i < 4; i++) {
1531     float *cur_scores_2D = scores_2D_raw + i * 4;
1532     cur_scores_2D[0] = vscores[i] * hscores[0];
1533     cur_scores_2D[1] = vscores[i] * hscores[1];
1534     cur_scores_2D[2] = vscores[i] * hscores[2];
1535     cur_scores_2D[3] = vscores[i] * hscores[3];
1536   }
1537 
1538   assert(TX_TYPES == 16);
1539   // This version of the function only works when there are at most 16 classes.
1540   // So we will need to change the optimization or use av1_nn_softmax instead if
1541   // this ever gets changed.
1542   av1_nn_fast_softmax_16(scores_2D_raw, scores_2D_raw);
1543 
1544   const float score_thresh =
1545       get_adaptive_thresholds(tx_size, tx_set_type, prune_2d_txfm_mode);
1546 
1547   // Always keep the TX type with the highest score, prune all others with
1548   // score below score_thresh.
1549   int max_score_i = 0;
1550   float max_score = 0.0f;
1551   uint16_t allow_bitmask = 0;
1552   float sum_score = 0.0;
1553   // Calculate the sum of the scores of the allowed tx types and populate the
1554   // allow bit mask based on score_thresh and allowed_tx_mask.
1555   int allow_count = 0;
1556   int tx_type_allowed[16] = { TX_TYPE_INVALID, TX_TYPE_INVALID, TX_TYPE_INVALID,
1557                               TX_TYPE_INVALID, TX_TYPE_INVALID, TX_TYPE_INVALID,
1558                               TX_TYPE_INVALID, TX_TYPE_INVALID, TX_TYPE_INVALID,
1559                               TX_TYPE_INVALID, TX_TYPE_INVALID, TX_TYPE_INVALID,
1560                               TX_TYPE_INVALID, TX_TYPE_INVALID, TX_TYPE_INVALID,
1561                               TX_TYPE_INVALID };
1562   float scores_2D[16] = {
1563     -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1564   };
1565   for (int tx_idx = 0; tx_idx < TX_TYPES; tx_idx++) {
1566     const int allow_tx_type =
1567         check_bit_mask(*allowed_tx_mask, tx_type_table_2D[tx_idx]);
1568     if (!allow_tx_type) {
1569       continue;
1570     }
1571     if (scores_2D_raw[tx_idx] > max_score) {
1572       max_score = scores_2D_raw[tx_idx];
1573       max_score_i = tx_idx;
1574     }
1575     if (scores_2D_raw[tx_idx] >= score_thresh) {
1576       // Set allow mask based on score_thresh
1577       set_bit_mask(&allow_bitmask, tx_type_table_2D[tx_idx]);
1578 
1579       // Accumulate score of allowed tx type
1580       sum_score += scores_2D_raw[tx_idx];
1581 
1582       scores_2D[allow_count] = scores_2D_raw[tx_idx];
1583       tx_type_allowed[allow_count] = tx_type_table_2D[tx_idx];
1584       allow_count += 1;
1585     }
1586   }
1587   if (!check_bit_mask(allow_bitmask, tx_type_table_2D[max_score_i])) {
1588     // If even the tx_type with max score is pruned, this means that no other
1589     // tx_type is feasible. When this happens, we force enable max_score_i and
1590     // end the search.
1591     set_bit_mask(&allow_bitmask, tx_type_table_2D[max_score_i]);
1592     memcpy(txk_map, tx_type_table_2D, sizeof(tx_type_table_2D));
1593     *allowed_tx_mask = allow_bitmask;
1594     return;
1595   }
1596 
1597   // Sort the allowed tx types by their probability scores.
1598   if (allow_count <= 8) {
1599     av1_sort_fi32_8(scores_2D, tx_type_allowed);
1600   } else {
1601     av1_sort_fi32_16(scores_2D, tx_type_allowed);
1602   }
1603 
1604   // Enable more pruning based on tx type probability and number of allowed tx
1605   // types
1606   if (prune_2d_txfm_mode >= TX_TYPE_PRUNE_4) {
1607     float temp_score = 0.0;
1608     float score_ratio = 0.0;
1609     int tx_idx, tx_count = 0;
1610     const float inv_sum_score = 100 / sum_score;
1611     // Get allowed tx types based on sorted probability score and tx count
1612     for (tx_idx = 0; tx_idx < allow_count; tx_idx++) {
1613       // Stop keeping tx types once the cumulative probability exceeds 30%
1614       // and at least two tx types have already been kept.
1615       if (score_ratio > 30.0 && tx_count >= 2) break;
1616 
1617       assert(check_bit_mask(allow_bitmask, tx_type_allowed[tx_idx]));
1618       // Calculate cumulative probability
1619       temp_score += scores_2D[tx_idx];
1620 
1621       // Calculate percentage of cumulative probability of allowed tx type
1622       score_ratio = temp_score * inv_sum_score;
1623       tx_count++;
1624     }
1625     // Set remaining tx types as pruned
1626     for (; tx_idx < allow_count; tx_idx++)
1627       unset_bit_mask(&allow_bitmask, tx_type_allowed[tx_idx]);
1628   }
1629 
1630   memcpy(txk_map, tx_type_allowed, sizeof(tx_type_table_2D));
1631   *allowed_tx_mask = allow_bitmask;
1632 }
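
// Worked example of the TX_TYPE_PRUNE_4+ stage above: with sorted scores
// { 0.40f, 0.25f, 0.15f, ... } and sum_score = 1.0f, iteration 1 reaches
// score_ratio = 40 with tx_count = 1, iteration 2 reaches score_ratio = 65
// with tx_count = 2, so the loop breaks at the third candidate and every
// remaining type has its bit cleared in allow_bitmask.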
1633 
1634 static float get_dev(float mean, double x2_sum, int num) {
1635   const float e_x2 = (float)(x2_sum / num);
1636   const float diff = e_x2 - mean * mean;
1637   const float dev = (diff > 0) ? sqrtf(diff) : 0;
1638   return dev;
1639 }
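
// get_dev() forms the population standard deviation from raw moments,
//   dev = sqrt(E[x^2] - mean^2),
// e.g. mean = 2.0f with x2_sum / num = 8.0 gives dev = sqrtf(8 - 4) = 2.0f.
// The (diff > 0) guard protects against tiny negative differences caused by
// floating-point rounding.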
1640 
1641 // Writes the features required by the ML model to predict tx split based on
1642 // mean and standard deviation values of the block and sub-blocks.
1643 // Returns the number of elements written to the output array, which is
1644 // currently at most 12. Hence the 'features' buffer must be able to hold at
1645 // least 12 elements.
1646 static inline int get_mean_dev_features(const int16_t *data, int stride, int bw,
1647                                         int bh, float *features) {
1648   const int16_t *const data_ptr = &data[0];
1649   const int subh = (bh >= bw) ? (bh >> 1) : bh;
1650   const int subw = (bw >= bh) ? (bw >> 1) : bw;
1651   const int num = bw * bh;
1652   const int sub_num = subw * subh;
1653   int feature_idx = 2;
1654   int total_x_sum = 0;
1655   int64_t total_x2_sum = 0;
1656   int num_sub_blks = 0;
1657   double mean2_sum = 0.0f;
1658   float dev_sum = 0.0f;
1659 
1660   for (int row = 0; row < bh; row += subh) {
1661     for (int col = 0; col < bw; col += subw) {
1662       int x_sum;
1663       int64_t x2_sum;
1664       // TODO(any): Write a SIMD version. Clear registers.
1665       aom_get_blk_sse_sum(data_ptr + row * stride + col, stride, subw, subh,
1666                           &x_sum, &x2_sum);
1667       total_x_sum += x_sum;
1668       total_x2_sum += x2_sum;
1669 
1670       const float mean = (float)x_sum / sub_num;
1671       const float dev = get_dev(mean, (double)x2_sum, sub_num);
1672       features[feature_idx++] = mean;
1673       features[feature_idx++] = dev;
1674       mean2_sum += (double)(mean * mean);
1675       dev_sum += dev;
1676       num_sub_blks++;
1677     }
1678   }
1679 
1680   const float lvl0_mean = (float)total_x_sum / num;
1681   features[0] = lvl0_mean;
1682   features[1] = get_dev(lvl0_mean, (double)total_x2_sum, num);
1683 
1684   // Deviation of means.
1685   features[feature_idx++] = get_dev(lvl0_mean, mean2_sum, num_sub_blks);
1686   // Mean of deviations.
1687   features[feature_idx++] = dev_sum / num_sub_blks;
1688 
1689   return feature_idx;
1690 }
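
// Resulting feature layout, e.g. for a square block split into 4 sub-blocks:
//   features[0]    mean of the whole block
//   features[1]    standard deviation of the whole block
//   features[2..9] (mean, dev) pairs of the 4 sub-blocks
//   features[10]   deviation of the sub-block means
//   features[11]   mean of the sub-block deviations
// so the returned feature_idx is 12, matching the bound documented above.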
1691 
1692 static int ml_predict_tx_split(MACROBLOCK *x, BLOCK_SIZE bsize, int blk_row,
1693                                int blk_col, TX_SIZE tx_size) {
1694   const NN_CONFIG *nn_config = av1_tx_split_nnconfig_map[tx_size];
1695   if (!nn_config) return -1;
1696 
1697   const int diff_stride = block_size_wide[bsize];
1698   const int16_t *diff =
1699       x->plane[0].src_diff + 4 * blk_row * diff_stride + 4 * blk_col;
1700   const int bw = tx_size_wide[tx_size];
1701   const int bh = tx_size_high[tx_size];
1702 
1703   float features[64] = { 0.0f };
1704   get_mean_dev_features(diff, diff_stride, bw, bh, features);
1705 
1706   float score = 0.0f;
1707   av1_nn_predict(features, nn_config, 1, &score);
1708 
1709   int int_score = (int)(score * 10000);
1710   return clamp(int_score, -80000, 80000);
1711 }
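
// Example: a raw NN score of 0.73f is returned as (int)(0.73f * 10000) = 7300
// after clamping to [-80000, 80000], so callers can compare this fixed-point
// score against integer thresholds.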
1712 
1713 static inline uint16_t get_tx_mask(
1714     const AV1_COMP *cpi, MACROBLOCK *x, int plane, int block, int blk_row,
1715     int blk_col, BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
1716     const TXB_CTX *const txb_ctx, FAST_TX_SEARCH_MODE ftxs_mode,
1717     int64_t ref_best_rd, TX_TYPE *allowed_txk_types, int *txk_map) {
1718   const AV1_COMMON *cm = &cpi->common;
1719   MACROBLOCKD *xd = &x->e_mbd;
1720   MB_MODE_INFO *mbmi = xd->mi[0];
1721   const TxfmSearchParams *txfm_params = &x->txfm_search_params;
1722   const int is_inter = is_inter_block(mbmi);
1723   const int fast_tx_search = ftxs_mode & FTXS_DCT_AND_1D_DCT_ONLY;
1724   // If txk_allowed == TX_TYPES, more than one tx type is allowed; otherwise
1725   // (txk_allowed < TX_TYPES) only that specific tx type is allowed.
1726   TX_TYPE txk_allowed = TX_TYPES;
1727 
1728   const FRAME_UPDATE_TYPE update_type =
1729       get_frame_update_type(&cpi->ppi->gf_group, cpi->gf_frame_index);
1730   int use_actual_frame_probs = 1;
1731   const int *tx_type_probs;
1732 #if CONFIG_FPMT_TEST
1733   use_actual_frame_probs =
1734       (cpi->ppi->fpmt_unit_test_cfg == PARALLEL_SIMULATION_ENCODE) ? 0 : 1;
1735   if (!use_actual_frame_probs) {
1736     tx_type_probs =
1737         (int *)cpi->ppi->temp_frame_probs.tx_type_probs[update_type][tx_size];
1738   }
1739 #endif
1740   if (use_actual_frame_probs) {
1741     tx_type_probs = cpi->ppi->frame_probs.tx_type_probs[update_type][tx_size];
1742   }
1743 
1744   if ((!is_inter && txfm_params->use_default_intra_tx_type) ||
1745       (is_inter && txfm_params->default_inter_tx_type_prob_thresh == 0)) {
1746     txk_allowed =
1747         get_default_tx_type(0, xd, tx_size, cpi->use_screen_content_tools);
1748   } else if (is_inter &&
1749              txfm_params->default_inter_tx_type_prob_thresh != INT_MAX) {
1750     if (tx_type_probs[DEFAULT_INTER_TX_TYPE] >
1751         txfm_params->default_inter_tx_type_prob_thresh) {
1752       txk_allowed = DEFAULT_INTER_TX_TYPE;
1753     } else {
1754       int force_tx_type = 0;
1755       int max_prob = 0;
1756       const int tx_type_prob_threshold =
1757           txfm_params->default_inter_tx_type_prob_thresh +
1758           PROB_THRESH_OFFSET_TX_TYPE;
1759       for (int i = 1; i < TX_TYPES; i++) {  // find maximum probability.
1760         if (tx_type_probs[i] > max_prob) {
1761           max_prob = tx_type_probs[i];
1762           force_tx_type = i;
1763         }
1764       }
1765       if (max_prob > tx_type_prob_threshold)  // force tx type with max prob.
1766         txk_allowed = force_tx_type;
1767       else if (x->rd_model == LOW_TXFM_RD) {
1768         if (plane == 0) txk_allowed = DCT_DCT;
1769       }
1770     }
1771   } else if (x->rd_model == LOW_TXFM_RD) {
1772     if (plane == 0) txk_allowed = DCT_DCT;
1773   }
1774 
1775   const TxSetType tx_set_type = av1_get_ext_tx_set_type(
1776       tx_size, is_inter, cm->features.reduced_tx_set_used);
1777 
1778   TX_TYPE uv_tx_type = DCT_DCT;
1779   if (plane) {
1780     // tx_type of PLANE_TYPE_UV should be the same as PLANE_TYPE_Y
1781     uv_tx_type = txk_allowed =
1782         av1_get_tx_type(xd, get_plane_type(plane), blk_row, blk_col, tx_size,
1783                         cm->features.reduced_tx_set_used);
1784   }
1785   PREDICTION_MODE intra_dir =
1786       mbmi->filter_intra_mode_info.use_filter_intra
1787           ? fimode_to_intradir[mbmi->filter_intra_mode_info.filter_intra_mode]
1788           : mbmi->mode;
1789   uint16_t ext_tx_used_flag =
1790       cpi->sf.tx_sf.tx_type_search.use_reduced_intra_txset != 0 &&
1791               tx_set_type == EXT_TX_SET_DTT4_IDTX_1DDCT
1792           ? av1_reduced_intra_tx_used_flag[intra_dir]
1793           : av1_ext_tx_used_flag[tx_set_type];
1794 
1795   if (cpi->sf.tx_sf.tx_type_search.use_reduced_intra_txset == 2)
1796     ext_tx_used_flag &= av1_derived_intra_tx_used_flag[intra_dir];
1797 
1798   if (xd->lossless[mbmi->segment_id] || txsize_sqr_up_map[tx_size] > TX_32X32 ||
1799       ext_tx_used_flag == 0x0001 ||
1800       (is_inter && cpi->oxcf.txfm_cfg.use_inter_dct_only) ||
1801       (!is_inter && cpi->oxcf.txfm_cfg.use_intra_dct_only)) {
1802     txk_allowed = DCT_DCT;
1803   }
1804 
1805   if (cpi->oxcf.txfm_cfg.enable_flip_idtx == 0)
1806     ext_tx_used_flag &= DCT_ADST_TX_MASK;
1807 
1808   uint16_t allowed_tx_mask = 0;  // 1: allow; 0: skip.
1809   if (txk_allowed < TX_TYPES) {
1810     allowed_tx_mask = 1 << txk_allowed;
1811     allowed_tx_mask &= ext_tx_used_flag;
1812   } else if (fast_tx_search) {
1813     allowed_tx_mask = 0x0c01;  // V_DCT, H_DCT, DCT_DCT
1814     allowed_tx_mask &= ext_tx_used_flag;
1815   } else {
1816     assert(plane == 0);
1817     allowed_tx_mask = ext_tx_used_flag;
1818     int num_allowed = 0;
1819     int i;
1820 
1821     if (cpi->sf.tx_sf.tx_type_search.prune_tx_type_using_stats) {
1822       static const int thresh_arr[2][7] = { { 10, 15, 15, 10, 15, 15, 15 },
1823                                             { 10, 17, 17, 10, 17, 17, 17 } };
1824       const int thresh =
1825           thresh_arr[cpi->sf.tx_sf.tx_type_search.prune_tx_type_using_stats - 1]
1826                     [update_type];
1827       uint16_t prune = 0;
1828       int max_prob = -1;
1829       int max_idx = 0;
1830       for (i = 0; i < TX_TYPES; i++) {
1831         if (tx_type_probs[i] > max_prob && (allowed_tx_mask & (1 << i))) {
1832           max_prob = tx_type_probs[i];
1833           max_idx = i;
1834         }
1835         if (tx_type_probs[i] < thresh) prune |= (1 << i);
1836       }
1837       if ((prune >> max_idx) & 0x01) prune &= ~(1 << max_idx);
1838       allowed_tx_mask &= (~prune);
1839     }
1840     for (i = 0; i < TX_TYPES; i++) {
1841       if (allowed_tx_mask & (1 << i)) num_allowed++;
1842     }
1843     assert(num_allowed > 0);
1844 
1845     if (num_allowed > 2 && cpi->sf.tx_sf.tx_type_search.prune_tx_type_est_rd) {
1846       int pf = prune_factors[txfm_params->prune_2d_txfm_mode];
1847       int mf = mul_factors[txfm_params->prune_2d_txfm_mode];
1848       if (num_allowed <= 7) {
1849         const uint16_t prune =
1850             prune_txk_type(cpi, x, plane, block, tx_size, blk_row, blk_col,
1851                            plane_bsize, txk_map, allowed_tx_mask, pf, txb_ctx,
1852                            cm->features.reduced_tx_set_used);
1853         allowed_tx_mask &= (~prune);
1854       } else {
1855         const int num_sel = (num_allowed * mf + 50) / 100;
1856         const uint16_t prune = prune_txk_type_separ(
1857             cpi, x, plane, block, tx_size, blk_row, blk_col, plane_bsize,
1858             txk_map, allowed_tx_mask, pf, txb_ctx,
1859             cm->features.reduced_tx_set_used, ref_best_rd, num_sel);
1860 
1861         allowed_tx_mask &= (~prune);
1862       }
1863     } else {
1864       assert(num_allowed > 0);
1865       int allowed_tx_count =
1866           (txfm_params->prune_2d_txfm_mode >= TX_TYPE_PRUNE_4) ? 1 : 5;
1867       // Only reached when !fast_tx_search, >1 tx types are allowed, and plane == 0.
1868       if (txfm_params->prune_2d_txfm_mode >= TX_TYPE_PRUNE_1 && is_inter &&
1869           num_allowed > allowed_tx_count) {
1870         prune_tx_2D(x, plane_bsize, tx_size, blk_row, blk_col, tx_set_type,
1871                     txfm_params->prune_2d_txfm_mode, txk_map, &allowed_tx_mask);
1872       }
1873     }
1874   }
1875 
1876   // Need to have at least one transform type allowed.
1877   if (allowed_tx_mask == 0) {
1878     txk_allowed = (plane ? uv_tx_type : DCT_DCT);
1879     allowed_tx_mask = (1 << txk_allowed);
1880   }
1881 
1882   assert(IMPLIES(txk_allowed < TX_TYPES, allowed_tx_mask == 1 << txk_allowed));
1883   *allowed_txk_types = txk_allowed;
1884   return allowed_tx_mask;
1885 }
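
// Sketch of the outputs: the returned uint16_t has bit i set when tx type i
// may be searched, and *allowed_txk_types is either the single forced type or
// TX_TYPES when several remain, e.g. (names as in this file)
//   TX_TYPE txk_allowed;
//   int txk_map[TX_TYPES];
//   const uint16_t mask =
//       get_tx_mask(cpi, x, /*plane=*/0, block, blk_row, blk_col, plane_bsize,
//                   tx_size, &txb_ctx, ftxs_mode, ref_best_rd, &txk_allowed,
//                   txk_map);
//   // mask == (1 << DCT_DCT) when only DCT_DCT survives pruning.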
1886 
1887 #if CONFIG_RD_DEBUG
1888 static inline void update_txb_coeff_cost(RD_STATS *rd_stats, int plane,
1889                                          int txb_coeff_cost) {
1890   rd_stats->txb_coeff_cost[plane] += txb_coeff_cost;
1891 }
1892 #endif
1893 
1894 static inline int cost_coeffs(MACROBLOCK *x, int plane, int block,
1895                               TX_SIZE tx_size, const TX_TYPE tx_type,
1896                               const TXB_CTX *const txb_ctx,
1897                               int reduced_tx_set_used) {
1898 #if TXCOEFF_COST_TIMER
1899   struct aom_usec_timer timer;
1900   aom_usec_timer_start(&timer);
1901 #endif
1902   const int cost = av1_cost_coeffs_txb(x, plane, block, tx_size, tx_type,
1903                                        txb_ctx, reduced_tx_set_used);
1904 #if TXCOEFF_COST_TIMER
1905   AV1_COMMON *tmp_cm = (AV1_COMMON *)&cpi->common;
1906   aom_usec_timer_mark(&timer);
1907   const int64_t elapsed_time = aom_usec_timer_elapsed(&timer);
1908   tmp_cm->txcoeff_cost_timer += elapsed_time;
1909   ++tmp_cm->txcoeff_cost_count;
1910 #endif
1911   return cost;
1912 }
1913 
1914 static int skip_trellis_opt_based_on_satd(MACROBLOCK *x,
1915                                           QUANT_PARAM *quant_param, int plane,
1916                                           int block, TX_SIZE tx_size,
1917                                           int quant_b_adapt, int qstep,
1918                                           unsigned int coeff_opt_satd_threshold,
1919                                           int skip_trellis, int dc_only_blk) {
1920   if (skip_trellis || (coeff_opt_satd_threshold == UINT_MAX))
1921     return skip_trellis;
1922 
1923   const struct macroblock_plane *const p = &x->plane[plane];
1924   const int block_offset = BLOCK_OFFSET(block);
1925   tran_low_t *const coeff_ptr = p->coeff + block_offset;
1926   const int n_coeffs = av1_get_max_eob(tx_size);
1927   const int shift = (MAX_TX_SCALE - av1_get_tx_scale(tx_size));
1928   int satd = (dc_only_blk) ? abs(coeff_ptr[0]) : aom_satd(coeff_ptr, n_coeffs);
1929   satd = RIGHT_SIGNED_SHIFT(satd, shift);
1930   satd >>= (x->e_mbd.bd - 8);
1931 
1932   const int skip_block_trellis =
1933       ((uint64_t)satd >
1934        (uint64_t)coeff_opt_satd_threshold * qstep * sqrt_tx_pixels_2d[tx_size]);
1935 
1936   av1_setup_quant(
1937       tx_size, !skip_block_trellis,
1938       skip_block_trellis
1939           ? (USE_B_QUANT_NO_TRELLIS ? AV1_XFORM_QUANT_B : AV1_XFORM_QUANT_FP)
1940           : AV1_XFORM_QUANT_FP,
1941       quant_b_adapt, quant_param);
1942 
1943   return skip_block_trellis;
1944 }
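
// Worked example of the SATD gate above: for an 8x8 block
// (sqrt_tx_pixels_2d[TX_8X8] == 8) with qstep = 20 and
// coeff_opt_satd_threshold = 25, trellis is skipped once the scaled SATD
// exceeds 25 * 20 * 8 = 4000; lower-energy blocks keep AV1_XFORM_QUANT_FP
// with trellis optimization enabled.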
1945 
1946 // Predict DC-only blocks if the residual variance is below a qstep-based
1947 // threshold. For such blocks, transform type search is bypassed.
1948 static inline void predict_dc_only_block(
1949     MACROBLOCK *x, int plane, BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
1950     int block, int blk_row, int blk_col, RD_STATS *best_rd_stats,
1951     int64_t *block_sse, unsigned int *block_mse_q8, int64_t *per_px_mean,
1952     int *dc_only_blk) {
1953   MACROBLOCKD *xd = &x->e_mbd;
1954   MB_MODE_INFO *mbmi = xd->mi[0];
1955   const int dequant_shift = (is_cur_buf_hbd(xd)) ? xd->bd - 5 : 3;
1956   const int qstep = x->plane[plane].dequant_QTX[1] >> dequant_shift;
1957   uint64_t block_var = UINT64_MAX;
1958   const int dc_qstep = x->plane[plane].dequant_QTX[0] >> 3;
1959   *block_sse = pixel_diff_stats(x, plane, blk_row, blk_col, plane_bsize,
1960                                 txsize_to_bsize[tx_size], block_mse_q8,
1961                                 per_px_mean, &block_var);
1962   assert((*block_mse_q8) != UINT_MAX);
1963   uint64_t var_threshold = (uint64_t)(1.8 * qstep * qstep);
1964   if (is_cur_buf_hbd(xd))
1965     block_var = ROUND_POWER_OF_TWO(block_var, (xd->bd - 8) * 2);
1966 
1967   if (block_var >= var_threshold) return;
1968   const unsigned int predict_dc_level = x->txfm_search_params.predict_dc_level;
1969   assert(predict_dc_level != 0);
1970 
1971   // Predict a skip block if the residual mean and variance are below
1972   // qstep-based thresholds.
1973   if ((llabs(*per_px_mean) * dc_coeff_scale[tx_size]) < (dc_qstep << 12)) {
1974     // If the normalized mean of the residual block is less than the dc qstep
1975     // and the normalized block variance is less than the ac qstep, the block
1976     // is assumed to be a skip block and its rdcost is updated accordingly.
1977     best_rd_stats->skip_txfm = 1;
1978 
1979     x->plane[plane].eobs[block] = 0;
1980 
1981     if (is_cur_buf_hbd(xd))
1982       *block_sse = ROUND_POWER_OF_TWO((*block_sse), (xd->bd - 8) * 2);
1983 
1984     best_rd_stats->dist = (*block_sse) << 4;
1985     best_rd_stats->sse = best_rd_stats->dist;
1986 
1987     ENTROPY_CONTEXT ctxa[MAX_MIB_SIZE];
1988     ENTROPY_CONTEXT ctxl[MAX_MIB_SIZE];
1989     av1_get_entropy_contexts(plane_bsize, &xd->plane[plane], ctxa, ctxl);
1990     ENTROPY_CONTEXT *ta = ctxa;
1991     ENTROPY_CONTEXT *tl = ctxl;
1992     const TX_SIZE txs_ctx = get_txsize_entropy_ctx(tx_size);
1993     TXB_CTX txb_ctx_tmp;
1994     const PLANE_TYPE plane_type = get_plane_type(plane);
1995     get_txb_ctx(plane_bsize, tx_size, plane, ta, tl, &txb_ctx_tmp);
1996     const int zero_blk_rate = x->coeff_costs.coeff_costs[txs_ctx][plane_type]
1997                                   .txb_skip_cost[txb_ctx_tmp.txb_skip_ctx][1];
1998     best_rd_stats->rate = zero_blk_rate;
1999 
2000     best_rd_stats->rdcost =
2001         RDCOST(x->rdmult, best_rd_stats->rate, best_rd_stats->sse);
2002 
2003     x->plane[plane].txb_entropy_ctx[block] = 0;
2004   } else if (predict_dc_level > 1) {
2005     // Predict DC only blocks based on residual variance.
2006     // For chroma plane, this prediction is disabled for intra blocks.
2007     if ((plane == 0) || (plane > 0 && is_inter_block(mbmi))) *dc_only_blk = 1;
2008   }
2009 }
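
// Example of the variance gate above: with qstep = 16,
//   var_threshold = (uint64_t)(1.8 * 16 * 16) = 460,
// so only blocks whose (bit-depth normalized) variance is below 460 are
// considered for the skip / DC-only shortcuts.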
2010 
2011 // Search for the best transform type for a given transform block.
2012 // This function can be used for both inter and intra, both luma and chroma.
2013 static void search_tx_type(const AV1_COMP *cpi, MACROBLOCK *x, int plane,
2014                            int block, int blk_row, int blk_col,
2015                            BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
2016                            const TXB_CTX *const txb_ctx,
2017                            FAST_TX_SEARCH_MODE ftxs_mode, int skip_trellis,
2018                            int64_t ref_best_rd, RD_STATS *best_rd_stats) {
2019   const AV1_COMMON *cm = &cpi->common;
2020   MACROBLOCKD *xd = &x->e_mbd;
2021   MB_MODE_INFO *mbmi = xd->mi[0];
2022   const TxfmSearchParams *txfm_params = &x->txfm_search_params;
2023   int64_t best_rd = INT64_MAX;
2024   uint16_t best_eob = 0;
2025   TX_TYPE best_tx_type = DCT_DCT;
2026   int rate_cost = 0;
2027   struct macroblock_plane *const p = &x->plane[plane];
2028   tran_low_t *orig_dqcoeff = p->dqcoeff;
2029   tran_low_t *best_dqcoeff = x->dqcoeff_buf;
2030   const int tx_type_map_idx =
2031       plane ? 0 : blk_row * xd->tx_type_map_stride + blk_col;
2032   av1_invalid_rd_stats(best_rd_stats);
2033 
2034   skip_trellis |= !is_trellis_used(cpi->optimize_seg_arr[xd->mi[0]->segment_id],
2035                                    DRY_RUN_NORMAL);
2036 
2037   uint8_t best_txb_ctx = 0;
2038   // txk_allowed = TX_TYPES: >1 tx types are allowed
2039   // txk_allowed < TX_TYPES: only that specific tx type is allowed.
2040   TX_TYPE txk_allowed = TX_TYPES;
2041   int txk_map[TX_TYPES] = {
2042     0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
2043   };
2044   const int dequant_shift = (is_cur_buf_hbd(xd)) ? xd->bd - 5 : 3;
2045   const int qstep = x->plane[plane].dequant_QTX[1] >> dequant_shift;
2046 
2047   const uint8_t txw = tx_size_wide[tx_size];
2048   const uint8_t txh = tx_size_high[tx_size];
2049   int64_t block_sse;
2050   unsigned int block_mse_q8;
2051   int dc_only_blk = 0;
2052   const bool predict_dc_block =
2053       txfm_params->predict_dc_level >= 1 && txw != 64 && txh != 64;
2054   int64_t per_px_mean = INT64_MAX;
2055   if (predict_dc_block) {
2056     predict_dc_only_block(x, plane, plane_bsize, tx_size, block, blk_row,
2057                           blk_col, best_rd_stats, &block_sse, &block_mse_q8,
2058                           &per_px_mean, &dc_only_blk);
2059     if (best_rd_stats->skip_txfm == 1) {
2060       const TX_TYPE tx_type = DCT_DCT;
2061       if (plane == 0) xd->tx_type_map[tx_type_map_idx] = tx_type;
2062       return;
2063     }
2064   } else {
2065     block_sse = av1_pixel_diff_dist(x, plane, blk_row, blk_col, plane_bsize,
2066                                     txsize_to_bsize[tx_size], &block_mse_q8);
2067     assert(block_mse_q8 != UINT_MAX);
2068   }
2069 
2070   // Bit mask to indicate which transform types are allowed in the RD search.
2071   uint16_t tx_mask;
2072 
2073   // Use DCT_DCT transform for DC only block.
2074   if (dc_only_blk || cpi->sf.rt_sf.dct_only_palette_nonrd == 1)
2075     tx_mask = 1 << DCT_DCT;
2076   else
2077     tx_mask = get_tx_mask(cpi, x, plane, block, blk_row, blk_col, plane_bsize,
2078                           tx_size, txb_ctx, ftxs_mode, ref_best_rd,
2079                           &txk_allowed, txk_map);
2080   const uint16_t allowed_tx_mask = tx_mask;
2081 
2082   if (is_cur_buf_hbd(xd)) {
2083     block_sse = ROUND_POWER_OF_TWO(block_sse, (xd->bd - 8) * 2);
2084     block_mse_q8 = ROUND_POWER_OF_TWO(block_mse_q8, (xd->bd - 8) * 2);
2085   }
2086   block_sse *= 16;
2087   // Use an mse / qstep^2 based threshold to decide whether to perform R-D
2088   // optimization of the coefficients. Coefficient optimization tends to help
2089   // for smaller residuals; for larger residuals, R-D optimization may not be
2090   // effective.
2091   // TODO(any): Experiment with variance and mean based thresholds
2092   const int perform_block_coeff_opt =
2093       ((uint64_t)block_mse_q8 <=
2094        (uint64_t)txfm_params->coeff_opt_thresholds[0] * qstep * qstep);
2095   skip_trellis |= !perform_block_coeff_opt;
2096 
2097   // Flag to indicate whether distortion should be calculated in the transform
2098   // domain while iterating through the transform type candidates.
2099   // Transform domain distortion is accurate for higher residuals.
2100   // TODO(any): Experiment with variance and mean based thresholds
2101   int use_transform_domain_distortion =
2102       (txfm_params->use_transform_domain_distortion > 0) &&
2103       (block_mse_q8 >= txfm_params->tx_domain_dist_threshold) &&
2104       // Any 64-pt transform only preserves half the coefficients.
2105       // Therefore transform domain distortion is not valid for these
2106       // transform sizes.
2107       (txsize_sqr_up_map[tx_size] != TX_64X64) &&
2108       // Use pixel domain distortion for DC only blocks
2109       !dc_only_blk;
2110   // Flag to indicate if an extra calculation of distortion in the pixel domain
2111   // should be performed at the end, after the best transform type has been
2112   // decided.
2113   int calc_pixel_domain_distortion_final =
2114       txfm_params->use_transform_domain_distortion == 1 &&
2115       use_transform_domain_distortion && x->rd_model != LOW_TXFM_RD;
2116   if (calc_pixel_domain_distortion_final &&
2117       (txk_allowed < TX_TYPES || allowed_tx_mask == 0x0001))
2118     calc_pixel_domain_distortion_final = use_transform_domain_distortion = 0;
2119 
2120   const uint16_t *eobs_ptr = x->plane[plane].eobs;
2121 
2122   TxfmParam txfm_param;
2123   QUANT_PARAM quant_param;
2124   int skip_trellis_based_on_satd[TX_TYPES] = { 0 };
2125   av1_setup_xform(cm, x, tx_size, DCT_DCT, &txfm_param);
2126   av1_setup_quant(tx_size, !skip_trellis,
2127                   skip_trellis ? (USE_B_QUANT_NO_TRELLIS ? AV1_XFORM_QUANT_B
2128                                                          : AV1_XFORM_QUANT_FP)
2129                                : AV1_XFORM_QUANT_FP,
2130                   cpi->oxcf.q_cfg.quant_b_adapt, &quant_param);
2131 
2132   // Iterate through all transform type candidates.
2133   for (int idx = 0; idx < TX_TYPES; ++idx) {
2134     const TX_TYPE tx_type = (TX_TYPE)txk_map[idx];
2135     if (tx_type == TX_TYPE_INVALID || !check_bit_mask(allowed_tx_mask, tx_type))
2136       continue;
2137     txfm_param.tx_type = tx_type;
2138     if (av1_use_qmatrix(&cm->quant_params, xd, mbmi->segment_id)) {
2139       av1_setup_qmatrix(&cm->quant_params, xd, plane, tx_size, tx_type,
2140                         &quant_param);
2141     }
2142     if (plane == 0) xd->tx_type_map[tx_type_map_idx] = tx_type;
2143     RD_STATS this_rd_stats;
2144     av1_invalid_rd_stats(&this_rd_stats);
2145 
2146     if (!dc_only_blk)
2147       av1_xform(x, plane, block, blk_row, blk_col, plane_bsize, &txfm_param);
2148     else
2149       av1_xform_dc_only(x, plane, block, &txfm_param, per_px_mean);
2150 
2151     skip_trellis_based_on_satd[tx_type] = skip_trellis_opt_based_on_satd(
2152         x, &quant_param, plane, block, tx_size, cpi->oxcf.q_cfg.quant_b_adapt,
2153         qstep, txfm_params->coeff_opt_thresholds[1], skip_trellis, dc_only_blk);
2154 
2155     av1_quant(x, plane, block, &txfm_param, &quant_param);
2156 
2157     // Calculate rate cost of quantized coefficients.
2158     if (quant_param.use_optimize_b) {
2159       // TODO(aomedia:3209): update Trellis quantization to take into account
2160       // quantization matrices.
2161       av1_optimize_b(cpi, x, plane, block, tx_size, tx_type, txb_ctx,
2162                      &rate_cost);
2163     } else {
2164       rate_cost = cost_coeffs(x, plane, block, tx_size, tx_type, txb_ctx,
2165                               cm->features.reduced_tx_set_used);
2166     }
2167 
2168     // If rd cost based on coeff rate alone is already more than best_rd,
2169     // terminate early.
2170     if (RDCOST(x->rdmult, rate_cost, 0) > best_rd) continue;
2171 
2172     // Calculate distortion.
2173     if (eobs_ptr[block] == 0) {
2174       // When eob is 0, pixel domain distortion is more efficient and accurate.
2175       this_rd_stats.dist = this_rd_stats.sse = block_sse;
2176     } else if (dc_only_blk) {
2177       this_rd_stats.sse = block_sse;
2178       this_rd_stats.dist = dist_block_px_domain(
2179           cpi, x, plane, plane_bsize, block, blk_row, blk_col, tx_size);
2180     } else if (use_transform_domain_distortion) {
2181       const SCAN_ORDER *const scan_order =
2182           get_scan(txfm_param.tx_size, txfm_param.tx_type);
2183       dist_block_tx_domain(x, plane, block, tx_size, quant_param.qmatrix,
2184                            scan_order->scan, &this_rd_stats.dist,
2185                            &this_rd_stats.sse);
2186     } else {
2187       int64_t sse_diff = INT64_MAX;
2188       // high_energy threshold assumes that every pixel within a txfm block
2189       // has a residue energy of at least 25% of the maximum, i.e. 128 * 128
2190       // for 8 bit.
2191       const int64_t high_energy_thresh =
2192           ((int64_t)128 * 128 * tx_size_2d[tx_size]);
2193       const int is_high_energy = (block_sse >= high_energy_thresh);
2194       if (tx_size == TX_64X64 || is_high_energy) {
2195         // Because 3 out of 4 quadrants of transform coefficients are forced to
2196         // zero, the inverse transform has a tendency to overflow. sse_diff
2197         // is effectively the energy of those 3 quadrants, here we use it
2198         // to decide if we should do pixel domain distortion. If the energy
2199         // is mostly in first quadrant, then it is unlikely that we have
2200         // overflow issue in inverse transform.
2201         const SCAN_ORDER *const scan_order =
2202             get_scan(txfm_param.tx_size, txfm_param.tx_type);
2203         dist_block_tx_domain(x, plane, block, tx_size, quant_param.qmatrix,
2204                              scan_order->scan, &this_rd_stats.dist,
2205                              &this_rd_stats.sse);
2206         sse_diff = block_sse - this_rd_stats.sse;
2207       }
2208       if (tx_size != TX_64X64 || !is_high_energy ||
2209           (sse_diff * 2) < this_rd_stats.sse) {
2210         const int64_t tx_domain_dist = this_rd_stats.dist;
2211         this_rd_stats.dist = dist_block_px_domain(
2212             cpi, x, plane, plane_bsize, block, blk_row, blk_col, tx_size);
2213         // For high energy blocks, occasionally, the pixel domain distortion
2214         // can be artificially low due to clamping at reconstruction stage
2215         // even when inverse transform output is hugely different from the
2216         // actual residue.
2217         if (is_high_energy && this_rd_stats.dist < tx_domain_dist)
2218           this_rd_stats.dist = tx_domain_dist;
2219       } else {
2220         assert(sse_diff < INT64_MAX);
2221         this_rd_stats.dist += sse_diff;
2222       }
2223       this_rd_stats.sse = block_sse;
2224     }
2225 
2226     this_rd_stats.rate = rate_cost;
2227 
2228     const int64_t rd =
2229         RDCOST(x->rdmult, this_rd_stats.rate, this_rd_stats.dist);
2230 
2231     if (rd < best_rd) {
2232       best_rd = rd;
2233       *best_rd_stats = this_rd_stats;
2234       best_tx_type = tx_type;
2235       best_txb_ctx = x->plane[plane].txb_entropy_ctx[block];
2236       best_eob = x->plane[plane].eobs[block];
2237       // Swap dqcoeff buffers
2238       tran_low_t *const tmp_dqcoeff = best_dqcoeff;
2239       best_dqcoeff = p->dqcoeff;
2240       p->dqcoeff = tmp_dqcoeff;
2241     }
2242 
2243 #if CONFIG_COLLECT_RD_STATS == 1
2244     if (plane == 0) {
2245       PrintTransformUnitStats(cpi, x, &this_rd_stats, blk_row, blk_col,
2246                               plane_bsize, tx_size, tx_type, rd);
2247     }
2248 #endif  // CONFIG_COLLECT_RD_STATS == 1
2249 
2250 #if COLLECT_TX_SIZE_DATA
2251     // Generate small sample to restrict output size.
2252     static unsigned int seed = 21743;
2253     if (lcg_rand16(&seed) % 200 == 0) {
2254       FILE *fp = NULL;
2255 
2256       if (within_border) {
2257         fp = fopen(av1_tx_size_data_output_file, "a");
2258       }
2259 
2260       if (fp) {
2261         // Transform info and RD
2262         const int txb_w = tx_size_wide[tx_size];
2263         const int txb_h = tx_size_high[tx_size];
2264 
2265         // Residue signal.
2266         const int diff_stride = block_size_wide[plane_bsize];
2267         struct macroblock_plane *const p = &x->plane[plane];
2268         const int16_t *src_diff =
2269             &p->src_diff[(blk_row * diff_stride + blk_col) * 4];
2270 
2271         for (int r = 0; r < txb_h; ++r) {
2272           for (int c = 0; c < txb_w; ++c) {
2273             fprintf(fp, "%d,", src_diff[c]);
2274           }
2275           src_diff += diff_stride;
2276         }
2277 
2278         fprintf(fp, "%d,%d,%d,%" PRId64, txb_w, txb_h, tx_type, rd);
2279         fprintf(fp, "\n");
2280         fclose(fp);
2281       }
2282     }
2283 #endif  // COLLECT_TX_SIZE_DATA
2284 
2285     // If the current best RD cost is much worse than the reference RD cost,
2286     // terminate early.
2287     if (cpi->sf.tx_sf.adaptive_txb_search_level) {
2288       if ((best_rd - (best_rd >> cpi->sf.tx_sf.adaptive_txb_search_level)) >
2289           ref_best_rd) {
2290         break;
2291       }
2292     }
2293 
2294     // Terminate transform type search if the block has been quantized to
2295     // all zero.
2296     if (cpi->sf.tx_sf.tx_type_search.skip_tx_search && !best_eob) break;
2297   }
2298 
2299   assert(best_rd != INT64_MAX);
2300 
2301   best_rd_stats->skip_txfm = best_eob == 0;
2302   if (plane == 0) update_txk_array(xd, blk_row, blk_col, tx_size, best_tx_type);
2303   x->plane[plane].txb_entropy_ctx[block] = best_txb_ctx;
2304   x->plane[plane].eobs[block] = best_eob;
2305   skip_trellis = skip_trellis_based_on_satd[best_tx_type];
2306 
2307   // Point dqcoeff to the quantized coefficients corresponding to the best
2308   // transform type, then we can skip transform and quantization, e.g. in the
2309   // final pixel domain distortion calculation and recon_intra().
2310   p->dqcoeff = best_dqcoeff;
2311 
2312   if (calc_pixel_domain_distortion_final && best_eob) {
2313     best_rd_stats->dist = dist_block_px_domain(
2314         cpi, x, plane, plane_bsize, block, blk_row, blk_col, tx_size);
2315     best_rd_stats->sse = block_sse;
2316   }
2317 
2318   // Intra mode needs decoded pixels such that the next transform block
2319   // can use them for prediction.
2320   recon_intra(cpi, x, plane, block, blk_row, blk_col, plane_bsize, tx_size,
2321               txb_ctx, skip_trellis, best_tx_type, 0, &rate_cost, best_eob);
2322   p->dqcoeff = orig_dqcoeff;
2323 }
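
// The dqcoeff double-buffering in search_tx_type() is, in outline:
//   tran_low_t *orig = p->dqcoeff;       // block's working buffer
//   tran_low_t *best = x->dqcoeff_buf;   // scratch holding the best candidate
//   // on each new best: swap best and p->dqcoeff
//   p->dqcoeff = best;                   // expose best coeffs to recon_intra()
//   ...
//   p->dqcoeff = orig;                   // restore before returning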
2324 
2325 // Pick transform type for a luma transform block of tx_size. Note this function
2326 // is used only for inter-predicted blocks.
2327 static inline void tx_type_rd(const AV1_COMP *cpi, MACROBLOCK *x,
2328                               TX_SIZE tx_size, int blk_row, int blk_col,
2329                               int block, int plane_bsize, TXB_CTX *txb_ctx,
2330                               RD_STATS *rd_stats, FAST_TX_SEARCH_MODE ftxs_mode,
2331                               int64_t ref_rdcost) {
2332   assert(is_inter_block(x->e_mbd.mi[0]));
2333   RD_STATS this_rd_stats;
2334   const int skip_trellis = 0;
2335   search_tx_type(cpi, x, 0, block, blk_row, blk_col, plane_bsize, tx_size,
2336                  txb_ctx, ftxs_mode, skip_trellis, ref_rdcost, &this_rd_stats);
2337 
2338   av1_merge_rd_stats(rd_stats, &this_rd_stats);
2339 }
2340 
2341 static inline void try_tx_block_no_split(
2342     const AV1_COMP *cpi, MACROBLOCK *x, int blk_row, int blk_col, int block,
2343     TX_SIZE tx_size, int depth, BLOCK_SIZE plane_bsize,
2344     const ENTROPY_CONTEXT *ta, const ENTROPY_CONTEXT *tl,
2345     int txfm_partition_ctx, RD_STATS *rd_stats, int64_t ref_best_rd,
2346     FAST_TX_SEARCH_MODE ftxs_mode, TxCandidateInfo *no_split) {
2347   MACROBLOCKD *const xd = &x->e_mbd;
2348   MB_MODE_INFO *const mbmi = xd->mi[0];
2349   struct macroblock_plane *const p = &x->plane[0];
2350   const int bw = mi_size_wide[plane_bsize];
2351   const ENTROPY_CONTEXT *const pta = ta + blk_col;
2352   const ENTROPY_CONTEXT *const ptl = tl + blk_row;
2353   const TX_SIZE txs_ctx = get_txsize_entropy_ctx(tx_size);
2354   TXB_CTX txb_ctx;
2355   get_txb_ctx(plane_bsize, tx_size, 0, pta, ptl, &txb_ctx);
2356   const int zero_blk_rate = x->coeff_costs.coeff_costs[txs_ctx][PLANE_TYPE_Y]
2357                                 .txb_skip_cost[txb_ctx.txb_skip_ctx][1];
2358   rd_stats->zero_rate = zero_blk_rate;
2359   const int index = av1_get_txb_size_index(plane_bsize, blk_row, blk_col);
2360   mbmi->inter_tx_size[index] = tx_size;
2361   tx_type_rd(cpi, x, tx_size, blk_row, blk_col, block, plane_bsize, &txb_ctx,
2362              rd_stats, ftxs_mode, ref_best_rd);
2363   assert(rd_stats->rate < INT_MAX);
2364 
2365   const int pick_skip_txfm =
2366       !xd->lossless[mbmi->segment_id] &&
2367       (rd_stats->skip_txfm == 1 ||
2368        RDCOST(x->rdmult, rd_stats->rate, rd_stats->dist) >=
2369            RDCOST(x->rdmult, zero_blk_rate, rd_stats->sse));
2370   if (pick_skip_txfm) {
2371 #if CONFIG_RD_DEBUG
2372     update_txb_coeff_cost(rd_stats, 0, zero_blk_rate - rd_stats->rate);
2373 #endif  // CONFIG_RD_DEBUG
2374     rd_stats->rate = zero_blk_rate;
2375     rd_stats->dist = rd_stats->sse;
2376     p->eobs[block] = 0;
2377     update_txk_array(xd, blk_row, blk_col, tx_size, DCT_DCT);
2378   }
2379   rd_stats->skip_txfm = pick_skip_txfm;
2380   set_blk_skip(x->txfm_search_info.blk_skip, 0, blk_row * bw + blk_col,
2381                pick_skip_txfm);
2382 
2383   if (tx_size > TX_4X4 && depth < MAX_VARTX_DEPTH)
2384     rd_stats->rate += x->mode_costs.txfm_partition_cost[txfm_partition_ctx][0];
2385 
2386   no_split->rd = RDCOST(x->rdmult, rd_stats->rate, rd_stats->dist);
2387   no_split->txb_entropy_ctx = p->txb_entropy_ctx[block];
2388   no_split->tx_type =
2389       xd->tx_type_map[blk_row * xd->tx_type_map_stride + blk_col];
2390 }
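
// The no-split skip decision above reduces to an RD comparison: outside
// lossless mode the block is coded as skip either when the search already
// produced skip_txfm, or when
//   RDCOST(x->rdmult, rate, dist) >= RDCOST(x->rdmult, zero_blk_rate, sse),
// i.e. when signalling all-zero coefficients (rate = zero_blk_rate,
// dist = sse) is no worse than coding the quantized coefficients.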
2391 
2392 static inline void try_tx_block_split(
2393     const AV1_COMP *cpi, MACROBLOCK *x, int blk_row, int blk_col, int block,
2394     TX_SIZE tx_size, int depth, BLOCK_SIZE plane_bsize, ENTROPY_CONTEXT *ta,
2395     ENTROPY_CONTEXT *tl, TXFM_CONTEXT *tx_above, TXFM_CONTEXT *tx_left,
2396     int txfm_partition_ctx, int64_t no_split_rd, int64_t ref_best_rd,
2397     FAST_TX_SEARCH_MODE ftxs_mode, RD_STATS *split_rd_stats) {
2398   assert(tx_size < TX_SIZES_ALL);
2399   MACROBLOCKD *const xd = &x->e_mbd;
2400   const int max_blocks_high = max_block_high(xd, plane_bsize, 0);
2401   const int max_blocks_wide = max_block_wide(xd, plane_bsize, 0);
2402   const int txb_width = tx_size_wide_unit[tx_size];
2403   const int txb_height = tx_size_high_unit[tx_size];
2404   // Transform size after splitting current block.
2405   const TX_SIZE sub_txs = sub_tx_size_map[tx_size];
2406   const int sub_txb_width = tx_size_wide_unit[sub_txs];
2407   const int sub_txb_height = tx_size_high_unit[sub_txs];
2408   const int sub_step = sub_txb_width * sub_txb_height;
2409   const int nblks = (txb_height / sub_txb_height) * (txb_width / sub_txb_width);
2410   assert(nblks > 0);
2411   av1_init_rd_stats(split_rd_stats);
2412   split_rd_stats->rate =
2413       x->mode_costs.txfm_partition_cost[txfm_partition_ctx][1];
2414 
2415   for (int r = 0, blk_idx = 0; r < txb_height; r += sub_txb_height) {
2416     const int offsetr = blk_row + r;
2417     if (offsetr >= max_blocks_high) break;
2418     for (int c = 0; c < txb_width; c += sub_txb_width, ++blk_idx) {
2419       assert(blk_idx < 4);
2420       const int offsetc = blk_col + c;
2421       if (offsetc >= max_blocks_wide) continue;
2422 
2423       RD_STATS this_rd_stats;
2424       int this_cost_valid = 1;
2425       select_tx_block(cpi, x, offsetr, offsetc, block, sub_txs, depth + 1,
2426                       plane_bsize, ta, tl, tx_above, tx_left, &this_rd_stats,
2427                       no_split_rd / nblks, ref_best_rd - split_rd_stats->rdcost,
2428                       &this_cost_valid, ftxs_mode);
2429       if (!this_cost_valid) {
2430         split_rd_stats->rdcost = INT64_MAX;
2431         return;
2432       }
2433       av1_merge_rd_stats(split_rd_stats, &this_rd_stats);
2434       split_rd_stats->rdcost =
2435           RDCOST(x->rdmult, split_rd_stats->rate, split_rd_stats->dist);
2436       if (split_rd_stats->rdcost > ref_best_rd) {
2437         split_rd_stats->rdcost = INT64_MAX;
2438         return;
2439       }
2440       block += sub_step;
2441     }
2442   }
2443 }
2444 
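// Note on budgeting in try_tx_block_split() above: each recursive
// select_tx_block() call receives the remaining budget
// ref_best_rd - split_rd_stats->rdcost, plus an equal share
// no_split_rd / nblks of the parent's unsplit cost as its previous-level
// reference, which the callee compares against its own no-split cost to
// decide whether still deeper splitting is worth trying.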
2445 static float get_var(float mean, double x2_sum, int num) {
2446   const float e_x2 = (float)(x2_sum / num);
2447   const float diff = e_x2 - mean * mean;
2448   return diff;
2449 }
2450 
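// Worked example for get_var() above: for the 4 samples {1, 3, 5, 7},
// x2_sum is 1 + 9 + 25 + 49 = 84 and the mean is 4.0, so the population
// variance is E[x^2] - mean^2 = 84 / 4 - 16 = 5.0.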
2451 static inline void get_blk_var_dev(const int16_t *data, int stride, int bw,
2452                                    int bh, float *dev_of_mean,
2453                                    float *var_of_vars) {
2454   const int16_t *const data_ptr = &data[0];
2455   const int subh = (bh >= bw) ? (bh >> 1) : bh;
2456   const int subw = (bw >= bh) ? (bw >> 1) : bw;
2457   const int num = bw * bh;
2458   const int sub_num = subw * subh;
2459   int total_x_sum = 0;
2460   int64_t total_x2_sum = 0;
2461   int blk_idx = 0;
2462   float var_sum = 0.0f;
2463   float mean_sum = 0.0f;
2464   double var2_sum = 0.0f;
2465   double mean2_sum = 0.0f;
2466 
2467   for (int row = 0; row < bh; row += subh) {
2468     for (int col = 0; col < bw; col += subw) {
2469       int x_sum;
2470       int64_t x2_sum;
2471       aom_get_blk_sse_sum(data_ptr + row * stride + col, stride, subw, subh,
2472                           &x_sum, &x2_sum);
2473       total_x_sum += x_sum;
2474       total_x2_sum += x2_sum;
2475 
2476       const float mean = (float)x_sum / sub_num;
2477       const float var = get_var(mean, (double)x2_sum, sub_num);
2478       mean_sum += mean;
2479       mean2_sum += (double)(mean * mean);
2480       var_sum += var;
2481       var2_sum += var * var;
2482       blk_idx++;
2483     }
2484   }
2485 
2486   const float lvl0_mean = (float)total_x_sum / num;
2487   const float block_var = get_var(lvl0_mean, (double)total_x2_sum, num);
2488   mean_sum += lvl0_mean;
2489   mean2_sum += (double)(lvl0_mean * lvl0_mean);
2490   var_sum += block_var;
2491   var2_sum += block_var * block_var;
2492   const float av_mean = mean_sum / 5;
2493 
2494   if (blk_idx > 1) {
2495     // Deviation of means.
2496     *dev_of_mean = get_dev(av_mean, mean2_sum, (blk_idx + 1));
2497     // Variance of variances.
2498     const float mean_var = var_sum / (blk_idx + 1);
2499     *var_of_vars = get_var(mean_var, var2_sum, (blk_idx + 1));
2500   }
2501 }
2502 
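// Shape of the sub-block grid in get_blk_var_dev() above: both dimensions
// are halved for square blocks (4 sub-blocks), but only the longer dimension
// is halved for rectangular ones (2 sub-blocks). The whole-block statistics
// are then appended as one extra sample, hence the (blk_idx + 1)
// denominators.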
2503 static void prune_tx_split_no_split(MACROBLOCK *x, BLOCK_SIZE bsize,
2504                                     int blk_row, int blk_col, TX_SIZE tx_size,
2505                                     int *try_no_split, int *try_split,
2506                                     int pruning_level) {
2507   const int diff_stride = block_size_wide[bsize];
2508   const int16_t *diff =
2509       x->plane[0].src_diff + 4 * blk_row * diff_stride + 4 * blk_col;
2510   const int bw = tx_size_wide[tx_size];
2511   const int bh = tx_size_high[tx_size];
2512   float dev_of_means = 0.0f;
2513   float var_of_vars = 0.0f;
2514 
2515   // This function calculates the deviation of means and the variance of
2516   // pixel variances of the block as well as its sub-blocks.
2517   get_blk_var_dev(diff, diff_stride, bw, bh, &dev_of_means, &var_of_vars);
2518   const int dc_q = x->plane[0].dequant_QTX[0] >> 3;
2519   const int ac_q = x->plane[0].dequant_QTX[1] >> 3;
2520   const int no_split_thresh_scales[4] = { 0, 24, 8, 8 };
2521   const int no_split_thresh_scale = no_split_thresh_scales[pruning_level];
2522   const int split_thresh_scales[4] = { 0, 24, 10, 8 };
2523   const int split_thresh_scale = split_thresh_scales[pruning_level];
2524 
2525   if ((dev_of_means <= dc_q) &&
2526       (split_thresh_scale * var_of_vars <= ac_q * ac_q)) {
2527     *try_split = 0;
2528   }
2529   if ((dev_of_means > no_split_thresh_scale * dc_q) &&
2530       (var_of_vars > no_split_thresh_scale * ac_q * ac_q)) {
2531     *try_no_split = 0;
2532   }
2533 }
2534 
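// A numeric illustration of prune_tx_split_no_split() above (values chosen
// for exposition only). At pruning_level 2, split_thresh_scale is 10 and
// no_split_thresh_scale is 8. With dc_q = 32 and ac_q = 40:
//   - the split path is pruned when dev_of_means <= 32 and
//     10 * var_of_vars <= 1600, i.e. the residual is nearly uniform;
//   - the no-split path is pruned when dev_of_means > 256 and
//     var_of_vars > 12800, i.e. the sub-blocks differ strongly.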
2535 // Search for the best transform partition (recursive) and type for a given
2536 // inter-predicted luma block. The chosen transform partition is saved in
2537 // xd->mi[0], and the corresponding RD stats are saved in rd_stats.
2538 static inline void select_tx_block(
2539     const AV1_COMP *cpi, MACROBLOCK *x, int blk_row, int blk_col, int block,
2540     TX_SIZE tx_size, int depth, BLOCK_SIZE plane_bsize, ENTROPY_CONTEXT *ta,
2541     ENTROPY_CONTEXT *tl, TXFM_CONTEXT *tx_above, TXFM_CONTEXT *tx_left,
2542     RD_STATS *rd_stats, int64_t prev_level_rd, int64_t ref_best_rd,
2543     int *is_cost_valid, FAST_TX_SEARCH_MODE ftxs_mode) {
2544   assert(tx_size < TX_SIZES_ALL);
2545   av1_init_rd_stats(rd_stats);
2546   if (ref_best_rd < 0) {
2547     *is_cost_valid = 0;
2548     return;
2549   }
2550 
2551   MACROBLOCKD *const xd = &x->e_mbd;
2552   assert(blk_row < max_block_high(xd, plane_bsize, 0) &&
2553          blk_col < max_block_wide(xd, plane_bsize, 0));
2554   MB_MODE_INFO *const mbmi = xd->mi[0];
2555   const int ctx = txfm_partition_context(tx_above + blk_col, tx_left + blk_row,
2556                                          mbmi->bsize, tx_size);
2557   struct macroblock_plane *const p = &x->plane[0];
2558 
2559   int try_no_split = (cpi->oxcf.txfm_cfg.enable_tx64 ||
2560                       txsize_sqr_up_map[tx_size] != TX_64X64) &&
2561                      (cpi->oxcf.txfm_cfg.enable_rect_tx ||
2562                       tx_size_wide[tx_size] == tx_size_high[tx_size]);
2563   int try_split = tx_size > TX_4X4 && depth < MAX_VARTX_DEPTH;
2564   TxCandidateInfo no_split = { INT64_MAX, 0, TX_TYPES };
2565 
2566   // Prune tx_split and no-split based on sub-block properties.
2567   if (tx_size != TX_4X4 && try_split == 1 && try_no_split == 1 &&
2568       cpi->sf.tx_sf.prune_tx_size_level > 0) {
2569     prune_tx_split_no_split(x, plane_bsize, blk_row, blk_col, tx_size,
2570                             &try_no_split, &try_split,
2571                             cpi->sf.tx_sf.prune_tx_size_level);
2572   }
2573 
2574   if (cpi->sf.rt_sf.skip_tx_no_split_var_based_partition) {
2575     if (x->try_merge_partition && try_split && p->eobs[block]) try_no_split = 0;
2576   }
2577 
2578   // Try using current block as a single transform block without split.
2579   if (try_no_split) {
2580     try_tx_block_no_split(cpi, x, blk_row, blk_col, block, tx_size, depth,
2581                           plane_bsize, ta, tl, ctx, rd_stats, ref_best_rd,
2582                           ftxs_mode, &no_split);
2583 
2584     // Speed features for early termination.
2585     const int search_level = cpi->sf.tx_sf.adaptive_txb_search_level;
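    // The shift expressions below scale no_split.rd by (1 - 2^-(1 + level)):
    // e.g. with search_level = 1, the first test compares 0.75 * no_split.rd
    // against ref_best_rd and terminates when even that optimistic bound
    // cannot win; the second compares 0.875 * no_split.rd with prev_level_rd
    // to decide whether splitting is still worth trying.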
2586     if (search_level) {
2587       if ((no_split.rd - (no_split.rd >> (1 + search_level))) > ref_best_rd) {
2588         *is_cost_valid = 0;
2589         return;
2590       }
2591       if (no_split.rd - (no_split.rd >> (2 + search_level)) > prev_level_rd) {
2592         try_split = 0;
2593       }
2594     }
2595     if (cpi->sf.tx_sf.txb_split_cap) {
2596       if (p->eobs[block] == 0) try_split = 0;
2597     }
2598   }
2599 
2600   // ML based speed feature to skip searching for split transform blocks.
2601   if (x->e_mbd.bd == 8 && try_split &&
2602       !(ref_best_rd == INT64_MAX && no_split.rd == INT64_MAX)) {
2603     const int threshold = cpi->sf.tx_sf.tx_type_search.ml_tx_split_thresh;
2604     if (threshold >= 0) {
2605       const int split_score =
2606           ml_predict_tx_split(x, plane_bsize, blk_row, blk_col, tx_size);
2607       if (split_score < -threshold) try_split = 0;
2608     }
2609   }
2610 
2611   RD_STATS split_rd_stats;
2612   split_rd_stats.rdcost = INT64_MAX;
2613   // Try splitting current block into smaller transform blocks.
2614   if (try_split) {
2615     try_tx_block_split(cpi, x, blk_row, blk_col, block, tx_size, depth,
2616                        plane_bsize, ta, tl, tx_above, tx_left, ctx, no_split.rd,
2617                        AOMMIN(no_split.rd, ref_best_rd), ftxs_mode,
2618                        &split_rd_stats);
2619   }
2620 
2621   if (no_split.rd < split_rd_stats.rdcost) {
2622     ENTROPY_CONTEXT *pta = ta + blk_col;
2623     ENTROPY_CONTEXT *ptl = tl + blk_row;
2624     p->txb_entropy_ctx[block] = no_split.txb_entropy_ctx;
2625     av1_set_txb_context(x, 0, block, tx_size, pta, ptl);
2626     txfm_partition_update(tx_above + blk_col, tx_left + blk_row, tx_size,
2627                           tx_size);
2628     for (int idy = 0; idy < tx_size_high_unit[tx_size]; ++idy) {
2629       for (int idx = 0; idx < tx_size_wide_unit[tx_size]; ++idx) {
2630         const int index =
2631             av1_get_txb_size_index(plane_bsize, blk_row + idy, blk_col + idx);
2632         mbmi->inter_tx_size[index] = tx_size;
2633       }
2634     }
2635     mbmi->tx_size = tx_size;
2636     update_txk_array(xd, blk_row, blk_col, tx_size, no_split.tx_type);
2637     const int bw = mi_size_wide[plane_bsize];
2638     set_blk_skip(x->txfm_search_info.blk_skip, 0, blk_row * bw + blk_col,
2639                  rd_stats->skip_txfm);
2640   } else {
2641     *rd_stats = split_rd_stats;
2642     if (split_rd_stats.rdcost == INT64_MAX) *is_cost_valid = 0;
2643   }
2644 }
2645 
2646 static inline void choose_largest_tx_size(const AV1_COMP *const cpi,
2647                                           MACROBLOCK *x, RD_STATS *rd_stats,
2648                                           int64_t ref_best_rd, BLOCK_SIZE bs) {
2649   MACROBLOCKD *const xd = &x->e_mbd;
2650   MB_MODE_INFO *const mbmi = xd->mi[0];
2651   const TxfmSearchParams *txfm_params = &x->txfm_search_params;
2652   mbmi->tx_size = tx_size_from_tx_mode(bs, txfm_params->tx_mode_search_type);
2653 
2654   // If tx64 is not enabled, we need to go down to the next available size
2655   if (!cpi->oxcf.txfm_cfg.enable_tx64 && cpi->oxcf.txfm_cfg.enable_rect_tx) {
2656     static const TX_SIZE tx_size_max_32[TX_SIZES_ALL] = {
2657       TX_4X4,    // 4x4 transform
2658       TX_8X8,    // 8x8 transform
2659       TX_16X16,  // 16x16 transform
2660       TX_32X32,  // 32x32 transform
2661       TX_32X32,  // 64x64 transform
2662       TX_4X8,    // 4x8 transform
2663       TX_8X4,    // 8x4 transform
2664       TX_8X16,   // 8x16 transform
2665       TX_16X8,   // 16x8 transform
2666       TX_16X32,  // 16x32 transform
2667       TX_32X16,  // 32x16 transform
2668       TX_32X32,  // 32x64 transform
2669       TX_32X32,  // 64x32 transform
2670       TX_4X16,   // 4x16 transform
2671       TX_16X4,   // 16x4 transform
2672       TX_8X32,   // 8x32 transform
2673       TX_32X8,   // 32x8 transform
2674       TX_16X32,  // 16x64 transform
2675       TX_32X16,  // 64x16 transform
2676     };
2677     mbmi->tx_size = tx_size_max_32[mbmi->tx_size];
2678   } else if (cpi->oxcf.txfm_cfg.enable_tx64 &&
2679              !cpi->oxcf.txfm_cfg.enable_rect_tx) {
2680     static const TX_SIZE tx_size_max_square[TX_SIZES_ALL] = {
2681       TX_4X4,    // 4x4 transform
2682       TX_8X8,    // 8x8 transform
2683       TX_16X16,  // 16x16 transform
2684       TX_32X32,  // 32x32 transform
2685       TX_64X64,  // 64x64 transform
2686       TX_4X4,    // 4x8 transform
2687       TX_4X4,    // 8x4 transform
2688       TX_8X8,    // 8x16 transform
2689       TX_8X8,    // 16x8 transform
2690       TX_16X16,  // 16x32 transform
2691       TX_16X16,  // 32x16 transform
2692       TX_32X32,  // 32x64 transform
2693       TX_32X32,  // 64x32 transform
2694       TX_4X4,    // 4x16 transform
2695       TX_4X4,    // 16x4 transform
2696       TX_8X8,    // 8x32 transform
2697       TX_8X8,    // 32x8 transform
2698       TX_16X16,  // 16x64 transform
2699       TX_16X16,  // 64x16 transform
2700     };
2701     mbmi->tx_size = tx_size_max_square[mbmi->tx_size];
2702   } else if (!cpi->oxcf.txfm_cfg.enable_tx64 &&
2703              !cpi->oxcf.txfm_cfg.enable_rect_tx) {
2704     static const TX_SIZE tx_size_max_32_square[TX_SIZES_ALL] = {
2705       TX_4X4,    // 4x4 transform
2706       TX_8X8,    // 8x8 transform
2707       TX_16X16,  // 16x16 transform
2708       TX_32X32,  // 32x32 transform
2709       TX_32X32,  // 64x64 transform
2710       TX_4X4,    // 4x8 transform
2711       TX_4X4,    // 8x4 transform
2712       TX_8X8,    // 8x16 transform
2713       TX_8X8,    // 16x8 transform
2714       TX_16X16,  // 16x32 transform
2715       TX_16X16,  // 32x16 transform
2716       TX_32X32,  // 32x64 transform
2717       TX_32X32,  // 64x32 transform
2718       TX_4X4,    // 4x16 transform
2719       TX_4X4,    // 16x4 transform
2720       TX_8X8,    // 8x32 transform
2721       TX_8X8,    // 32x8 transform
2722       TX_16X16,  // 16x64 transform
2723       TX_16X16,  // 64x16 transform
2724     };
2725 
2726     mbmi->tx_size = tx_size_max_32_square[mbmi->tx_size];
2727   }
2728 
2729   const int skip_ctx = av1_get_skip_txfm_context(xd);
2730   const int no_skip_txfm_rate = x->mode_costs.skip_txfm_cost[skip_ctx][0];
2731   const int skip_txfm_rate = x->mode_costs.skip_txfm_cost[skip_ctx][1];
2732   // Skip RD cost is used only for inter blocks.
2733   const int64_t skip_txfm_rd =
2734       is_inter_block(mbmi) ? RDCOST(x->rdmult, skip_txfm_rate, 0) : INT64_MAX;
2735   const int64_t no_skip_txfm_rd = RDCOST(x->rdmult, no_skip_txfm_rate, 0);
2736   const int skip_trellis = 0;
2737   av1_txfm_rd_in_plane(x, cpi, rd_stats, ref_best_rd,
2738                        AOMMIN(no_skip_txfm_rd, skip_txfm_rd), AOM_PLANE_Y, bs,
2739                        mbmi->tx_size, FTXS_NONE, skip_trellis);
2740 }
2741 
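// Effect of the clamping tables in choose_largest_tx_size() above, taking a
// 16x32 block as an example: the initial TX_16X32 stays TX_16X32 when only
// tx64 is disabled, but is clamped to the square TX_16X16 whenever
// rectangular transforms are disabled.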
2742 static inline void choose_smallest_tx_size(const AV1_COMP *const cpi,
2743                                            MACROBLOCK *x, RD_STATS *rd_stats,
2744                                            int64_t ref_best_rd, BLOCK_SIZE bs) {
2745   MACROBLOCKD *const xd = &x->e_mbd;
2746   MB_MODE_INFO *const mbmi = xd->mi[0];
2747 
2748   mbmi->tx_size = TX_4X4;
2749   // TODO(any) : Pass this_rd based on skip/non-skip cost
2750   const int skip_trellis = 0;
2751   av1_txfm_rd_in_plane(x, cpi, rd_stats, ref_best_rd, 0, 0, bs, mbmi->tx_size,
2752                        FTXS_NONE, skip_trellis);
2753 }
2754 
2755 #if !CONFIG_REALTIME_ONLY
2756 static void ml_predict_intra_tx_depth_prune(MACROBLOCK *x, int blk_row,
2757                                             int blk_col, BLOCK_SIZE bsize,
2758                                             TX_SIZE tx_size) {
2759   const MACROBLOCKD *const xd = &x->e_mbd;
2760   const MB_MODE_INFO *const mbmi = xd->mi[0];
2761 
2762   // Disable the NN-model-based pruning logic in the following cases:
2763   // 1) Lossless coding, as only the 4x4 transform is evaluated in this case.
2764   // 2) When the transform and current block sizes do not match, as the
2765   // features are obtained over the current block.
2766   // 3) When the operating bit-depth is not 8-bit, as the input features are
2767   // not scaled according to bit-depth.
2768   if (xd->lossless[mbmi->segment_id] || txsize_to_bsize[tx_size] != bsize ||
2769       xd->bd != 8)
2770     return;
2771 
2772   // Currently NN model based pruning is supported only when largest transform
2773   // size is 8x8
2774   if (tx_size != TX_8X8) return;
2775 
2776   // The neural network model is a sequential neural net and was trained using
2777   // the SGD optimizer. The model can be further improved in terms of
2778   // speed/quality by considering the following experiments:
2779   // 1) Generate the ML model by training with balanced data for different
2780   // learning rates and optimizers.
2781   // 2) Experiment with the ML model by adding features related to the
2782   // statistics of top and left pixels to capture the accuracy of reconstructed
2783   // neighbouring pixels for the 4x4 blocks numbered 1, 2, 3 in an 8x8 block,
2784   // the source variance of 4x4 sub-blocks, etc.
2785   // 3) Generate ML models for transform blocks other than 8x8.
2786   const NN_CONFIG *const nn_config = &av1_intra_tx_split_nnconfig_8x8;
2787   const float *const intra_tx_prune_thresh = av1_intra_tx_prune_nn_thresh_8x8;
2788 
2789   float features[NUM_INTRA_TX_SPLIT_FEATURES] = { 0.0f };
2790   const int diff_stride = block_size_wide[bsize];
2791 
2792   const int16_t *diff = x->plane[0].src_diff + MI_SIZE * blk_row * diff_stride +
2793                         MI_SIZE * blk_col;
2794   const int bw = tx_size_wide[tx_size];
2795   const int bh = tx_size_high[tx_size];
2796 
2797   int feature_idx = get_mean_dev_features(diff, diff_stride, bw, bh, features);
2798 
2799   features[feature_idx++] = log1pf((float)x->source_variance);
2800 
2801   const int dc_q = av1_dc_quant_QTX(x->qindex, 0, xd->bd) >> (xd->bd - 8);
2802   const float log_dc_q_square = log1pf((float)(dc_q * dc_q) / 256.0f);
2803   features[feature_idx++] = log_dc_q_square;
2804   assert(feature_idx == NUM_INTRA_TX_SPLIT_FEATURES);
2805   for (int i = 0; i < NUM_INTRA_TX_SPLIT_FEATURES; i++) {
2806     features[i] = (features[i] - av1_intra_tx_split_8x8_mean[i]) /
2807                   av1_intra_tx_split_8x8_std[i];
2808   }
2809 
2810   float score;
2811   av1_nn_predict(features, nn_config, 1, &score);
2812 
2813   TxfmSearchParams *const txfm_params = &x->txfm_search_params;
2814   if (score <= intra_tx_prune_thresh[0])
2815     txfm_params->nn_prune_depths_for_intra_tx = TX_PRUNE_SPLIT;
2816   else if (score > intra_tx_prune_thresh[1])
2817     txfm_params->nn_prune_depths_for_intra_tx = TX_PRUNE_LARGEST;
2818 }
2819 #endif  // !CONFIG_REALTIME_ONLY
2820 
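// A minimal sketch of the z-score normalisation performed before
// av1_nn_predict() above; the helper and parameter names are illustrative,
// not part of the encoder.
static inline void sketch_zscore_features(float *features, const float *mean,
                                          const float *std, int n) {
  // Shift each feature by its training mean and scale by its training
  // standard deviation so the NN sees inputs on the scale it was trained on.
  for (int i = 0; i < n; ++i) features[i] = (features[i] - mean[i]) / std[i];
}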
2821 /*!\brief Transform type search for luma macroblock with fixed transform size.
2822  *
2823  * \ingroup transform_search
2824  * Search for the best transform type and return the transform coefficients RD
2825  * cost of current luma macroblock with the given uniform transform size.
2826  *
2827  * \param[in]    x              Pointer to structure holding the data for the
2828                                 current encoding macroblock
2829  * \param[in]    cpi            Top-level encoder structure
2830  * \param[in]    rd_stats       Pointer to struct to keep track of the RD stats
2831  * \param[in]    ref_best_rd    Best RD cost seen for this block so far
2832  * \param[in]    bs             Size of the current macroblock
2833  * \param[in]    tx_size        The given transform size
2834  * \param[in]    ftxs_mode      Transform search mode specifying desired speed
2835                                 and quality tradeoff
2836  * \param[in]    skip_trellis   Binary flag indicating if trellis optimization
2837                                 should be skipped
2838  * \return       An int64_t value that is the best RD cost found.
2839  */
2840 static int64_t uniform_txfm_yrd(const AV1_COMP *const cpi, MACROBLOCK *x,
2841                                 RD_STATS *rd_stats, int64_t ref_best_rd,
2842                                 BLOCK_SIZE bs, TX_SIZE tx_size,
2843                                 FAST_TX_SEARCH_MODE ftxs_mode,
2844                                 int skip_trellis) {
2845   assert(IMPLIES(is_rect_tx(tx_size), is_rect_tx_allowed_bsize(bs)));
2846   MACROBLOCKD *const xd = &x->e_mbd;
2847   MB_MODE_INFO *const mbmi = xd->mi[0];
2848   const TxfmSearchParams *txfm_params = &x->txfm_search_params;
2849   const ModeCosts *mode_costs = &x->mode_costs;
2850   const int is_inter = is_inter_block(mbmi);
2851   const int tx_select = txfm_params->tx_mode_search_type == TX_MODE_SELECT &&
2852                         block_signals_txsize(mbmi->bsize);
2853   int tx_size_rate = 0;
2854   if (tx_select) {
2855     const int ctx = txfm_partition_context(
2856         xd->above_txfm_context, xd->left_txfm_context, mbmi->bsize, tx_size);
2857     tx_size_rate = is_inter ? mode_costs->txfm_partition_cost[ctx][0]
2858                             : tx_size_cost(x, bs, tx_size);
2859   }
2860   const int skip_ctx = av1_get_skip_txfm_context(xd);
2861   const int no_skip_txfm_rate = mode_costs->skip_txfm_cost[skip_ctx][0];
2862   const int skip_txfm_rate = mode_costs->skip_txfm_cost[skip_ctx][1];
2863   const int64_t skip_txfm_rd =
2864       is_inter ? RDCOST(x->rdmult, skip_txfm_rate, 0) : INT64_MAX;
2865   const int64_t no_this_rd =
2866       RDCOST(x->rdmult, no_skip_txfm_rate + tx_size_rate, 0);
2867 
2868   mbmi->tx_size = tx_size;
2869   av1_txfm_rd_in_plane(x, cpi, rd_stats, ref_best_rd,
2870                        AOMMIN(no_this_rd, skip_txfm_rd), AOM_PLANE_Y, bs,
2871                        tx_size, ftxs_mode, skip_trellis);
2872   if (rd_stats->rate == INT_MAX) return INT64_MAX;
2873 
2874   int64_t rd;
2875   // rd_stats->rate should include all the rate except the skip/non-skip cost,
2876   // as that cost is accounted for in the caller functions after the rd
2877   // evaluation of all planes. However, the decision should be made after
2878   // considering the skip/non-skip header cost.
2879   if (rd_stats->skip_txfm && is_inter) {
2880     rd = RDCOST(x->rdmult, skip_txfm_rate, rd_stats->sse);
2881   } else {
2882     // Intra blocks are always signalled as non-skip
2883     rd = RDCOST(x->rdmult, rd_stats->rate + no_skip_txfm_rate + tx_size_rate,
2884                 rd_stats->dist);
2885     rd_stats->rate += tx_size_rate;
2886   }
2887   // Check if forcing the block to skip transform leads to smaller RD cost.
2888   if (is_inter && !rd_stats->skip_txfm && !xd->lossless[mbmi->segment_id]) {
2889     int64_t temp_skip_txfm_rd =
2890         RDCOST(x->rdmult, skip_txfm_rate, rd_stats->sse);
2891     if (temp_skip_txfm_rd <= rd) {
2892       rd = temp_skip_txfm_rd;
2893       rd_stats->rate = 0;
2894       rd_stats->dist = rd_stats->sse;
2895       rd_stats->skip_txfm = 1;
2896     }
2897   }
2898 
2899   return rd;
2900 }
2901 
2902 // Search for the best uniform transform size and type for the current block.
2903 static inline void choose_tx_size_type_from_rd(const AV1_COMP *const cpi,
2904                                                MACROBLOCK *x,
2905                                                RD_STATS *rd_stats,
2906                                                int64_t ref_best_rd,
2907                                                BLOCK_SIZE bs) {
2908   av1_invalid_rd_stats(rd_stats);
2909 
2910   MACROBLOCKD *const xd = &x->e_mbd;
2911   MB_MODE_INFO *const mbmi = xd->mi[0];
2912   TxfmSearchParams *const txfm_params = &x->txfm_search_params;
2913   const TX_SIZE max_rect_tx_size = max_txsize_rect_lookup[bs];
2914   const int tx_select = txfm_params->tx_mode_search_type == TX_MODE_SELECT;
2915   int start_tx;
2916   // The split depth can be at most MAX_TX_DEPTH, so the init_depth controls
2917   // how many levels of splitting are allowed during the RD search.
2918   int init_depth;
2919 
2920   if (tx_select) {
2921     start_tx = max_rect_tx_size;
2922     init_depth = get_search_init_depth(mi_size_wide[bs], mi_size_high[bs],
2923                                        is_inter_block(mbmi), &cpi->sf,
2924                                        txfm_params->tx_size_search_method);
2925     if (init_depth == MAX_TX_DEPTH && !cpi->oxcf.txfm_cfg.enable_tx64 &&
2926         txsize_sqr_up_map[start_tx] == TX_64X64) {
2927       start_tx = sub_tx_size_map[start_tx];
2928     }
2929   } else {
2930     const TX_SIZE chosen_tx_size =
2931         tx_size_from_tx_mode(bs, txfm_params->tx_mode_search_type);
2932     start_tx = chosen_tx_size;
2933     init_depth = MAX_TX_DEPTH;
2934   }
2935 
2936   const int skip_trellis = 0;
2937   uint8_t best_txk_type_map[MAX_MIB_SIZE * MAX_MIB_SIZE];
2938   uint8_t best_blk_skip[MAX_MIB_SIZE * MAX_MIB_SIZE];
2939   TX_SIZE best_tx_size = max_rect_tx_size;
2940   int64_t best_rd = INT64_MAX;
2941   const int num_blks = bsize_to_num_blk(bs);
2942   x->rd_model = FULL_TXFM_RD;
2943   int64_t rd[MAX_TX_DEPTH + 1] = { INT64_MAX, INT64_MAX, INT64_MAX };
2944   TxfmSearchInfo *txfm_info = &x->txfm_search_info;
2945   for (int tx_size = start_tx, depth = init_depth; depth <= MAX_TX_DEPTH;
2946        depth++, tx_size = sub_tx_size_map[tx_size]) {
2947     if ((!cpi->oxcf.txfm_cfg.enable_tx64 &&
2948          txsize_sqr_up_map[tx_size] == TX_64X64) ||
2949         (!cpi->oxcf.txfm_cfg.enable_rect_tx &&
2950          tx_size_wide[tx_size] != tx_size_high[tx_size])) {
2951       continue;
2952     }
2953 
2954 #if !CONFIG_REALTIME_ONLY
2955     if (txfm_params->nn_prune_depths_for_intra_tx == TX_PRUNE_SPLIT) break;
2956 
2957     // Set the flag to enable evaluation of the NN classifier that prunes
2958     // transform depths. As the features are based on the intra residual of
2959     // the largest transform, evaluation of the NN model is enabled only for
2960     // this case.
2961     txfm_params->enable_nn_prune_intra_tx_depths =
2962         (cpi->sf.tx_sf.prune_intra_tx_depths_using_nn && tx_size == start_tx);
2963 #endif
2964 
2965     RD_STATS this_rd_stats;
2966     // When the speed feature use_rd_based_breakout_for_intra_tx_search is
2967     // enabled, use the known minimum best_rd for early termination.
2968     const int64_t rd_thresh =
2969         cpi->sf.tx_sf.use_rd_based_breakout_for_intra_tx_search
2970             ? AOMMIN(ref_best_rd, best_rd)
2971             : ref_best_rd;
2972     rd[depth] = uniform_txfm_yrd(cpi, x, &this_rd_stats, rd_thresh, bs, tx_size,
2973                                  FTXS_NONE, skip_trellis);
2974     if (rd[depth] < best_rd) {
2975       av1_copy_array(best_blk_skip, txfm_info->blk_skip, num_blks);
2976       av1_copy_array(best_txk_type_map, xd->tx_type_map, num_blks);
2977       best_tx_size = tx_size;
2978       best_rd = rd[depth];
2979       *rd_stats = this_rd_stats;
2980     }
2981     if (tx_size == TX_4X4) break;
2982     // If we are searching three depths, prune the smallest size depending
2983     // on rd results for the first two depths for low contrast blocks.
2984     if (depth > init_depth && depth != MAX_TX_DEPTH &&
2985         x->source_variance < 256) {
2986       if (rd[depth - 1] != INT64_MAX && rd[depth] > rd[depth - 1]) break;
2987     }
2988   }
2989 
2990   if (rd_stats->rate != INT_MAX) {
2991     mbmi->tx_size = best_tx_size;
2992     av1_copy_array(xd->tx_type_map, best_txk_type_map, num_blks);
2993     av1_copy_array(txfm_info->blk_skip, best_blk_skip, num_blks);
2994   }
2995 
2996 #if !CONFIG_REALTIME_ONLY
2997   // Reset the flags to avoid any unintentional evaluation of the NN model
2998   // and consumption of stale prune depths.
2999   txfm_params->enable_nn_prune_intra_tx_depths = false;
3000   txfm_params->nn_prune_depths_for_intra_tx = TX_PRUNE_NONE;
3001 #endif
3002 }
3003 
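// Illustration of the depth loop in choose_tx_size_type_from_rd() above:
// sizes shrink through sub_tx_size_map as depth increases, so a 32x16 block
// with init_depth 0 evaluates TX_32X16, then TX_16X16, then TX_8X8
// (MAX_TX_DEPTH is 2, hence the three-entry rd[] array).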
3004 // Search for the best transform type for the given transform block in the
3005 // given plane/channel, and calculate the corresponding RD cost.
3006 static inline void block_rd_txfm(int plane, int block, int blk_row, int blk_col,
3007                                  BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
3008                                  void *arg) {
3009   struct rdcost_block_args *args = arg;
3010   if (args->exit_early) {
3011     args->incomplete_exit = 1;
3012     return;
3013   }
3014 
3015   MACROBLOCK *const x = args->x;
3016   MACROBLOCKD *const xd = &x->e_mbd;
3017   const int is_inter = is_inter_block(xd->mi[0]);
3018   const AV1_COMP *cpi = args->cpi;
3019   ENTROPY_CONTEXT *a = args->t_above + blk_col;
3020   ENTROPY_CONTEXT *l = args->t_left + blk_row;
3021   const AV1_COMMON *cm = &cpi->common;
3022   RD_STATS this_rd_stats;
3023   av1_init_rd_stats(&this_rd_stats);
3024 
3025   if (!is_inter) {
3026     av1_predict_intra_block_facade(cm, xd, plane, blk_col, blk_row, tx_size);
3027     av1_subtract_txb(x, plane, plane_bsize, blk_col, blk_row, tx_size);
3028 #if !CONFIG_REALTIME_ONLY
3029     const TxfmSearchParams *const txfm_params = &x->txfm_search_params;
3030     if (txfm_params->enable_nn_prune_intra_tx_depths) {
3031       ml_predict_intra_tx_depth_prune(x, blk_row, blk_col, plane_bsize,
3032                                       tx_size);
3033       if (txfm_params->nn_prune_depths_for_intra_tx == TX_PRUNE_LARGEST) {
3034         av1_invalid_rd_stats(&args->rd_stats);
3035         args->exit_early = 1;
3036         return;
3037       }
3038     }
3039 #endif
3040   }
3041 
3042   TXB_CTX txb_ctx;
3043   get_txb_ctx(plane_bsize, tx_size, plane, a, l, &txb_ctx);
3044   search_tx_type(cpi, x, plane, block, blk_row, blk_col, plane_bsize, tx_size,
3045                  &txb_ctx, args->ftxs_mode, args->skip_trellis,
3046                  args->best_rd - args->current_rd, &this_rd_stats);
3047 
3048 #if !CONFIG_REALTIME_ONLY
3049   if (plane == AOM_PLANE_Y && xd->cfl.store_y) {
3050     assert(!is_inter || plane_bsize < BLOCK_8X8);
3051     cfl_store_tx(xd, blk_row, blk_col, tx_size, plane_bsize);
3052   }
3053 #endif
3054 
3055 #if CONFIG_RD_DEBUG
3056   update_txb_coeff_cost(&this_rd_stats, plane, this_rd_stats.rate);
3057 #endif  // CONFIG_RD_DEBUG
3058   av1_set_txb_context(x, plane, block, tx_size, a, l);
3059 
3060   const int blk_idx =
3061       blk_row * (block_size_wide[plane_bsize] >> MI_SIZE_LOG2) + blk_col;
3062 
3063   TxfmSearchInfo *txfm_info = &x->txfm_search_info;
3064   if (plane == 0)
3065     set_blk_skip(txfm_info->blk_skip, plane, blk_idx,
3066                  x->plane[plane].eobs[block] == 0);
3067   else
3068     set_blk_skip(txfm_info->blk_skip, plane, blk_idx, 0);
3069 
3070   int64_t rd;
3071   if (is_inter) {
3072     const int64_t no_skip_txfm_rd =
3073         RDCOST(x->rdmult, this_rd_stats.rate, this_rd_stats.dist);
3074     const int64_t skip_txfm_rd = RDCOST(x->rdmult, 0, this_rd_stats.sse);
3075     rd = AOMMIN(no_skip_txfm_rd, skip_txfm_rd);
3076     this_rd_stats.skip_txfm &= !x->plane[plane].eobs[block];
3077   } else {
3078     // Signal non-skip_txfm for Intra blocks
3079     rd = RDCOST(x->rdmult, this_rd_stats.rate, this_rd_stats.dist);
3080     this_rd_stats.skip_txfm = 0;
3081   }
3082 
3083   av1_merge_rd_stats(&args->rd_stats, &this_rd_stats);
3084 
3085   args->current_rd += rd;
3086   if (args->current_rd > args->best_rd) args->exit_early = 1;
3087 }
3088 
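// RD accounting in block_rd_txfm() above: an inter transform block
// contributes min(RDCOST(rate, dist), RDCOST(0, sse)) to the running cost,
// since it may still be covered by a block-level skip, whereas intra blocks
// are always signalled as coded and accumulate the coded cost only.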
3089 int64_t av1_estimate_txfm_yrd(const AV1_COMP *const cpi, MACROBLOCK *x,
3090                               RD_STATS *rd_stats, int64_t ref_best_rd,
3091                               BLOCK_SIZE bs, TX_SIZE tx_size) {
3092   MACROBLOCKD *const xd = &x->e_mbd;
3093   MB_MODE_INFO *const mbmi = xd->mi[0];
3094   const TxfmSearchParams *txfm_params = &x->txfm_search_params;
3095   const ModeCosts *mode_costs = &x->mode_costs;
3096   const int is_inter = is_inter_block(mbmi);
3097   const int tx_select = txfm_params->tx_mode_search_type == TX_MODE_SELECT &&
3098                         block_signals_txsize(mbmi->bsize);
3099   int tx_size_rate = 0;
3100   if (tx_select) {
3101     const int ctx = txfm_partition_context(
3102         xd->above_txfm_context, xd->left_txfm_context, mbmi->bsize, tx_size);
3103     tx_size_rate = mode_costs->txfm_partition_cost[ctx][0];
3104   }
3105   const int skip_ctx = av1_get_skip_txfm_context(xd);
3106   const int no_skip_txfm_rate = mode_costs->skip_txfm_cost[skip_ctx][0];
3107   const int skip_txfm_rate = mode_costs->skip_txfm_cost[skip_ctx][1];
3108   const int64_t skip_txfm_rd = RDCOST(x->rdmult, skip_txfm_rate, 0);
3109   const int64_t no_this_rd =
3110       RDCOST(x->rdmult, no_skip_txfm_rate + tx_size_rate, 0);
3111   mbmi->tx_size = tx_size;
3112 
3113   const uint8_t txw_unit = tx_size_wide_unit[tx_size];
3114   const uint8_t txh_unit = tx_size_high_unit[tx_size];
3115   const int step = txw_unit * txh_unit;
3116   const int max_blocks_wide = max_block_wide(xd, bs, 0);
3117   const int max_blocks_high = max_block_high(xd, bs, 0);
3118 
3119   struct rdcost_block_args args;
3120   av1_zero(args);
3121   args.x = x;
3122   args.cpi = cpi;
3123   args.best_rd = ref_best_rd;
3124   args.current_rd = AOMMIN(no_this_rd, skip_txfm_rd);
3125   av1_init_rd_stats(&args.rd_stats);
3126   av1_get_entropy_contexts(bs, &xd->plane[0], args.t_above, args.t_left);
3127   int i = 0;
3128   for (int blk_row = 0; blk_row < max_blocks_high && !args.incomplete_exit;
3129        blk_row += txh_unit) {
3130     for (int blk_col = 0; blk_col < max_blocks_wide; blk_col += txw_unit) {
3131       RD_STATS this_rd_stats;
3132       av1_init_rd_stats(&this_rd_stats);
3133 
3134       if (args.exit_early) {
3135         args.incomplete_exit = 1;
3136         break;
3137       }
3138 
3139       ENTROPY_CONTEXT *a = args.t_above + blk_col;
3140       ENTROPY_CONTEXT *l = args.t_left + blk_row;
3141       TXB_CTX txb_ctx;
3142       get_txb_ctx(bs, tx_size, 0, a, l, &txb_ctx);
3143 
3144       TxfmParam txfm_param;
3145       QUANT_PARAM quant_param;
3146       av1_setup_xform(&cpi->common, x, tx_size, DCT_DCT, &txfm_param);
3147       av1_setup_quant(tx_size, 0, AV1_XFORM_QUANT_B, 0, &quant_param);
3148 
3149       av1_xform(x, 0, i, blk_row, blk_col, bs, &txfm_param);
3150       av1_quant(x, 0, i, &txfm_param, &quant_param);
3151 
3152       this_rd_stats.rate =
3153           cost_coeffs(x, 0, i, tx_size, txfm_param.tx_type, &txb_ctx, 0);
3154 
3155       const SCAN_ORDER *const scan_order =
3156           get_scan(txfm_param.tx_size, txfm_param.tx_type);
3157       dist_block_tx_domain(x, 0, i, tx_size, quant_param.qmatrix,
3158                            scan_order->scan, &this_rd_stats.dist,
3159                            &this_rd_stats.sse);
3160 
3161       const int64_t no_skip_txfm_rd =
3162           RDCOST(x->rdmult, this_rd_stats.rate, this_rd_stats.dist);
3163       const int64_t skip_rd = RDCOST(x->rdmult, 0, this_rd_stats.sse);
3164 
3165       this_rd_stats.skip_txfm &= !x->plane[0].eobs[i];
3166 
3167       av1_merge_rd_stats(&args.rd_stats, &this_rd_stats);
3168       args.current_rd += AOMMIN(no_skip_txfm_rd, skip_rd);
3169 
3170       if (args.current_rd > ref_best_rd) {
3171         args.exit_early = 1;
3172         break;
3173       }
3174 
3175       av1_set_txb_context(x, 0, i, tx_size, a, l);
3176       i += step;
3177     }
3178   }
3179 
3180   if (args.incomplete_exit) av1_invalid_rd_stats(&args.rd_stats);
3181 
3182   *rd_stats = args.rd_stats;
3183   if (rd_stats->rate == INT_MAX) return INT64_MAX;
3184 
3185   int64_t rd;
3186   // rd_stats->rate should include all the rate except the skip/non-skip cost,
3187   // as that cost is accounted for in the caller functions after the rd
3188   // evaluation of all planes. However, the decision should be made after
3189   // considering the skip/non-skip header cost.
3190   if (rd_stats->skip_txfm && is_inter) {
3191     rd = RDCOST(x->rdmult, skip_txfm_rate, rd_stats->sse);
3192   } else {
3193     // Intra blocks are always signalled as non-skip
3194     rd = RDCOST(x->rdmult, rd_stats->rate + no_skip_txfm_rate + tx_size_rate,
3195                 rd_stats->dist);
3196     rd_stats->rate += tx_size_rate;
3197   }
3198   // Check if forcing the block to skip transform leads to smaller RD cost.
3199   if (is_inter && !rd_stats->skip_txfm && !xd->lossless[mbmi->segment_id]) {
3200     int64_t temp_skip_txfm_rd =
3201         RDCOST(x->rdmult, skip_txfm_rate, rd_stats->sse);
3202     if (temp_skip_txfm_rd <= rd) {
3203       rd = temp_skip_txfm_rd;
3204       rd_stats->rate = 0;
3205       rd_stats->dist = rd_stats->sse;
3206       rd_stats->skip_txfm = 1;
3207     }
3208   }
3209 
3210   return rd;
3211 }
3212 
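// av1_estimate_txfm_yrd() above is a fast counterpart of uniform_txfm_yrd():
// it transforms and quantises every block with DCT_DCT only and measures
// distortion in the transform domain, trading search accuracy for speed.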
3213 // Search for the best transform type for a luma inter-predicted block, given
3214 // the transform block partitions.
3215 // This function is used only when some speed features are enabled.
3216 static inline void tx_block_yrd(const AV1_COMP *cpi, MACROBLOCK *x, int blk_row,
3217                                 int blk_col, int block, TX_SIZE tx_size,
3218                                 BLOCK_SIZE plane_bsize, int depth,
3219                                 ENTROPY_CONTEXT *above_ctx,
3220                                 ENTROPY_CONTEXT *left_ctx,
3221                                 TXFM_CONTEXT *tx_above, TXFM_CONTEXT *tx_left,
3222                                 int64_t ref_best_rd, RD_STATS *rd_stats,
3223                                 FAST_TX_SEARCH_MODE ftxs_mode) {
3224   assert(tx_size < TX_SIZES_ALL);
3225   MACROBLOCKD *const xd = &x->e_mbd;
3226   MB_MODE_INFO *const mbmi = xd->mi[0];
3227   assert(is_inter_block(mbmi));
3228   const int max_blocks_high = max_block_high(xd, plane_bsize, 0);
3229   const int max_blocks_wide = max_block_wide(xd, plane_bsize, 0);
3230 
3231   if (blk_row >= max_blocks_high || blk_col >= max_blocks_wide) return;
3232 
3233   const TX_SIZE plane_tx_size = mbmi->inter_tx_size[av1_get_txb_size_index(
3234       plane_bsize, blk_row, blk_col)];
3235   const int ctx = txfm_partition_context(tx_above + blk_col, tx_left + blk_row,
3236                                          mbmi->bsize, tx_size);
3237 
3238   av1_init_rd_stats(rd_stats);
3239   if (tx_size == plane_tx_size) {
3240     ENTROPY_CONTEXT *ta = above_ctx + blk_col;
3241     ENTROPY_CONTEXT *tl = left_ctx + blk_row;
3242     const TX_SIZE txs_ctx = get_txsize_entropy_ctx(tx_size);
3243     TXB_CTX txb_ctx;
3244     get_txb_ctx(plane_bsize, tx_size, 0, ta, tl, &txb_ctx);
3245 
3246     const int zero_blk_rate =
3247         x->coeff_costs.coeff_costs[txs_ctx][get_plane_type(0)]
3248             .txb_skip_cost[txb_ctx.txb_skip_ctx][1];
3249     rd_stats->zero_rate = zero_blk_rate;
3250     tx_type_rd(cpi, x, tx_size, blk_row, blk_col, block, plane_bsize, &txb_ctx,
3251                rd_stats, ftxs_mode, ref_best_rd);
3252     const int mi_width = mi_size_wide[plane_bsize];
3253     TxfmSearchInfo *txfm_info = &x->txfm_search_info;
3254     if (RDCOST(x->rdmult, rd_stats->rate, rd_stats->dist) >=
3255             RDCOST(x->rdmult, zero_blk_rate, rd_stats->sse) ||
3256         rd_stats->skip_txfm == 1) {
3257       rd_stats->rate = zero_blk_rate;
3258       rd_stats->dist = rd_stats->sse;
3259       rd_stats->skip_txfm = 1;
3260       set_blk_skip(txfm_info->blk_skip, 0, blk_row * mi_width + blk_col, 1);
3261       x->plane[0].eobs[block] = 0;
3262       x->plane[0].txb_entropy_ctx[block] = 0;
3263       update_txk_array(xd, blk_row, blk_col, tx_size, DCT_DCT);
3264     } else {
3265       rd_stats->skip_txfm = 0;
3266       set_blk_skip(txfm_info->blk_skip, 0, blk_row * mi_width + blk_col, 0);
3267     }
3268     if (tx_size > TX_4X4 && depth < MAX_VARTX_DEPTH)
3269       rd_stats->rate += x->mode_costs.txfm_partition_cost[ctx][0];
3270     av1_set_txb_context(x, 0, block, tx_size, ta, tl);
3271     txfm_partition_update(tx_above + blk_col, tx_left + blk_row, tx_size,
3272                           tx_size);
3273   } else {
3274     const TX_SIZE sub_txs = sub_tx_size_map[tx_size];
3275     const int txb_width = tx_size_wide_unit[sub_txs];
3276     const int txb_height = tx_size_high_unit[sub_txs];
3277     const int step = txb_height * txb_width;
3278     const int row_end =
3279         AOMMIN(tx_size_high_unit[tx_size], max_blocks_high - blk_row);
3280     const int col_end =
3281         AOMMIN(tx_size_wide_unit[tx_size], max_blocks_wide - blk_col);
3282     RD_STATS pn_rd_stats;
3283     int64_t this_rd = 0;
3284     assert(txb_width > 0 && txb_height > 0);
3285 
3286     for (int row = 0; row < row_end; row += txb_height) {
3287       const int offsetr = blk_row + row;
3288       for (int col = 0; col < col_end; col += txb_width) {
3289         const int offsetc = blk_col + col;
3290 
3291         av1_init_rd_stats(&pn_rd_stats);
3292         tx_block_yrd(cpi, x, offsetr, offsetc, block, sub_txs, plane_bsize,
3293                      depth + 1, above_ctx, left_ctx, tx_above, tx_left,
3294                      ref_best_rd - this_rd, &pn_rd_stats, ftxs_mode);
3295         if (pn_rd_stats.rate == INT_MAX) {
3296           av1_invalid_rd_stats(rd_stats);
3297           return;
3298         }
3299         av1_merge_rd_stats(rd_stats, &pn_rd_stats);
3300         this_rd += RDCOST(x->rdmult, pn_rd_stats.rate, pn_rd_stats.dist);
3301         block += step;
3302       }
3303     }
3304 
3305     if (tx_size > TX_4X4 && depth < MAX_VARTX_DEPTH)
3306       rd_stats->rate += x->mode_costs.txfm_partition_cost[ctx][1];
3307   }
3308 }
3309 
3310 // Search for the tx type with tx sizes already decided for an inter-predicted
3311 // luma partition block. It is used only when some speed features are enabled.
3312 // Return value 0: early termination triggered, no valid rd cost available;
3313 //              1: rd cost values are valid.
3314 static int inter_block_yrd(const AV1_COMP *cpi, MACROBLOCK *x,
3315                            RD_STATS *rd_stats, BLOCK_SIZE bsize,
3316                            int64_t ref_best_rd, FAST_TX_SEARCH_MODE ftxs_mode) {
3317   if (ref_best_rd < 0) {
3318     av1_invalid_rd_stats(rd_stats);
3319     return 0;
3320   }
3321 
3322   av1_init_rd_stats(rd_stats);
3323 
3324   MACROBLOCKD *const xd = &x->e_mbd;
3325   const TxfmSearchParams *txfm_params = &x->txfm_search_params;
3326   const struct macroblockd_plane *const pd = &xd->plane[0];
3327   const int mi_width = mi_size_wide[bsize];
3328   const int mi_height = mi_size_high[bsize];
3329   const TX_SIZE max_tx_size = get_vartx_max_txsize(xd, bsize, 0);
3330   const int bh = tx_size_high_unit[max_tx_size];
3331   const int bw = tx_size_wide_unit[max_tx_size];
3332   const int step = bw * bh;
3333   const int init_depth = get_search_init_depth(
3334       mi_width, mi_height, 1, &cpi->sf, txfm_params->tx_size_search_method);
3335   ENTROPY_CONTEXT ctxa[MAX_MIB_SIZE];
3336   ENTROPY_CONTEXT ctxl[MAX_MIB_SIZE];
3337   TXFM_CONTEXT tx_above[MAX_MIB_SIZE];
3338   TXFM_CONTEXT tx_left[MAX_MIB_SIZE];
3339   av1_get_entropy_contexts(bsize, pd, ctxa, ctxl);
3340   memcpy(tx_above, xd->above_txfm_context, sizeof(TXFM_CONTEXT) * mi_width);
3341   memcpy(tx_left, xd->left_txfm_context, sizeof(TXFM_CONTEXT) * mi_height);
3342 
3343   int64_t this_rd = 0;
3344   for (int idy = 0, block = 0; idy < mi_height; idy += bh) {
3345     for (int idx = 0; idx < mi_width; idx += bw) {
3346       RD_STATS pn_rd_stats;
3347       av1_init_rd_stats(&pn_rd_stats);
3348       tx_block_yrd(cpi, x, idy, idx, block, max_tx_size, bsize, init_depth,
3349                    ctxa, ctxl, tx_above, tx_left, ref_best_rd - this_rd,
3350                    &pn_rd_stats, ftxs_mode);
3351       if (pn_rd_stats.rate == INT_MAX) {
3352         av1_invalid_rd_stats(rd_stats);
3353         return 0;
3354       }
3355       av1_merge_rd_stats(rd_stats, &pn_rd_stats);
3356       this_rd +=
3357           AOMMIN(RDCOST(x->rdmult, pn_rd_stats.rate, pn_rd_stats.dist),
3358                  RDCOST(x->rdmult, pn_rd_stats.zero_rate, pn_rd_stats.sse));
3359       block += step;
3360     }
3361   }
3362 
3363   const int skip_ctx = av1_get_skip_txfm_context(xd);
3364   const int no_skip_txfm_rate = x->mode_costs.skip_txfm_cost[skip_ctx][0];
3365   const int skip_txfm_rate = x->mode_costs.skip_txfm_cost[skip_ctx][1];
3366   const int64_t skip_txfm_rd = RDCOST(x->rdmult, skip_txfm_rate, rd_stats->sse);
3367   this_rd =
3368       RDCOST(x->rdmult, rd_stats->rate + no_skip_txfm_rate, rd_stats->dist);
3369   if (skip_txfm_rd < this_rd) {
3370     this_rd = skip_txfm_rd;
3371     rd_stats->rate = 0;
3372     rd_stats->dist = rd_stats->sse;
3373     rd_stats->skip_txfm = 1;
3374   }
3375 
3376   const int is_cost_valid = this_rd <= ref_best_rd;
3377   if (!is_cost_valid) {
3378     // reset cost value
3379     av1_invalid_rd_stats(rd_stats);
3380   }
3381   return is_cost_valid;
3382 }
3383 
3384 // Search for the best transform size and type for current inter-predicted
3385 // luma block with recursive transform block partitioning. The obtained
3386 // transform selection will be saved in xd->mi[0], the corresponding RD stats
3387 // will be saved in rd_stats. The returned value is the corresponding RD cost.
3388 static int64_t select_tx_size_and_type(const AV1_COMP *cpi, MACROBLOCK *x,
3389                                        RD_STATS *rd_stats, BLOCK_SIZE bsize,
3390                                        int64_t ref_best_rd) {
3391   MACROBLOCKD *const xd = &x->e_mbd;
3392   const TxfmSearchParams *txfm_params = &x->txfm_search_params;
3393   assert(is_inter_block(xd->mi[0]));
3394   assert(bsize < BLOCK_SIZES_ALL);
3395   const int fast_tx_search = txfm_params->tx_size_search_method > USE_FULL_RD;
3396   int64_t rd_thresh = ref_best_rd;
3397   if (rd_thresh == 0) {
3398     av1_invalid_rd_stats(rd_stats);
3399     return INT64_MAX;
3400   }
3401   if (fast_tx_search && rd_thresh < INT64_MAX) {
3402     if (INT64_MAX - rd_thresh > (rd_thresh >> 3)) rd_thresh += (rd_thresh >> 3);
3403   }
3404   assert(rd_thresh > 0);
3405   const FAST_TX_SEARCH_MODE ftxs_mode =
3406       fast_tx_search ? FTXS_DCT_AND_1D_DCT_ONLY : FTXS_NONE;
3407   const struct macroblockd_plane *const pd = &xd->plane[0];
3408   assert(bsize < BLOCK_SIZES_ALL);
3409   const int mi_width = mi_size_wide[bsize];
3410   const int mi_height = mi_size_high[bsize];
3411   ENTROPY_CONTEXT ctxa[MAX_MIB_SIZE];
3412   ENTROPY_CONTEXT ctxl[MAX_MIB_SIZE];
3413   TXFM_CONTEXT tx_above[MAX_MIB_SIZE];
3414   TXFM_CONTEXT tx_left[MAX_MIB_SIZE];
3415   av1_get_entropy_contexts(bsize, pd, ctxa, ctxl);
3416   memcpy(tx_above, xd->above_txfm_context, sizeof(TXFM_CONTEXT) * mi_width);
3417   memcpy(tx_left, xd->left_txfm_context, sizeof(TXFM_CONTEXT) * mi_height);
3418   const int init_depth = get_search_init_depth(
3419       mi_width, mi_height, 1, &cpi->sf, txfm_params->tx_size_search_method);
3420   const TX_SIZE max_tx_size = max_txsize_rect_lookup[bsize];
3421   const int bh = tx_size_high_unit[max_tx_size];
3422   const int bw = tx_size_wide_unit[max_tx_size];
3423   const int step = bw * bh;
3424   const int skip_ctx = av1_get_skip_txfm_context(xd);
3425   const int no_skip_txfm_cost = x->mode_costs.skip_txfm_cost[skip_ctx][0];
3426   const int skip_txfm_cost = x->mode_costs.skip_txfm_cost[skip_ctx][1];
3427   int64_t skip_txfm_rd = RDCOST(x->rdmult, skip_txfm_cost, 0);
3428   int64_t no_skip_txfm_rd = RDCOST(x->rdmult, no_skip_txfm_cost, 0);
3429   int block = 0;
3430 
3431   av1_init_rd_stats(rd_stats);
3432   for (int idy = 0; idy < max_block_high(xd, bsize, 0); idy += bh) {
3433     for (int idx = 0; idx < max_block_wide(xd, bsize, 0); idx += bw) {
3434       const int64_t best_rd_sofar =
3435           (rd_thresh == INT64_MAX)
3436               ? INT64_MAX
3437               : (rd_thresh - (AOMMIN(skip_txfm_rd, no_skip_txfm_rd)));
3438       int is_cost_valid = 1;
3439       RD_STATS pn_rd_stats;
3440       // Search for the best transform block size and type for the sub-block.
3441       select_tx_block(cpi, x, idy, idx, block, max_tx_size, init_depth, bsize,
3442                       ctxa, ctxl, tx_above, tx_left, &pn_rd_stats, INT64_MAX,
3443                       best_rd_sofar, &is_cost_valid, ftxs_mode);
3444       if (!is_cost_valid || pn_rd_stats.rate == INT_MAX) {
3445         av1_invalid_rd_stats(rd_stats);
3446         return INT64_MAX;
3447       }
3448       av1_merge_rd_stats(rd_stats, &pn_rd_stats);
3449       skip_txfm_rd = RDCOST(x->rdmult, skip_txfm_cost, rd_stats->sse);
3450       no_skip_txfm_rd =
3451           RDCOST(x->rdmult, rd_stats->rate + no_skip_txfm_cost, rd_stats->dist);
3452       block += step;
3453     }
3454   }
3455 
3456   if (rd_stats->rate == INT_MAX) return INT64_MAX;
3457 
3458   rd_stats->skip_txfm = (skip_txfm_rd <= no_skip_txfm_rd);
3459 
3460   // If fast_tx_search is true, only DCT and 1D DCT were tested in
3461   // select_tx_block() above. Do a better search for tx type with
3462   // tx sizes already decided.
3463   if (fast_tx_search && cpi->sf.tx_sf.refine_fast_tx_search_results) {
3464     if (!inter_block_yrd(cpi, x, rd_stats, bsize, ref_best_rd, FTXS_NONE))
3465       return INT64_MAX;
3466   }
3467 
3468   int64_t final_rd;
3469   if (rd_stats->skip_txfm) {
3470     final_rd = RDCOST(x->rdmult, skip_txfm_cost, rd_stats->sse);
3471   } else {
3472     final_rd =
3473         RDCOST(x->rdmult, rd_stats->rate + no_skip_txfm_cost, rd_stats->dist);
3474     if (!xd->lossless[xd->mi[0]->segment_id]) {
3475       final_rd =
3476           AOMMIN(final_rd, RDCOST(x->rdmult, skip_txfm_cost, rd_stats->sse));
3477     }
3478   }
3479 
3480   return final_rd;
3481 }
3482 
3483 // Return 1 to terminate the transform search early. The decision is based on
3484 // comparing the model-estimated RD cost against the reference RD cost.
3485 static inline int model_based_tx_search_prune(const AV1_COMP *cpi,
3486                                               MACROBLOCK *x, BLOCK_SIZE bsize,
3487                                               int64_t ref_best_rd) {
3488   const int level = cpi->sf.tx_sf.model_based_prune_tx_search_level;
3489   assert(level >= 0 && level <= 2);
3490   int model_rate;
3491   int64_t model_dist;
3492   uint8_t model_skip;
3493   MACROBLOCKD *const xd = &x->e_mbd;
3494   model_rd_sb_fn[MODELRD_TYPE_TX_SEARCH_PRUNE](
3495       cpi, bsize, x, xd, 0, 0, &model_rate, &model_dist, &model_skip, NULL,
3496       NULL, NULL, NULL);
3497   if (model_skip) return 0;
3498   const int64_t model_rd = RDCOST(x->rdmult, model_rate, model_dist);
3499   // TODO(debargha, urvang): Improve the model and make the check below
3500   // tighter.
3501   static const int prune_factor_by8[] = { 3, 5 };
3502   const int factor = prune_factor_by8[level - 1];
3503   return ((model_rd * factor) >> 3) > ref_best_rd;
3504 }
3505 
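// Worked example for model_based_tx_search_prune() above: at level 1 the
// factor is 3, so the search is pruned when (3/8) * model_rd exceeds
// ref_best_rd; level 2 uses factor 5, i.e. the more aggressive bound
// (5/8) * model_rd.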
3506 void av1_pick_recursive_tx_size_type_yrd(const AV1_COMP *cpi, MACROBLOCK *x,
3507                                          RD_STATS *rd_stats, BLOCK_SIZE bsize,
3508                                          int64_t ref_best_rd) {
3509   MACROBLOCKD *const xd = &x->e_mbd;
3510   const TxfmSearchParams *txfm_params = &x->txfm_search_params;
3511   assert(is_inter_block(xd->mi[0]));
3512 
3513   av1_invalid_rd_stats(rd_stats);
3514 
3515   // If modeled RD cost is a lot worse than the best so far, terminate early.
3516   if (cpi->sf.tx_sf.model_based_prune_tx_search_level &&
3517       ref_best_rd != INT64_MAX) {
3518     if (model_based_tx_search_prune(cpi, x, bsize, ref_best_rd)) return;
3519   }
3520 
3521   // Hashing based speed feature. If the hash of the prediction residue block is
3522   // found in the hash table, use previous search results and terminate early.
3523   uint32_t hash = 0;
3524   MB_RD_RECORD *mb_rd_record = NULL;
3525   const int mi_row = x->e_mbd.mi_row;
3526   const int mi_col = x->e_mbd.mi_col;
3527   const int within_border =
3528       mi_row >= xd->tile.mi_row_start &&
3529       (mi_row + mi_size_high[bsize] < xd->tile.mi_row_end) &&
3530       mi_col >= xd->tile.mi_col_start &&
3531       (mi_col + mi_size_wide[bsize] < xd->tile.mi_col_end);
3532   const int is_mb_rd_hash_enabled =
3533       (within_border && cpi->sf.rd_sf.use_mb_rd_hash);
3534   const int n4 = bsize_to_num_blk(bsize);
3535   if (is_mb_rd_hash_enabled) {
3536     hash = get_block_residue_hash(x, bsize);
3537     mb_rd_record = x->txfm_search_info.mb_rd_record;
3538     const int match_index = find_mb_rd_info(mb_rd_record, ref_best_rd, hash);
3539     if (match_index != -1) {
3540       MB_RD_INFO *mb_rd_info = &mb_rd_record->mb_rd_info[match_index];
3541       fetch_mb_rd_info(n4, mb_rd_info, rd_stats, x);
3542       return;
3543     }
3544   }
3545 
3546   // If we predict that skip is the optimal RD decision, set the respective
3547   // context and terminate early.
3548   int64_t dist;
3549   if (txfm_params->skip_txfm_level &&
3550       predict_skip_txfm(x, bsize, &dist,
3551                         cpi->common.features.reduced_tx_set_used)) {
3552     set_skip_txfm(x, rd_stats, bsize, dist);
3553     // Save the RD search results into mb_rd_record.
3554     if (is_mb_rd_hash_enabled)
3555       save_mb_rd_info(n4, hash, x, rd_stats, mb_rd_record);
3556     return;
3557   }
3558 #if CONFIG_SPEED_STATS
3559   ++x->txfm_search_info.tx_search_count;
3560 #endif  // CONFIG_SPEED_STATS
3561 
3562   const int64_t rd =
3563       select_tx_size_and_type(cpi, x, rd_stats, bsize, ref_best_rd);
3564 
3565   if (rd == INT64_MAX) {
3566     // We should always find at least one candidate unless ref_best_rd is less
3567     // than INT64_MAX (in which case, all the calls to select_tx_block()
3568     // might have failed to find something better)
3569     assert(ref_best_rd != INT64_MAX);
3570     av1_invalid_rd_stats(rd_stats);
3571     return;
3572   }
3573 
3574   // Save the RD search results into mb_rd_record.
3575   if (is_mb_rd_hash_enabled) {
3576     assert(mb_rd_record != NULL);
3577     save_mb_rd_info(n4, hash, x, rd_stats, mb_rd_record);
3578   }
3579 }
3580 
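// A condensed sketch of the hash-reuse fast path used above (helper names
// are the real ones from this file; control flow is simplified):
//   hash = get_block_residue_hash(x, bsize);           // CRC32C of residue
//   idx = find_mb_rd_info(record, ref_best_rd, hash);  // probe saved results
//   if (idx != -1) fetch_mb_rd_info(n4, &record->mb_rd_info[idx], rd_stats, x);
//   else { /* full search */ save_mb_rd_info(n4, hash, x, rd_stats, record); }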
3581 void av1_pick_uniform_tx_size_type_yrd(const AV1_COMP *const cpi, MACROBLOCK *x,
3582                                        RD_STATS *rd_stats, BLOCK_SIZE bs,
3583                                        int64_t ref_best_rd) {
3584   MACROBLOCKD *const xd = &x->e_mbd;
3585   MB_MODE_INFO *const mbmi = xd->mi[0];
3586   const TxfmSearchParams *tx_params = &x->txfm_search_params;
3587   assert(bs == mbmi->bsize);
3588   const int is_inter = is_inter_block(mbmi);
3589   const int mi_row = xd->mi_row;
3590   const int mi_col = xd->mi_col;
3591 
3592   av1_init_rd_stats(rd_stats);
3593 
3594   // Hashing based speed feature for inter blocks. If the hash of the residue
3595   // block is found in the table, use previously saved search results and
3596   // terminate early.
3597   uint32_t hash = 0;
3598   MB_RD_RECORD *mb_rd_record = NULL;
3599   const int num_blks = bsize_to_num_blk(bs);
3600   if (is_inter && cpi->sf.rd_sf.use_mb_rd_hash) {
3601     const int within_border =
3602         mi_row >= xd->tile.mi_row_start &&
3603         (mi_row + mi_size_high[bs] < xd->tile.mi_row_end) &&
3604         mi_col >= xd->tile.mi_col_start &&
3605         (mi_col + mi_size_wide[bs] < xd->tile.mi_col_end);
3606     if (within_border) {
3607       hash = get_block_residue_hash(x, bs);
3608       mb_rd_record = x->txfm_search_info.mb_rd_record;
3609       const int match_index = find_mb_rd_info(mb_rd_record, ref_best_rd, hash);
3610       if (match_index != -1) {
3611         MB_RD_INFO *mb_rd_info = &mb_rd_record->mb_rd_info[match_index];
3612         fetch_mb_rd_info(num_blks, mb_rd_info, rd_stats, x);
3613         return;
3614       }
3615     }
3616   }
3617 
3618   // If we predict that skip is the optimal RD decision - set the respective
3619   // context and terminate early.
3620   int64_t dist;
3621   if (tx_params->skip_txfm_level && is_inter &&
3622       !xd->lossless[mbmi->segment_id] &&
3623       predict_skip_txfm(x, bs, &dist,
3624                         cpi->common.features.reduced_tx_set_used)) {
3625     // Populate rdstats as per skip decision
3626     set_skip_txfm(x, rd_stats, bs, dist);
3627     // Save the RD search results into mb_rd_record.
3628     if (mb_rd_record) {
3629       save_mb_rd_info(num_blks, hash, x, rd_stats, mb_rd_record);
3630     }
3631     return;
3632   }
3633 
3634   if (xd->lossless[mbmi->segment_id]) {
3635     // Lossless mode can only pick the smallest (4x4) transform size.
3636     choose_smallest_tx_size(cpi, x, rd_stats, ref_best_rd, bs);
3637   } else if (tx_params->tx_size_search_method == USE_LARGESTALL) {
3638     choose_largest_tx_size(cpi, x, rd_stats, ref_best_rd, bs);
3639   } else {
3640     choose_tx_size_type_from_rd(cpi, x, rd_stats, ref_best_rd, bs);
3641   }
3642 
3643   // Save the RD search results into mb_rd_record for possible reuse in future.
3644   if (mb_rd_record) {
3645     save_mb_rd_info(num_blks, hash, x, rd_stats, mb_rd_record);
3646   }
3647 }
3648 
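// Compute the rate-distortion cost of the chroma (U and V) planes for the
// current prediction mode. Returns 1 if a valid cost was found, 0 if the
// search exited early because ref_best_rd was exceeded.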
int av1_txfm_uvrd(const AV1_COMP *const cpi, MACROBLOCK *x, RD_STATS *rd_stats,
                  BLOCK_SIZE bsize, int64_t ref_best_rd) {
  av1_init_rd_stats(rd_stats);
  if (ref_best_rd < 0) return 0;
  if (!x->e_mbd.is_chroma_ref) return 1;

  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *const mbmi = xd->mi[0];
  struct macroblockd_plane *const pd = &xd->plane[AOM_PLANE_U];
  const int is_inter = is_inter_block(mbmi);
  int64_t this_rd = 0, skip_txfm_rd = 0;
  const BLOCK_SIZE plane_bsize =
      get_plane_block_size(bsize, pd->subsampling_x, pd->subsampling_y);

  if (is_inter) {
    for (int plane = 1; plane < MAX_MB_PLANE; ++plane)
      av1_subtract_plane(x, plane_bsize, plane);
  }

  const int skip_trellis = 0;
  const TX_SIZE uv_tx_size = av1_get_tx_size(AOM_PLANE_U, xd);
  int is_cost_valid = 1;
  for (int plane = 1; plane < MAX_MB_PLANE; ++plane) {
    RD_STATS this_rd_stats;
    int64_t chroma_ref_best_rd = ref_best_rd;
    // For inter blocks, a refined ref_best_rd is used for early exit. For
    // intra blocks, even though the current rd crosses ref_best_rd, early
    // exit is not recommended since the current rd is also used for gating
    // subsequent modes (say, angular modes).
    // TODO(any): Extend the early exit mechanism to intra modes as well.
    if (cpi->sf.inter_sf.perform_best_rd_based_gating_for_chroma && is_inter &&
        chroma_ref_best_rd != INT64_MAX)
      chroma_ref_best_rd = ref_best_rd - AOMMIN(this_rd, skip_txfm_rd);
    av1_txfm_rd_in_plane(x, cpi, &this_rd_stats, chroma_ref_best_rd, 0, plane,
                         plane_bsize, uv_tx_size, FTXS_NONE, skip_trellis);
    if (this_rd_stats.rate == INT_MAX) {
      is_cost_valid = 0;
      break;
    }
    av1_merge_rd_stats(rd_stats, &this_rd_stats);
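    // Track both the RD cost of coding the coefficients so far and the cost
    // of skipping the transform entirely (zero rate, distortion equal to the
    // SSE); the cheaper of the two gates the early exit below.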
    this_rd = RDCOST(x->rdmult, rd_stats->rate, rd_stats->dist);
    skip_txfm_rd = RDCOST(x->rdmult, 0, rd_stats->sse);
    if (AOMMIN(this_rd, skip_txfm_rd) > ref_best_rd) {
      is_cost_valid = 0;
      break;
    }
  }

  if (!is_cost_valid) {
    // Reset the cost value.
    av1_invalid_rd_stats(rd_stats);
  }

  return is_cost_valid;
}

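// Compute the rate-distortion cost of all transform blocks of size tx_size in
// the given plane, accumulating the result into rd_stats. rd_stats is set to
// invalid if the search exits early against ref_best_rd.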
void av1_txfm_rd_in_plane(MACROBLOCK *x, const AV1_COMP *cpi,
                          RD_STATS *rd_stats, int64_t ref_best_rd,
                          int64_t current_rd, int plane, BLOCK_SIZE plane_bsize,
                          TX_SIZE tx_size, FAST_TX_SEARCH_MODE ftxs_mode,
                          int skip_trellis) {
  assert(IMPLIES(plane == 0, x->e_mbd.mi[0]->tx_size == tx_size));

  if (!cpi->oxcf.txfm_cfg.enable_tx64 &&
      txsize_sqr_up_map[tx_size] == TX_64X64) {
    av1_invalid_rd_stats(rd_stats);
    return;
  }

  if (current_rd > ref_best_rd) {
    av1_invalid_rd_stats(rd_stats);
    return;
  }

  MACROBLOCKD *const xd = &x->e_mbd;
  const struct macroblockd_plane *const pd = &xd->plane[plane];
  struct rdcost_block_args args;
  av1_zero(args);
  args.x = x;
  args.cpi = cpi;
  args.best_rd = ref_best_rd;
  args.current_rd = current_rd;
  args.ftxs_mode = ftxs_mode;
  args.skip_trellis = skip_trellis;
  av1_init_rd_stats(&args.rd_stats);

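  // Visit every transform block in the plane; the block_rd_txfm() callback
  // accumulates rate/distortion into args.rd_stats and sets args.exit_early /
  // args.incomplete_exit when the running cost exceeds args.best_rd.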
  av1_get_entropy_contexts(plane_bsize, pd, args.t_above, args.t_left);
  av1_foreach_transformed_block_in_plane(xd, plane_bsize, plane, block_rd_txfm,
                                         &args);

  MB_MODE_INFO *const mbmi = xd->mi[0];
  const int is_inter = is_inter_block(mbmi);
  const int invalid_rd = is_inter ? args.incomplete_exit : args.exit_early;

  if (invalid_rd) {
    av1_invalid_rd_stats(rd_stats);
  } else {
    *rd_stats = args.rd_stats;
  }
}

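// Top-level transform search for a coding block: search the luma plane first
// (recursive or uniform transform sizes, depending on the search method),
// then the chroma planes, and finally decide whether the whole block should
// signal skip_txfm. Returns 1 on success, 0 if the block cannot beat
// ref_best_rd.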
int av1_txfm_search(const AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize,
                    RD_STATS *rd_stats, RD_STATS *rd_stats_y,
                    RD_STATS *rd_stats_uv, int mode_rate, int64_t ref_best_rd) {
  MACROBLOCKD *const xd = &x->e_mbd;
  TxfmSearchParams *txfm_params = &x->txfm_search_params;
  const int skip_ctx = av1_get_skip_txfm_context(xd);
  const int skip_txfm_cost[2] = { x->mode_costs.skip_txfm_cost[skip_ctx][0],
                                  x->mode_costs.skip_txfm_cost[skip_ctx][1] };
  const int64_t min_header_rate =
      mode_rate + AOMMIN(skip_txfm_cost[0], skip_txfm_cost[1]);
  // Account for the minimum possible skip / non-skip header rd: eventually
  // one of the two skip_txfm costs will be added to mode_rate.
  const int64_t min_header_rd_possible = RDCOST(x->rdmult, min_header_rate, 0);
  if (min_header_rd_possible > ref_best_rd) {
    av1_invalid_rd_stats(rd_stats_y);
    return 0;
  }

  const AV1_COMMON *cm = &cpi->common;
  MB_MODE_INFO *const mbmi = xd->mi[0];
  const int64_t mode_rd = RDCOST(x->rdmult, mode_rate, 0);
  const int64_t rd_thresh =
      ref_best_rd == INT64_MAX ? INT64_MAX : ref_best_rd - mode_rd;
  av1_init_rd_stats(rd_stats);
  av1_init_rd_stats(rd_stats_y);
  rd_stats->rate = mode_rate;

  // Compute the luma rate and distortion.
  av1_subtract_plane(x, bsize, 0);
  if (txfm_params->tx_mode_search_type == TX_MODE_SELECT &&
      !xd->lossless[mbmi->segment_id]) {
    av1_pick_recursive_tx_size_type_yrd(cpi, x, rd_stats_y, bsize, rd_thresh);
#if CONFIG_COLLECT_RD_STATS == 2
    PrintPredictionUnitStats(cpi, tile_data, x, rd_stats_y, bsize);
#endif  // CONFIG_COLLECT_RD_STATS == 2
  } else {
    av1_pick_uniform_tx_size_type_yrd(cpi, x, rd_stats_y, bsize, rd_thresh);
    memset(mbmi->inter_tx_size, mbmi->tx_size, sizeof(mbmi->inter_tx_size));
    for (int i = 0; i < xd->height * xd->width; ++i)
      set_blk_skip(x->txfm_search_info.blk_skip, 0, i, rd_stats_y->skip_txfm);
  }

  if (rd_stats_y->rate == INT_MAX) return 0;

  av1_merge_rd_stats(rd_stats, rd_stats_y);

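  // Two candidate header costs for the luma result: coding the coefficients
  // (non-skip flag plus the rate so far, true distortion) versus skipping the
  // transform (skip flag only, distortion equal to the SSE). If even the
  // cheaper of the two already exceeds ref_best_rd, give up.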
  const int64_t non_skip_txfm_rdcosty =
      RDCOST(x->rdmult, rd_stats->rate + skip_txfm_cost[0], rd_stats->dist);
  const int64_t skip_txfm_rdcosty =
      RDCOST(x->rdmult, mode_rate + skip_txfm_cost[1], rd_stats->sse);
  const int64_t min_rdcosty = AOMMIN(non_skip_txfm_rdcosty, skip_txfm_rdcosty);
  if (min_rdcosty > ref_best_rd) return 0;

  av1_init_rd_stats(rd_stats_uv);
  const int num_planes = av1_num_planes(cm);
  if (num_planes > 1) {
    int64_t ref_best_chroma_rd = ref_best_rd;
    // Calculate the best rd cost possible for chroma.
    if (cpi->sf.inter_sf.perform_best_rd_based_gating_for_chroma &&
        (ref_best_chroma_rd != INT64_MAX)) {
      ref_best_chroma_rd = (ref_best_chroma_rd -
                            AOMMIN(non_skip_txfm_rdcosty, skip_txfm_rdcosty));
    }
    const int is_cost_valid_uv =
        av1_txfm_uvrd(cpi, x, rd_stats_uv, bsize, ref_best_chroma_rd);
    if (!is_cost_valid_uv) return 0;
    av1_merge_rd_stats(rd_stats, rd_stats_uv);
  }

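  // Final skip_txfm decision: if coding the coefficients is no cheaper than
  // signaling skip (rate = skip flag only, distortion = SSE), force skip and
  // zero out the coefficient rates accordingly.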
  int choose_skip_txfm = rd_stats->skip_txfm;
  if (!choose_skip_txfm && !xd->lossless[mbmi->segment_id]) {
    const int64_t rdcost_no_skip_txfm = RDCOST(
        x->rdmult, rd_stats_y->rate + rd_stats_uv->rate + skip_txfm_cost[0],
        rd_stats->dist);
    const int64_t rdcost_skip_txfm =
        RDCOST(x->rdmult, skip_txfm_cost[1], rd_stats->sse);
    if (rdcost_no_skip_txfm >= rdcost_skip_txfm) choose_skip_txfm = 1;
  }
  if (choose_skip_txfm) {
    rd_stats_y->rate = 0;
    rd_stats_uv->rate = 0;
    rd_stats->rate = mode_rate + skip_txfm_cost[1];
    rd_stats->dist = rd_stats->sse;
    rd_stats_y->dist = rd_stats_y->sse;
    rd_stats_uv->dist = rd_stats_uv->sse;
    mbmi->skip_txfm = 1;
    if (rd_stats->skip_txfm) {
      const int64_t tmprd = RDCOST(x->rdmult, rd_stats->rate, rd_stats->dist);
      if (tmprd > ref_best_rd) return 0;
    }
  } else {
    rd_stats->rate += skip_txfm_cost[0];
    mbmi->skip_txfm = 0;
  }

  return 1;
}