1 /*
2 * Copyright (c) 2016, Alliance for Open Media. All rights reserved.
3 *
4 * This source code is subject to the terms of the BSD 2 Clause License and
5 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
6 * was not distributed with this source code in the LICENSE file, you can
7 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
8 * Media Patent License 1.0 was not distributed with this source code in the
9 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
10 */
11
12 #ifndef AOM_AV1_COMMON_AV1_COMMON_INT_H_
13 #define AOM_AV1_COMMON_AV1_COMMON_INT_H_
14
15 #include <stdbool.h>
16
17 #include "config/aom_config.h"
18 #include "config/av1_rtcd.h"
19
20 #include "aom/internal/aom_codec_internal.h"
21 #include "aom_dsp/flow_estimation/corner_detect.h"
22 #include "aom_util/aom_pthread.h"
23 #include "av1/common/alloccommon.h"
24 #include "av1/common/av1_loopfilter.h"
25 #include "av1/common/entropy.h"
26 #include "av1/common/entropymode.h"
27 #include "av1/common/entropymv.h"
28 #include "av1/common/enums.h"
29 #include "av1/common/frame_buffers.h"
30 #include "av1/common/mv.h"
31 #include "av1/common/quant_common.h"
32 #include "av1/common/restoration.h"
33 #include "av1/common/tile_common.h"
34 #include "av1/common/timing.h"
35 #include "aom_dsp/grain_params.h"
36 #include "aom_dsp/grain_table.h"
37 #include "aom_dsp/odintrin.h"
38 #ifdef __cplusplus
39 extern "C" {
40 #endif
41
42 #if defined(__clang__) && defined(__has_warning)
43 #if __has_feature(cxx_attributes) && __has_warning("-Wimplicit-fallthrough")
44 #define AOM_FALLTHROUGH_INTENDED [[clang::fallthrough]] // NOLINT
45 #endif
46 #elif defined(__GNUC__) && __GNUC__ >= 7
47 #define AOM_FALLTHROUGH_INTENDED __attribute__((fallthrough)) // NOLINT
48 #endif
49
50 #ifndef AOM_FALLTHROUGH_INTENDED
51 #define AOM_FALLTHROUGH_INTENDED \
52 do { \
53 } while (0)
54 #endif
55
56 #define CDEF_MAX_STRENGTHS 16
57
58 /* Constant values while waiting for the sequence header */
59 #define FRAME_ID_LENGTH 15
60 #define DELTA_FRAME_ID_LENGTH 14
61
62 #define FRAME_CONTEXTS (FRAME_BUFFERS + 1)
63 // Extra frame context which is always kept at default values
64 #define FRAME_CONTEXT_DEFAULTS (FRAME_CONTEXTS - 1)
65 #define PRIMARY_REF_BITS 3
66 #define PRIMARY_REF_NONE 7
67
68 #define NUM_PING_PONG_BUFFERS 2
69
70 #define MAX_NUM_TEMPORAL_LAYERS 8
71 #define MAX_NUM_SPATIAL_LAYERS 4
72 /* clang-format off */
73 // clang-format seems to think this is a pointer dereference and not a
74 // multiplication.
75 #define MAX_NUM_OPERATING_POINTS \
76 (MAX_NUM_TEMPORAL_LAYERS * MAX_NUM_SPATIAL_LAYERS)
77 /* clang-format on */
78
79 // TODO(jingning): Turning this on to set up transform coefficient
80 // processing timer.
81 #define TXCOEFF_TIMER 0
82 #define TXCOEFF_COST_TIMER 0
83
84 /*!\cond */
85
// How inter blocks in a frame choose their reference frames: all blocks use a
// single reference, all blocks use compound (two) references, or the choice is
// signaled per block.
enum {
  SINGLE_REFERENCE = 0,
  COMPOUND_REFERENCE = 1,
  REFERENCE_MODE_SELECT = 2,  // Single vs. compound decided per block.
  REFERENCE_MODES = 3,        // Number of entries above (not a real mode).
} UENUM1BYTE(REFERENCE_MODE);
92
// Controls whether (and how) the entropy coding context is updated at the end
// of a frame decode. See FeatureFlags::refresh_frame_context.
enum {
  /**
   * Frame context updates are disabled
   */
  REFRESH_FRAME_CONTEXT_DISABLED,
  /**
   * Update frame context to values resulting from backward probability
   * updates based on entropy/counts in the decoded frame
   */
  REFRESH_FRAME_CONTEXT_BACKWARD,
} UENUM1BYTE(REFRESH_FRAME_CONTEXT_MODE);
104
105 #define MFMV_STACK_SIZE 3
// One candidate in the temporal motion field used for motion vector
// prediction (MFMV).
typedef struct {
  int_mv mfmv0;              // Projected motion vector of the candidate.
  uint8_t ref_frame_offset;  // NOTE(review): presumably the order-hint offset
                             // of the reference this MV points to — confirm.
} TPL_MV_REF;
110
// Motion vector and reference frame saved per spatial unit of a coded frame
// (see RefCntBuffer::mvs), for use as temporal MV predictors by later frames.
typedef struct {
  int_mv mv;
  MV_REFERENCE_FRAME ref_frame;
} MV_REF;
115
// A reference-counted frame buffer together with all per-frame state that
// must persist while the frame may still be used as a reference.
typedef struct RefCntBuffer {
  // For a RefCntBuffer, the following are reference-holding variables:
  // - cm->ref_frame_map[]
  // - cm->cur_frame
  // - cm->scaled_ref_buf[] (encoder only)
  // - pbi->output_frame_index[] (decoder only)
  // With that definition, 'ref_count' is the number of reference-holding
  // variables that are currently referencing this buffer.
  // For example:
  // - suppose this buffer is at index 'k' in the buffer pool, and
  // - Total 'n' of the variables / array elements above have value 'k' (that
  // is, they are pointing to buffer at index 'k').
  // Then, pool->frame_bufs[k].ref_count = n.
  int ref_count;

  // Order hint of this frame, and of each of its references.
  unsigned int order_hint;
  unsigned int ref_order_hints[INTER_REFS_PER_FRAME];

  // These variables are used only in encoder and compare the absolute
  // display order hint to compute the relative distance and overcome
  // the limitation of get_relative_dist() which returns incorrect
  // distance when a very old frame is used as a reference.
  unsigned int display_order_hint;
  unsigned int ref_display_order_hint[INTER_REFS_PER_FRAME];
  // Frame's level within the hierarchical structure.
  unsigned int pyramid_level;
  // Per-unit motion vectors saved for temporal MV prediction by later frames.
  MV_REF *mvs;
  // Segmentation map of this frame (one segment id per mi unit).
  uint8_t *seg_map;
  struct segmentation seg;
  int mi_rows;
  int mi_cols;
  // Width and height give the size of the buffer (before any upscaling, unlike
  // the sizes that can be derived from the buf structure)
  int width;
  int height;
  WarpedMotionParams global_motion[REF_FRAMES];
  int showable_frame;  // frame can be used as show existing frame in future
  uint8_t film_grain_params_present;
  aom_film_grain_t film_grain_params;
  aom_codec_frame_buffer_t raw_frame_buffer;
  YV12_BUFFER_CONFIG buf;
  int temporal_id;  // Temporal layer ID of the frame
  int spatial_id;   // Spatial layer ID of the frame
  FRAME_TYPE frame_type;

  // This is only used in the encoder but needs to be indexed per ref frame
  // so it's extremely convenient to keep it here.
  int interp_filter_selected[SWITCHABLE];

  // Inter frame reference frame delta for loop filter
  int8_t ref_deltas[REF_FRAMES];

  // Loop filter level deltas per prediction mode; 0 = ZERO_MV, MV
  int8_t mode_deltas[MAX_MODE_LF_DELTAS];

  // Entropy coding context saved at the end of this frame, so that a later
  // frame may use it as its starting context (via primary_ref_frame).
  FRAME_CONTEXT frame_context;
} RefCntBuffer;
173
// Pool of RefCntBuffer frame buffers shared between the codec and (optionally)
// externally supplied frame-buffer callbacks.
typedef struct BufferPool {
  // Protect BufferPool from being accessed by several FrameWorkers at
  // the same time during frame parallel decode.
  // TODO(hkuang): Try to use atomic variable instead of locking the whole pool.
  // TODO(wtc): Remove this. See
  // https://chromium-review.googlesource.com/c/webm/libvpx/+/560630.
#if CONFIG_MULTITHREAD
  pthread_mutex_t pool_mutex;
#endif

  // Private data associated with the frame buffer callbacks.
  void *cb_priv;

  // Callbacks used to acquire/release the underlying pixel buffers.
  aom_get_frame_buffer_cb_fn_t get_fb_cb;
  aom_release_frame_buffer_cb_fn_t release_fb_cb;

  // Array of 'num_frame_bufs' buffers.
  RefCntBuffer *frame_bufs;
  uint8_t num_frame_bufs;

  // Frame buffers allocated internally by the codec.
  InternalFrameBufferList int_frame_buffers;
} BufferPool;
196
197 /*!\endcond */
198
199 /*!\brief Parameters related to CDEF */
typedef struct {
  //! CDEF column line buffer
  uint16_t *colbuf[MAX_MB_PLANE];
  //! CDEF top & bottom line buffer
  uint16_t *linebuf[MAX_MB_PLANE];
  //! CDEF intermediate buffer
  uint16_t *srcbuf;
  //! CDEF column line buffer sizes
  size_t allocated_colbuf_size[MAX_MB_PLANE];
  //! CDEF top and bottom line buffer sizes
  size_t allocated_linebuf_size[MAX_MB_PLANE];
  //! CDEF intermediate buffer size
  size_t allocated_srcbuf_size;
  //! CDEF damping factor
  int cdef_damping;
  //! Number of CDEF strength values
  int nb_cdef_strengths;
  //! CDEF strength values for luma
  int cdef_strengths[CDEF_MAX_STRENGTHS];
  //! CDEF strength values for chroma
  int cdef_uv_strengths[CDEF_MAX_STRENGTHS];
  //! Number of CDEF strength values in bits
  int cdef_bits;
  //! Number of rows in the frame in 4 pixel units for which the buffers
  //! above were allocated
  int allocated_mi_rows;
  //! Number of CDEF workers for which the buffers above were allocated
  int allocated_num_workers;
} CdefInfo;
228
229 /*!\cond */
230
// Parameters for per-superblock delta quantization and delta loop filter
// level, signaled in the frame header.
typedef struct {
  int delta_q_present_flag;
  // Resolution of delta quant
  int delta_q_res;
  int delta_lf_present_flag;
  // Resolution of delta lf level
  int delta_lf_res;
  // This is a flag for number of deltas of loop filter level
  // 0: use 1 delta, for y_vertical, y_horizontal, u, and v
  // 1: use separate deltas for each filter level
  int delta_lf_multi;
} DeltaQInfo;
243
// Sequence-level order hint configuration and the coding tools gated on it.
typedef struct {
  int enable_order_hint;        // 0 - disable order hint, and related tools
  int order_hint_bits_minus_1;  // dist_wtd_comp, ref_frame_mvs,
                                // frame_sign_bias
                                // if 0, enable_dist_wtd_comp and
                                // enable_ref_frame_mvs must be set as 0.
  int enable_dist_wtd_comp;     // 0 - disable dist-wtd compound modes
                                // 1 - enable it
  int enable_ref_frame_mvs;     // 0 - disable ref frame mvs
                                // 1 - enable it
} OrderHintInfo;
255
256 // Sequence header structure.
257 // Note: All syntax elements of sequence_header_obu that need to be
258 // bit-identical across multiple sequence headers must be part of this struct,
259 // so that consistency is checked by are_seq_headers_consistent() function.
260 // One exception is the last member 'op_params' that is ignored by
261 // are_seq_headers_consistent() function.
typedef struct SequenceHeader {
  int num_bits_width;    // Bits used to code frame width (minus 1 in spec).
  int num_bits_height;   // Bits used to code frame height (minus 1 in spec).
  int max_frame_width;   // Maximum frame width for the sequence.
  int max_frame_height;  // Maximum frame height for the sequence.
  // Whether current and reference frame IDs are signaled in the bitstream.
  // Frame id numbers are additional information that do not affect the
  // decoding process, but provide decoders with a way of detecting missing
  // reference frames so that appropriate action can be taken.
  uint8_t frame_id_numbers_present_flag;
  int frame_id_length;
  int delta_frame_id_length;
  BLOCK_SIZE sb_size;  // Size of the superblock used for this frame
  int mib_size;        // Size of the superblock in units of MI blocks
  int mib_size_log2;   // Log 2 of above.

  OrderHintInfo order_hint_info;

  uint8_t force_screen_content_tools;  // 0 - force off
                                       // 1 - force on
                                       // 2 - adaptive
  uint8_t still_picture;               // Video is a single frame still picture
  uint8_t reduced_still_picture_hdr;   // Use reduced header for still picture
  uint8_t force_integer_mv;            // 0 - Don't force. MV can use subpel
                                       // 1 - force to integer
                                       // 2 - adaptive
  uint8_t enable_filter_intra;         // enables/disables filterintra
  uint8_t enable_intra_edge_filter;    // enables/disables edge upsampling
  uint8_t enable_interintra_compound;  // enables/disables interintra_compound
  uint8_t enable_masked_compound;      // enables/disables masked compound
  uint8_t enable_dual_filter;          // 0 - disable dual interpolation filter
                                       // 1 - enable vert/horz filter selection
  uint8_t enable_warped_motion;        // 0 - disable warp for the sequence
                                       // 1 - enable warp for the sequence
  uint8_t enable_superres;             // 0 - Disable superres for the sequence
                                       //     and no frame level superres flag
                                       // 1 - Enable superres for the sequence
                                       //     enable per-frame superres flag
  uint8_t enable_cdef;                 // To turn on/off CDEF
  uint8_t enable_restoration;          // To turn on/off loop restoration
  BITSTREAM_PROFILE profile;

  // Color config.
  aom_bit_depth_t bit_depth;  // AOM_BITS_8 in profile 0 or 1,
                              // AOM_BITS_10 or AOM_BITS_12 in profile 2 or 3.
  uint8_t use_highbitdepth;   // If true, we need to use 16bit frame buffers.
  uint8_t monochrome;         // Monochrome video
  aom_color_primaries_t color_primaries;
  aom_transfer_characteristics_t transfer_characteristics;
  aom_matrix_coefficients_t matrix_coefficients;
  int color_range;
  int subsampling_x;  // Chroma subsampling for x
  int subsampling_y;  // Chroma subsampling for y
  aom_chroma_sample_position_t chroma_sample_position;
  uint8_t separate_uv_delta_q;
  uint8_t film_grain_params_present;

  // Operating point info.
  int operating_points_cnt_minus_1;
  int operating_point_idc[MAX_NUM_OPERATING_POINTS];
  // True if operating_point_idc[op] is not equal to 0 for any value of op from
  // 0 to operating_points_cnt_minus_1.
  bool has_nonzero_operating_point_idc;
  int timing_info_present;
  aom_timing_info_t timing_info;
  uint8_t decoder_model_info_present_flag;
  aom_dec_model_info_t decoder_model_info;
  uint8_t display_model_info_present_flag;
  AV1_LEVEL seq_level_idx[MAX_NUM_OPERATING_POINTS];
  uint8_t tier[MAX_NUM_OPERATING_POINTS];  // seq_tier in spec. One bit: 0 or 1.

  // IMPORTANT: the op_params member must be at the end of the struct so that
  // are_seq_headers_consistent() can be implemented with a memcmp() call.
  // TODO(urvang): We probably don't need the +1 here.
  aom_dec_model_op_parameters_t op_params[MAX_NUM_OPERATING_POINTS + 1];
} SequenceHeader;
338
// Skip mode parameters for the current frame (spec: skip_mode_params).
typedef struct {
  int skip_mode_allowed;  // Whether skip mode may be signaled for this frame.
  int skip_mode_flag;     // Whether skip mode is enabled for this frame.
  // Indices of the two reference frames used by skip-mode blocks.
  int ref_frame_idx_0;
  int ref_frame_idx_1;
} SkipModeInfo;
345
// Frame-level state of the frame currently being encoded/decoded.
typedef struct {
  FRAME_TYPE frame_type;
  REFERENCE_MODE reference_mode;

  unsigned int order_hint;
  unsigned int display_order_hint;
  // Frame's level within the hierarchical structure.
  unsigned int pyramid_level;
  unsigned int frame_number;
  SkipModeInfo skip_mode_info;
  int refresh_frame_flags;  // Which ref frames are overwritten by this frame
  int frame_refs_short_signaling;
} CurrentFrame;
359
360 /*!\endcond */
361
362 /*!
363 * \brief Frame level features.
364 */
typedef struct {
  /*!
   * If true, CDF update in the symbol encoding/decoding process is disabled.
   */
  bool disable_cdf_update;
  /*!
   * If true, motion vectors are specified to eighth pel precision; and
   * if false, motion vectors are specified to quarter pel precision.
   */
  bool allow_high_precision_mv;
  /*!
   * If true, force integer motion vectors; if false, use the default.
   */
  bool cur_frame_force_integer_mv;
  /*!
   * If true, palette tool and/or intra block copy tools may be used.
   */
  bool allow_screen_content_tools;
  bool allow_intrabc;       /*!< If true, intra block copy tool may be used. */
  bool allow_warped_motion; /*!< If true, frame may use warped motion mode. */
  /*!
   * If true, using previous frames' motion vectors for prediction is allowed.
   */
  bool allow_ref_frame_mvs;
  /*!
   * If true, frame is fully lossless at coded resolution.
   */
  bool coded_lossless;
  /*!
   * If true, frame is fully lossless at upscaled resolution.
   */
  bool all_lossless;
  /*!
   * If true, the frame is restricted to a reduced subset of the full set of
   * transform types.
   */
  bool reduced_tx_set_used;
  /*!
   * If true, error resilient mode is enabled.
   * Note: Error resilient mode allows the syntax of a frame to be parsed
   * independently of previously decoded frames.
   */
  bool error_resilient_mode;
  /*!
   * If false, only MOTION_MODE that may be used is SIMPLE_TRANSLATION;
   * if true, all MOTION_MODES may be used.
   */
  bool switchable_motion_mode;
  TX_MODE tx_mode;            /*!< Transform mode at frame level. */
  InterpFilter interp_filter; /*!< Interpolation filter at frame level. */
  /*!
   * The reference frame that contains the CDF values and other state that
   * should be loaded at the start of the frame.
   */
  int primary_ref_frame;
  /*!
   * Byte alignment of the planes in the reference buffers.
   */
  int byte_alignment;
  /*!
   * Flag signaling how frame contexts should be updated at the end of
   * a frame decode.
   */
  REFRESH_FRAME_CONTEXT_MODE refresh_frame_context;
} FeatureFlags;
430
431 /*!
432 * \brief Params related to tiles.
433 */
typedef struct CommonTileParams {
  int cols; /*!< number of tile columns that frame is divided into */
  int rows; /*!< number of tile rows that frame is divided into */
  int max_width_sb;  /*!< maximum tile width in superblock units. */
  int max_height_sb; /*!< maximum tile height in superblock units. */

  /*!
   * Min width of non-rightmost tile in MI units. Only valid if cols > 1.
   */
  int min_inner_width;

  /*!
   * If true, tiles are uniformly spaced with power-of-two number of rows and
   * columns.
   * If false, tiles have explicitly configured widths and heights.
   */
  int uniform_spacing;

  /**
   * \name Members only valid when uniform_spacing == 1
   */
  /**@{*/
  int log2_cols; /*!< log2 of 'cols'. */
  int log2_rows; /*!< log2 of 'rows'. */
  int width;     /*!< tile width in MI units */
  int height;    /*!< tile height in MI units */
  /**@}*/

  /*!
   * Min num of tile columns possible based on 'max_width_sb' and frame width.
   */
  int min_log2_cols;
  /*!
   * Min num of tile rows possible based on 'max_height_sb' and frame height.
   */
  int min_log2_rows;
  /*!
   * Max num of tile columns possible based on frame width.
   */
  int max_log2_cols;
  /*!
   * Max num of tile rows possible based on frame height.
   */
  int max_log2_rows;
  /*!
   * log2 of min number of tiles (same as min_log2_cols + min_log2_rows).
   */
  int min_log2;
  /*!
   * col_start_sb[i] is the start position of tile column i in superblock units.
   * valid for 0 <= i <= cols
   */
  int col_start_sb[MAX_TILE_COLS + 1];
  /*!
   * row_start_sb[i] is the start position of tile row i in superblock units.
   * valid for 0 <= i <= rows
   */
  int row_start_sb[MAX_TILE_ROWS + 1];
  /*!
   * If true, we are using large scale tile mode.
   */
  unsigned int large_scale;
  /*!
   * Only relevant when large_scale == 1.
   * If true, the independent decoding of a single tile or a section of a frame
   * is allowed.
   */
  unsigned int single_tile_decoding;
} CommonTileParams;
503
504 typedef struct CommonModeInfoParams CommonModeInfoParams;
505 /*!
506 * \brief Params related to MB_MODE_INFO arrays and related info.
507 */
struct CommonModeInfoParams {
  /*!
   * Number of rows in the frame in 16 pixel units.
   * This is computed from frame height aligned to a multiple of 8.
   */
  int mb_rows;
  /*!
   * Number of cols in the frame in 16 pixel units.
   * This is computed from frame width aligned to a multiple of 8.
   */
  int mb_cols;

  /*!
   * Total MBs = mb_rows * mb_cols.
   */
  int MBs;

  /*!
   * Number of rows in the frame in 4 pixel (MB_MODE_INFO) units.
   * This is computed from frame height aligned to a multiple of 8.
   */
  int mi_rows;
  /*!
   * Number of cols in the frame in 4 pixel (MB_MODE_INFO) units.
   * This is computed from frame width aligned to a multiple of 8.
   */
  int mi_cols;

  /*!
   * An array of MB_MODE_INFO structs for every 'mi_alloc_bsize' sized block
   * in the frame.
   * Note: This array should be treated like a scratch memory, and should NOT be
   * accessed directly, in most cases. Please use 'mi_grid_base' array instead.
   */
  MB_MODE_INFO *mi_alloc;
  /*!
   * Number of allocated elements in 'mi_alloc'.
   */
  int mi_alloc_size;
  /*!
   * Stride for 'mi_alloc' array.
   */
  int mi_alloc_stride;
  /*!
   * The minimum block size that each element in 'mi_alloc' can correspond to.
   * For decoder, this is always BLOCK_4X4.
   * For encoder, this is BLOCK_8X8 for resolution >= 4k case or REALTIME mode
   * case. Otherwise, this is BLOCK_4X4.
   */
  BLOCK_SIZE mi_alloc_bsize;

  /*!
   * Grid of pointers to 4x4 MB_MODE_INFO structs allocated in 'mi_alloc'.
   * It's possible that:
   * - Multiple pointers in the grid point to the same element in 'mi_alloc'
   * (for example, for all 4x4 blocks that belong to the same partition block).
   * - Some pointers can be NULL (for example, for blocks outside visible area).
   */
  MB_MODE_INFO **mi_grid_base;
  /*!
   * Number of allocated elements in 'mi_grid_base' (and 'tx_type_map' also).
   */
  int mi_grid_size;
  /*!
   * Stride for 'mi_grid_base' (and 'tx_type_map' also).
   */
  int mi_stride;

  /*!
   * An array of tx types for each 4x4 block in the frame.
   * Number of allocated elements is same as 'mi_grid_size', and stride is
   * same as 'mi_grid_size'. So, indexing into 'tx_type_map' is same as that of
   * 'mi_grid_base'.
   */
  TX_TYPE *tx_type_map;

  /**
   * \name Function pointers to allow separate logic for encoder and decoder.
   */
  /**@{*/
  /*!
   * Free the memory allocated to arrays in 'mi_params'.
   * \param[in,out]   mi_params   object containing common mode info parameters
   */
  void (*free_mi)(struct CommonModeInfoParams *mi_params);
  /*!
   * Initialize / reset appropriate arrays in 'mi_params'.
   * \param[in,out]   mi_params   object containing common mode info parameters
   */
  void (*setup_mi)(struct CommonModeInfoParams *mi_params);
  /*!
   * Allocate required memory for arrays in 'mi_params'.
   * \param[in,out]   mi_params   object containing common mode info
   *                              parameters
   * \param           width       frame width
   * \param           height      frame height
   * \param           min_partition_size   minimum partition size allowed while
   *                                       encoding
   */
  void (*set_mb_mi)(struct CommonModeInfoParams *mi_params, int width,
                    int height, BLOCK_SIZE min_partition_size);
  /**@}*/
};
611
612 typedef struct CommonQuantParams CommonQuantParams;
613 /*!
614 * \brief Parameters related to quantization at the frame level.
615 */
struct CommonQuantParams {
  /*!
   * Base qindex of the frame in the range 0 to 255.
   */
  int base_qindex;

  /*!
   * Delta of qindex (from base_qindex) for Y plane DC coefficient.
   * Note: y_ac_delta_q is implicitly 0.
   */
  int y_dc_delta_q;

  /*!
   * Delta of qindex (from base_qindex) for U plane DC coefficients.
   */
  int u_dc_delta_q;
  /*!
   * Delta of qindex (from base_qindex) for V plane DC coefficients.
   * Same as that for U plane if cm->seq_params->separate_uv_delta_q == 0.
   */
  int v_dc_delta_q;

  /*!
   * Delta of qindex (from base_qindex) for U plane AC coefficients.
   */
  int u_ac_delta_q;
  /*!
   * Delta of qindex (from base_qindex) for V plane AC coefficients.
   * Same as that for U plane if cm->seq_params->separate_uv_delta_q == 0.
   */
  int v_ac_delta_q;

  /*
   * Note: The qindex per superblock may have a delta from the qindex obtained
   * at frame level from parameters above, based on 'cm->delta_q_info'.
   */

  /**
   * \name True dequantizers.
   * The dequantizers below are true dequantizers used only in the
   * dequantization process. They have the same coefficient
   * shift/scale as TX.
   */
  /**@{*/
  int16_t y_dequant_QTX[MAX_SEGMENTS][2]; /*!< Dequant for Y plane */
  int16_t u_dequant_QTX[MAX_SEGMENTS][2]; /*!< Dequant for U plane */
  int16_t v_dequant_QTX[MAX_SEGMENTS][2]; /*!< Dequant for V plane */
  /**@}*/

  /**
   * \name Global quantization matrix tables.
   */
  /**@{*/
  /*!
   * Global dequantization matrix table.
   */
  const qm_val_t *giqmatrix[NUM_QM_LEVELS][3][TX_SIZES_ALL];
  /*!
   * Global quantization matrix table.
   */
  const qm_val_t *gqmatrix[NUM_QM_LEVELS][3][TX_SIZES_ALL];
  /**@}*/

  /**
   * \name Local dequantization matrix tables for each frame.
   */
  /**@{*/
  /*!
   * Local dequant matrix for Y plane.
   */
  const qm_val_t *y_iqmatrix[MAX_SEGMENTS][TX_SIZES_ALL];
  /*!
   * Local dequant matrix for U plane.
   */
  const qm_val_t *u_iqmatrix[MAX_SEGMENTS][TX_SIZES_ALL];
  /*!
   * Local dequant matrix for V plane.
   */
  const qm_val_t *v_iqmatrix[MAX_SEGMENTS][TX_SIZES_ALL];
  /**@}*/

  /*!
   * Flag indicating whether quantization matrices are being used:
   *  - If true, qm_level_y, qm_level_u and qm_level_v indicate the level
   *    indices to be used to access appropriate global quant matrix tables.
   *  - If false, we implicitly use level index 'NUM_QM_LEVELS - 1'.
   */
  bool using_qmatrix;
  /**
   * \name Valid only when using_qmatrix == true
   * Indicate the level indices to be used to access appropriate global quant
   * matrix tables.
   */
  /**@{*/
  int qmatrix_level_y; /*!< Level index for Y plane */
  int qmatrix_level_u; /*!< Level index for U plane */
  int qmatrix_level_v; /*!< Level index for V plane */
  /**@}*/
};
715
716 typedef struct CommonContexts CommonContexts;
717 /*!
718 * \brief Contexts used for transmitting various symbols in the bitstream.
719 */
struct CommonContexts {
  /*!
   * Context used by 'FRAME_CONTEXT.partition_cdf' to transmit partition type.
   * partition[i][j] is the context for ith tile row, jth mi_col.
   */
  PARTITION_CONTEXT **partition;

  /*!
   * Context used to derive context for multiple symbols:
   * - 'TXB_CTX.txb_skip_ctx' used by 'FRAME_CONTEXT.txb_skip_cdf' to transmit
   * the skip_txfm flag.
   * - 'TXB_CTX.dc_sign_ctx' used by 'FRAME_CONTEXT.dc_sign_cdf' to transmit
   * sign.
   * entropy[i][j][k] is the context for ith plane, jth tile row, kth mi_col.
   */
  ENTROPY_CONTEXT **entropy[MAX_MB_PLANE];

  /*!
   * Context used to derive context for 'FRAME_CONTEXT.txfm_partition_cdf' to
   * transmit 'is_split' flag to indicate if this transform block should be
   * split into smaller sub-blocks.
   * txfm[i][j] is the context for ith tile row, jth mi_col.
   */
  TXFM_CONTEXT **txfm;

  /*!
   * Dimensions that were used to allocate the arrays above.
   * If these dimensions change, the arrays may have to be re-allocated.
   */
  int num_planes;    /*!< Corresponds to av1_num_planes(cm) */
  int num_tile_rows; /*!< Corresponds to cm->tiles.row */
  int num_mi_cols;   /*!< Corresponds to cm->mi_params.mi_cols */
};
753
754 /*!
755 * \brief Top level common structure used by both encoder and decoder.
756 */
757 typedef struct AV1Common {
758 /*!
759 * Information about the current frame that is being coded.
760 */
761 CurrentFrame current_frame;
762 /*!
763 * Code and details about current error status.
764 */
765 struct aom_internal_error_info *error;
766
767 /*!
768 * AV1 allows two types of frame scaling operations:
769 * 1. Frame super-resolution: that allows coding a frame at lower resolution
770 * and after decoding the frame, normatively scales and restores the frame --
771 * inside the coding loop.
772 * 2. Frame resize: that allows coding frame at lower/higher resolution, and
773 * then non-normatively upscale the frame at the time of rendering -- outside
774 * the coding loop.
775 * Hence, the need for 3 types of dimensions.
776 */
777
778 /**
779 * \name Coded frame dimensions.
780 */
781 /**@{*/
782 int width; /*!< Coded frame width */
783 int height; /*!< Coded frame height */
784 /**@}*/
785
786 /**
787 * \name Rendered frame dimensions.
788 * Dimensions after applying both super-resolution and resize to the coded
789 * frame. Different from coded dimensions if super-resolution and/or resize
790 * are being used for this frame.
791 */
792 /**@{*/
793 int render_width; /*!< Rendered frame width */
794 int render_height; /*!< Rendered frame height */
795 /**@}*/
796
797 /**
798 * \name Super-resolved frame dimensions.
799 * Frame dimensions after applying super-resolution to the coded frame (if
800 * present), but before applying resize.
801 * Larger than the coded dimensions if super-resolution is being used for
802 * this frame.
803 * Different from rendered dimensions if resize is being used for this frame.
804 */
805 /**@{*/
806 int superres_upscaled_width; /*!< Super-resolved frame width */
807 int superres_upscaled_height; /*!< Super-resolved frame height */
808 /**@}*/
809
810 /*!
811 * The denominator of the superres scale used by this frame.
812 * Note: The numerator is fixed to be SCALE_NUMERATOR.
813 */
814 uint8_t superres_scale_denominator;
815
816 /*!
817 * buffer_removal_times[op_num] specifies the frame removal time in units of
818 * DecCT clock ticks counted from the removal time of the last random access
819 * point for operating point op_num.
820 * TODO(urvang): We probably don't need the +1 here.
821 */
822 uint32_t buffer_removal_times[MAX_NUM_OPERATING_POINTS + 1];
823 /*!
824 * Presentation time of the frame in clock ticks DispCT counted from the
825 * removal time of the last random access point for the operating point that
826 * is being decoded.
827 */
828 uint32_t frame_presentation_time;
829
830 /*!
831 * Buffer where previous frame is stored.
832 */
833 RefCntBuffer *prev_frame;
834
835 /*!
836 * Buffer into which the current frame will be stored and other related info.
837 * TODO(hkuang): Combine this with cur_buf in macroblockd.
838 */
839 RefCntBuffer *cur_frame;
840
841 /*!
842 * For encoder, we have a two-level mapping from reference frame type to the
843 * corresponding buffer in the buffer pool:
844 * * 'remapped_ref_idx[i - 1]' maps reference type 'i' (range: LAST_FRAME ...
845 * EXTREF_FRAME) to a remapped index 'j' (in range: 0 ... REF_FRAMES - 1)
846 * * Later, 'cm->ref_frame_map[j]' maps the remapped index 'j' to a pointer to
847 * the reference counted buffer structure RefCntBuffer, taken from the buffer
848 * pool cm->buffer_pool->frame_bufs.
849 *
850 * LAST_FRAME, ..., EXTREF_FRAME
851 * | |
852 * v v
853 * remapped_ref_idx[LAST_FRAME - 1], ..., remapped_ref_idx[EXTREF_FRAME - 1]
854 * | |
855 * v v
856 * ref_frame_map[], ..., ref_frame_map[]
857 *
858 * Note: INTRA_FRAME always refers to the current frame, so there's no need to
859 * have a remapped index for the same.
860 */
861 int remapped_ref_idx[REF_FRAMES];
862
863 /*!
864 * Scale of the current frame with respect to itself.
865 * This is currently used for intra block copy, which behaves like an inter
866 * prediction mode, where the reference frame is the current frame itself.
867 */
868 struct scale_factors sf_identity;
869
870 /*!
871 * Scale factors of the reference frame with respect to the current frame.
872 * This is required for generating inter prediction and will be non-identity
873 * for a reference frame, if it has different dimensions than the coded
874 * dimensions of the current frame.
875 */
876 struct scale_factors ref_scale_factors[REF_FRAMES];
877
878 /*!
879 * For decoder, ref_frame_map[i] maps reference type 'i' to a pointer to
880 * the buffer in the buffer pool 'cm->buffer_pool.frame_bufs'.
881 * For encoder, ref_frame_map[j] (where j = remapped_ref_idx[i]) maps
882 * remapped reference index 'j' (that is, original reference type 'i') to
883 * a pointer to the buffer in the buffer pool 'cm->buffer_pool.frame_bufs'.
884 */
885 RefCntBuffer *ref_frame_map[REF_FRAMES];
886
887 /*!
888 * If true, this frame is actually shown after decoding.
889 * If false, this frame is coded in the bitstream, but not shown. It is only
890 * used as a reference for other frames coded later.
891 */
892 int show_frame;
893
894 /*!
895 * If true, this frame can be used as a show-existing frame for other frames
896 * coded later.
897 * When 'show_frame' is true, this is always true for all non-keyframes.
898 * When 'show_frame' is false, this value is transmitted in the bitstream.
899 */
900 int showable_frame;
901
902 /*!
903 * If true, show an existing frame coded before, instead of actually coding a
904 * frame. The existing frame comes from one of the existing reference buffers,
905 * as signaled in the bitstream.
906 */
907 int show_existing_frame;
908
909 /*!
910 * Whether some features are allowed or not.
911 */
912 FeatureFlags features;
913
914 /*!
915 * Params related to MB_MODE_INFO arrays and related info.
916 */
917 CommonModeInfoParams mi_params;
918
919 #if CONFIG_ENTROPY_STATS
920 /*!
921 * Context type used by token CDFs, in the range 0 .. (TOKEN_CDF_Q_CTXS - 1).
922 */
923 int coef_cdf_category;
924 #endif // CONFIG_ENTROPY_STATS
925
926 /*!
927 * Quantization params.
928 */
929 CommonQuantParams quant_params;
930
931 /*!
932 * Segmentation info for current frame.
933 */
934 struct segmentation seg;
935
936 /*!
937 * Segmentation map for previous frame.
938 */
939 uint8_t *last_frame_seg_map;
940
941 /**
942 * \name Deblocking filter parameters.
943 */
944 /**@{*/
945 loop_filter_info_n lf_info; /*!< Loop filter info */
946 struct loopfilter lf; /*!< Loop filter parameters */
947 /**@}*/
948
949 /**
950 * \name Loop Restoration filter parameters.
951 */
952 /**@{*/
953 RestorationInfo rst_info[MAX_MB_PLANE]; /*!< Loop Restoration filter info */
954 int32_t *rst_tmpbuf; /*!< Scratch buffer for self-guided restoration */
955 RestorationLineBuffers *rlbs; /*!< Line buffers needed by loop restoration */
956 YV12_BUFFER_CONFIG rst_frame; /*!< Stores the output of loop restoration */
957 /**@}*/
958
959 /*!
960 * CDEF (Constrained Directional Enhancement Filter) parameters.
961 */
962 CdefInfo cdef_info;
963
964 /*!
965 * Parameters for film grain synthesis.
966 */
967 aom_film_grain_t film_grain_params;
968
969 /*!
970 * Parameters for delta quantization and delta loop filter level.
971 */
972 DeltaQInfo delta_q_info;
973
974 /*!
975 * Global motion parameters for each reference frame.
976 */
977 WarpedMotionParams global_motion[REF_FRAMES];
978
979 /*!
980 * Elements part of the sequence header, that are applicable for all the
981 * frames in the video.
982 */
983 SequenceHeader *seq_params;
984
985 /*!
986 * Current CDFs of all the symbols for the current frame.
987 */
988 FRAME_CONTEXT *fc;
989 /*!
990 * Default CDFs used when features.primary_ref_frame = PRIMARY_REF_NONE
991 * (e.g. for a keyframe). These default CDFs are defined by the bitstream and
992 * copied from default CDF tables for each symbol.
993 */
994 FRAME_CONTEXT *default_frame_context;
995
996 /*!
997 * Parameters related to tiling.
998 */
999 CommonTileParams tiles;
1000
1001 /*!
1002 * External BufferPool passed from outside.
1003 */
1004 BufferPool *buffer_pool;
1005
1006 /*!
1007 * Above context buffers and their sizes.
1008 * Note: above contexts are allocated in this struct, as their size is
1009 * dependent on frame width, while left contexts are declared and allocated in
1010 * MACROBLOCKD struct, as they have a fixed size.
1011 */
1012 CommonContexts above_contexts;
1013
1014 /**
1015 * \name Signaled when cm->seq_params->frame_id_numbers_present_flag == 1
1016 */
1017 /**@{*/
1018 int current_frame_id; /*!< frame ID for the current frame. */
1019 int ref_frame_id[REF_FRAMES]; /*!< frame IDs for the reference frames. */
1020 /**@}*/
1021
1022 /*!
1023 * Motion vectors provided by motion field estimation.
1024 * tpl_mvs[row * stride + col] stores MV for block at [mi_row, mi_col] where:
1025 * mi_row = 2 * row,
1026 * mi_col = 2 * col, and
1027 * stride = cm->mi_params.mi_stride / 2
1028 */
1029 TPL_MV_REF *tpl_mvs;
1030 /*!
1031 * Allocated size of 'tpl_mvs' array. Refer to 'ensure_mv_buffer()' function.
1032 */
1033 int tpl_mvs_mem_size;
1034 /*!
1035 * ref_frame_sign_bias[k] is 1 if relative distance between reference 'k' and
1036 * current frame is positive; and 0 otherwise.
1037 */
1038 int ref_frame_sign_bias[REF_FRAMES];
1039 /*!
1040 * ref_frame_side[k] is 1 if relative distance between reference 'k' and
1041 * current frame is positive, -1 if relative distance is 0; and 0 otherwise.
1042 * TODO(jingning): This can be combined with sign_bias later.
1043 */
1044 int8_t ref_frame_side[REF_FRAMES];
1045
1046 /*!
1047 * Temporal layer ID of this frame
1048 * (in the range 0 ... (number_temporal_layers - 1)).
1049 */
1050 int temporal_layer_id;
1051
1052 /*!
1053 * Spatial layer ID of this frame
1054 * (in the range 0 ... (number_spatial_layers - 1)).
1055 */
1056 int spatial_layer_id;
1057
1058 #if TXCOEFF_TIMER
1059 int64_t cum_txcoeff_timer;
1060 int64_t txcoeff_timer;
1061 int txb_count;
1062 #endif // TXCOEFF_TIMER
1063
1064 #if TXCOEFF_COST_TIMER
1065 int64_t cum_txcoeff_cost_timer;
1066 int64_t txcoeff_cost_timer;
1067 int64_t txcoeff_cost_count;
1068 #endif // TXCOEFF_COST_TIMER
1069 } AV1_COMMON;
1070
1071 /*!\cond */
1072
// TODO(hkuang): Don't need to lock the whole pool after implementing atomic
// frame reference count.
// Acquires exclusive access to 'pool'. No-op in single-threaded builds.
static void lock_buffer_pool(BufferPool *const pool) {
#if CONFIG_MULTITHREAD
  pthread_mutex_lock(&pool->pool_mutex);
#else
  (void)pool;  // Unused when CONFIG_MULTITHREAD is off.
#endif
}
1082
// Releases the lock taken by lock_buffer_pool(). No-op in single-threaded
// builds.
static void unlock_buffer_pool(BufferPool *const pool) {
#if CONFIG_MULTITHREAD
  pthread_mutex_unlock(&pool->pool_mutex);
#else
  (void)pool;  // Unused when CONFIG_MULTITHREAD is off.
#endif
}
1090
get_ref_frame(AV1_COMMON * cm,int index)1091 static inline YV12_BUFFER_CONFIG *get_ref_frame(AV1_COMMON *cm, int index) {
1092 if (index < 0 || index >= REF_FRAMES) return NULL;
1093 if (cm->ref_frame_map[index] == NULL) return NULL;
1094 return &cm->ref_frame_map[index]->buf;
1095 }
1096
// Finds a frame buffer in the pool with zero references, marks it in use
// (ref_count = 1) and returns its index. Returns INVALID_IDX if every buffer
// is still referenced, which indicates a reference leak.
static inline int get_free_fb(AV1_COMMON *cm) {
  RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs;
  int i;

  // The pool is shared; guard the ref_count scan and update.
  lock_buffer_pool(cm->buffer_pool);
  const int num_frame_bufs = cm->buffer_pool->num_frame_bufs;
  for (i = 0; i < num_frame_bufs; ++i)
    if (frame_bufs[i].ref_count == 0) break;

  if (i != num_frame_bufs) {
    if (frame_bufs[i].buf.use_external_reference_buffers) {
      // If this frame buffer's y_buffer, u_buffer, and v_buffer point to the
      // external reference buffers. Restore the buffer pointers to point to the
      // internally allocated memory.
      YV12_BUFFER_CONFIG *ybf = &frame_bufs[i].buf;
      ybf->y_buffer = ybf->store_buf_adr[0];
      ybf->u_buffer = ybf->store_buf_adr[1];
      ybf->v_buffer = ybf->store_buf_adr[2];
      ybf->use_external_reference_buffers = 0;
    }

    frame_bufs[i].ref_count = 1;
  } else {
    // We should never run out of free buffers. If this assertion fails, there
    // is a reference leak.
    assert(0 && "Ran out of free frame buffers. Likely a reference leak.");
    // Reset i to be INVALID_IDX to indicate no free buffer found.
    i = INVALID_IDX;
  }

  unlock_buffer_pool(cm->buffer_pool);
  return i;
}
1130
// Releases cm->cur_frame (if any) and assigns it a fresh buffer from the
// pool. Returns the new buffer, or NULL when no free buffer is available.
static inline RefCntBuffer *assign_cur_frame_new_fb(AV1_COMMON *const cm) {
  // Release the previously-used frame-buffer
  if (cm->cur_frame != NULL) {
    --cm->cur_frame->ref_count;
    cm->cur_frame = NULL;
  }

  // Assign a new framebuffer
  const int new_fb_idx = get_free_fb(cm);
  if (new_fb_idx == INVALID_IDX) return NULL;

  cm->cur_frame = &cm->buffer_pool->frame_bufs[new_fb_idx];
#if CONFIG_AV1_ENCODER && !CONFIG_REALTIME_ONLY
  // Encoder-side caches derived from the previous contents of this buffer
  // are now stale.
  aom_invalidate_pyramid(cm->cur_frame->buf.y_pyramid);
  av1_invalidate_corner_list(cm->cur_frame->buf.corners);
#endif  // CONFIG_AV1_ENCODER && !CONFIG_REALTIME_ONLY
  av1_zero(cm->cur_frame->interp_filter_selected);
  return cm->cur_frame;
}
1150
1151 // Modify 'lhs_ptr' to reference the buffer at 'rhs_ptr', and update the ref
1152 // counts accordingly.
assign_frame_buffer_p(RefCntBuffer ** lhs_ptr,RefCntBuffer * rhs_ptr)1153 static inline void assign_frame_buffer_p(RefCntBuffer **lhs_ptr,
1154 RefCntBuffer *rhs_ptr) {
1155 RefCntBuffer *const old_ptr = *lhs_ptr;
1156 if (old_ptr != NULL) {
1157 assert(old_ptr->ref_count > 0);
1158 // One less reference to the buffer at 'old_ptr', so decrease ref count.
1159 --old_ptr->ref_count;
1160 }
1161
1162 *lhs_ptr = rhs_ptr;
1163 // One more reference to the buffer at 'rhs_ptr', so increase ref count.
1164 ++rhs_ptr->ref_count;
1165 }
1166
frame_is_intra_only(const AV1_COMMON * const cm)1167 static inline int frame_is_intra_only(const AV1_COMMON *const cm) {
1168 return cm->current_frame.frame_type == KEY_FRAME ||
1169 cm->current_frame.frame_type == INTRA_ONLY_FRAME;
1170 }
1171
frame_is_sframe(const AV1_COMMON * cm)1172 static inline int frame_is_sframe(const AV1_COMMON *cm) {
1173 return cm->current_frame.frame_type == S_FRAME;
1174 }
1175
1176 // These functions take a reference frame label between LAST_FRAME and
1177 // EXTREF_FRAME inclusive. Note that this is different to the indexing
1178 // previously used by the frame_refs[] array.
get_ref_frame_map_idx(const AV1_COMMON * const cm,const MV_REFERENCE_FRAME ref_frame)1179 static inline int get_ref_frame_map_idx(const AV1_COMMON *const cm,
1180 const MV_REFERENCE_FRAME ref_frame) {
1181 return (ref_frame >= LAST_FRAME && ref_frame <= EXTREF_FRAME)
1182 ? cm->remapped_ref_idx[ref_frame - LAST_FRAME]
1183 : INVALID_IDX;
1184 }
1185
get_ref_frame_buf(const AV1_COMMON * const cm,const MV_REFERENCE_FRAME ref_frame)1186 static inline RefCntBuffer *get_ref_frame_buf(
1187 const AV1_COMMON *const cm, const MV_REFERENCE_FRAME ref_frame) {
1188 const int map_idx = get_ref_frame_map_idx(cm, ref_frame);
1189 return (map_idx != INVALID_IDX) ? cm->ref_frame_map[map_idx] : NULL;
1190 }
1191
1192 // Both const and non-const versions of this function are provided so that it
1193 // can be used with a const AV1_COMMON if needed.
get_ref_scale_factors_const(const AV1_COMMON * const cm,const MV_REFERENCE_FRAME ref_frame)1194 static inline const struct scale_factors *get_ref_scale_factors_const(
1195 const AV1_COMMON *const cm, const MV_REFERENCE_FRAME ref_frame) {
1196 const int map_idx = get_ref_frame_map_idx(cm, ref_frame);
1197 return (map_idx != INVALID_IDX) ? &cm->ref_scale_factors[map_idx] : NULL;
1198 }
1199
// Non-const counterpart of get_ref_scale_factors_const().
static inline struct scale_factors *get_ref_scale_factors(
    AV1_COMMON *const cm, const MV_REFERENCE_FRAME ref_frame) {
  const int idx = get_ref_frame_map_idx(cm, ref_frame);
  if (idx == INVALID_IDX) return NULL;
  return &cm->ref_scale_factors[idx];
}
1205
get_primary_ref_frame_buf(const AV1_COMMON * const cm)1206 static inline RefCntBuffer *get_primary_ref_frame_buf(
1207 const AV1_COMMON *const cm) {
1208 const int primary_ref_frame = cm->features.primary_ref_frame;
1209 if (primary_ref_frame == PRIMARY_REF_NONE) return NULL;
1210 const int map_idx = get_ref_frame_map_idx(cm, primary_ref_frame + 1);
1211 return (map_idx != INVALID_IDX) ? cm->ref_frame_map[map_idx] : NULL;
1212 }
1213
1214 // Returns 1 if this frame might allow mvs from some reference frame.
frame_might_allow_ref_frame_mvs(const AV1_COMMON * cm)1215 static inline int frame_might_allow_ref_frame_mvs(const AV1_COMMON *cm) {
1216 return !cm->features.error_resilient_mode &&
1217 cm->seq_params->order_hint_info.enable_ref_frame_mvs &&
1218 cm->seq_params->order_hint_info.enable_order_hint &&
1219 !frame_is_intra_only(cm);
1220 }
1221
1222 // Returns 1 if this frame might use warped_motion
frame_might_allow_warped_motion(const AV1_COMMON * cm)1223 static inline int frame_might_allow_warped_motion(const AV1_COMMON *cm) {
1224 return !cm->features.error_resilient_mode && !frame_is_intra_only(cm) &&
1225 cm->seq_params->enable_warped_motion;
1226 }
1227
// (Re)allocates the per-frame 'mvs' and 'seg_map' arrays in 'buf' when the
// frame's mi dimensions changed, and grows cm->tpl_mvs when the current
// stride requires more space. Allocation failure aborts via CHECK_MEM_ERROR.
static inline void ensure_mv_buffer(RefCntBuffer *buf, AV1_COMMON *cm) {
  const int buf_rows = buf->mi_rows;
  const int buf_cols = buf->mi_cols;
  const CommonModeInfoParams *const mi_params = &cm->mi_params;

  if (buf->mvs == NULL || buf_rows != mi_params->mi_rows ||
      buf_cols != mi_params->mi_cols) {
    aom_free(buf->mvs);
    buf->mi_rows = mi_params->mi_rows;
    buf->mi_cols = mi_params->mi_cols;
    // MVs are stored at half mi resolution in each dimension, hence the
    // rounded-up halved dimensions.
    CHECK_MEM_ERROR(cm, buf->mvs,
                    (MV_REF *)aom_calloc(((mi_params->mi_rows + 1) >> 1) *
                                             ((mi_params->mi_cols + 1) >> 1),
                                         sizeof(*buf->mvs)));
    aom_free(buf->seg_map);
    // The segmentation map is stored at full mi resolution.
    CHECK_MEM_ERROR(
        cm, buf->seg_map,
        (uint8_t *)aom_calloc(mi_params->mi_rows * mi_params->mi_cols,
                              sizeof(*buf->seg_map)));
  }

  // tpl_mvs is also at half mi resolution; see the tpl_mvs field comment.
  const int mem_size =
      ((mi_params->mi_rows + MAX_MIB_SIZE) >> 1) * (mi_params->mi_stride >> 1);

  if (cm->tpl_mvs == NULL || cm->tpl_mvs_mem_size < mem_size) {
    aom_free(cm->tpl_mvs);
    CHECK_MEM_ERROR(cm, cm->tpl_mvs,
                    (TPL_MV_REF *)aom_calloc(mem_size, sizeof(*cm->tpl_mvs)));
    cm->tpl_mvs_mem_size = mem_size;
  }
}
1259
1260 #if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
1261 void cfl_init(CFL_CTX *cfl, const SequenceHeader *seq_params);
1262 #endif
1263
av1_num_planes(const AV1_COMMON * cm)1264 static inline int av1_num_planes(const AV1_COMMON *cm) {
1265 return cm->seq_params->monochrome ? 1 : MAX_MB_PLANE;
1266 }
1267
// Points xd's above-context pointers at the shared buffers for 'tile_row'.
static inline void av1_init_above_context(CommonContexts *above_contexts,
                                          int num_planes, int tile_row,
                                          MACROBLOCKD *xd) {
  int plane;
  for (plane = 0; plane < num_planes; ++plane)
    xd->above_entropy_context[plane] = above_contexts->entropy[plane][tile_row];
  xd->above_partition_context = above_contexts->partition[tile_row];
  xd->above_txfm_context = above_contexts->txfm[tile_row];
}
1277
// Initializes per-frame fields of 'xd': copies each plane's dequant and
// inverse-qmatrix tables from cm->quant_params, then sets the mi stride,
// error-info pointer and (when built in) the CfL context.
static inline void av1_init_macroblockd(AV1_COMMON *cm, MACROBLOCKD *xd) {
  const int num_planes = av1_num_planes(cm);
  const CommonQuantParams *const quant_params = &cm->quant_params;

  for (int i = 0; i < num_planes; ++i) {
    if (xd->plane[i].plane_type == PLANE_TYPE_Y) {
      // Luma plane: take the Y tables.
      memcpy(xd->plane[i].seg_dequant_QTX, quant_params->y_dequant_QTX,
             sizeof(quant_params->y_dequant_QTX));
      memcpy(xd->plane[i].seg_iqmatrix, quant_params->y_iqmatrix,
             sizeof(quant_params->y_iqmatrix));

    } else {
      // Chroma planes: U and V have separate tables.
      if (i == AOM_PLANE_U) {
        memcpy(xd->plane[i].seg_dequant_QTX, quant_params->u_dequant_QTX,
               sizeof(quant_params->u_dequant_QTX));
        memcpy(xd->plane[i].seg_iqmatrix, quant_params->u_iqmatrix,
               sizeof(quant_params->u_iqmatrix));
      } else {
        memcpy(xd->plane[i].seg_dequant_QTX, quant_params->v_dequant_QTX,
               sizeof(quant_params->v_dequant_QTX));
        memcpy(xd->plane[i].seg_iqmatrix, quant_params->v_iqmatrix,
               sizeof(quant_params->v_iqmatrix));
      }
    }
  }
  xd->mi_stride = cm->mi_params.mi_stride;
  xd->error_info = cm->error;
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
  cfl_init(&xd->cfl, cm->seq_params);
#endif
}
1309
// Points each plane's above/left entropy-context pointers at the entries for
// the block at (mi_row, mi_col).
static inline void set_entropy_context(MACROBLOCKD *xd, int mi_row, int mi_col,
                                       const int num_planes) {
  const BLOCK_SIZE bsize = xd->mi[0]->bsize;
  int row_offset = mi_row;
  int col_offset = mi_col;
  for (int plane = 0; plane < num_planes; ++plane) {
    struct macroblockd_plane *const pd = &xd->plane[plane];
    // Sub-sampled planes at an odd mi position with a 1-mi-unit extent reuse
    // the context of the previous row/column.
    if (pd->subsampling_y && (mi_row & 0x01) && mi_size_high[bsize] == 1)
      row_offset = mi_row - 1;
    if (pd->subsampling_x && (mi_col & 0x01) && mi_size_wide[bsize] == 1)
      col_offset = mi_col - 1;
    pd->above_entropy_context =
        &xd->above_entropy_context[plane][col_offset >> pd->subsampling_x];
    pd->left_entropy_context =
        &xd->left_entropy_context[plane][(row_offset & MAX_MIB_MASK) >>
                                         pd->subsampling_y];
  }
}
1331
calc_mi_size(int len)1332 static inline int calc_mi_size(int len) {
1333 // len is in mi units. Align to a multiple of SBs.
1334 return ALIGN_POWER_OF_TWO(len, MAX_MIB_SIZE_LOG2);
1335 }
1336
// Sets each plane's pixel width/height for a block of (bw x bh) mi units,
// clamped to a minimum of 4 samples per dimension.
static inline void set_plane_n4(MACROBLOCKD *const xd, int bw, int bh,
                                const int num_planes) {
  for (int p = 0; p < num_planes; ++p) {
    struct macroblockd_plane *const pd = &xd->plane[p];
    const int w = (bw * MI_SIZE) >> pd->subsampling_x;
    const int h = (bh * MI_SIZE) >> pd->subsampling_y;
    pd->width = AOMMAX(w, 4);
    pd->height = AOMMAX(h, 4);
  }
}
1348
// Sets the position-dependent fields of 'xd' for a block of (bw x bh) mi
// units at (mi_row, mi_col): distances to the frame edges, neighbor
// availability, luma/chroma neighbor mbmi pointers, and block dimensions.
static inline void set_mi_row_col(MACROBLOCKD *xd, const TileInfo *const tile,
                                  int mi_row, int bh, int mi_col, int bw,
                                  int mi_rows, int mi_cols) {
  // Signed distances from the block to each frame edge, in subpel units
  // (negative when the block extends past the edge).
  xd->mb_to_top_edge = -GET_MV_SUBPEL(mi_row * MI_SIZE);
  xd->mb_to_bottom_edge = GET_MV_SUBPEL((mi_rows - bh - mi_row) * MI_SIZE);
  xd->mb_to_left_edge = -GET_MV_SUBPEL((mi_col * MI_SIZE));
  xd->mb_to_right_edge = GET_MV_SUBPEL((mi_cols - bw - mi_col) * MI_SIZE);

  xd->mi_row = mi_row;
  xd->mi_col = mi_col;

  // Are edges available for intra prediction?
  xd->up_available = (mi_row > tile->mi_row_start);

  const int ss_x = xd->plane[1].subsampling_x;
  const int ss_y = xd->plane[1].subsampling_y;

  xd->left_available = (mi_col > tile->mi_col_start);
  xd->chroma_up_available = xd->up_available;
  xd->chroma_left_available = xd->left_available;
  // For sub-sampled, sub-8x8 blocks the chroma block starts one mi unit
  // up/left of the luma block, so availability is re-checked there.
  if (ss_x && bw < mi_size_wide[BLOCK_8X8])
    xd->chroma_left_available = (mi_col - 1) > tile->mi_col_start;
  if (ss_y && bh < mi_size_high[BLOCK_8X8])
    xd->chroma_up_available = (mi_row - 1) > tile->mi_row_start;
  if (xd->up_available) {
    xd->above_mbmi = xd->mi[-xd->mi_stride];
  } else {
    xd->above_mbmi = NULL;
  }

  if (xd->left_available) {
    xd->left_mbmi = xd->mi[-1];
  } else {
    xd->left_mbmi = NULL;
  }

  // The block carries chroma information iff, per dimension, its position is
  // odd, its size is even, or the plane is not sub-sampled.
  const int chroma_ref = ((mi_row & 0x01) || !(bh & 0x01) || !ss_y) &&
                         ((mi_col & 0x01) || !(bw & 0x01) || !ss_x);
  xd->is_chroma_ref = chroma_ref;
  if (chroma_ref) {
    // To help calculate the "above" and "left" chroma blocks, note that the
    // current block may cover multiple luma blocks (e.g., if partitioned into
    // 4x4 luma blocks).
    // First, find the top-left-most luma block covered by this chroma block
    MB_MODE_INFO **base_mi =
        &xd->mi[-(mi_row & ss_y) * xd->mi_stride - (mi_col & ss_x)];

    // Then, we consider the luma region covered by the left or above 4x4 chroma
    // prediction. We want to point to the chroma reference block in that
    // region, which is the bottom-right-most mi unit.
    // This leads to the following offsets:
    MB_MODE_INFO *chroma_above_mi =
        xd->chroma_up_available ? base_mi[-xd->mi_stride + ss_x] : NULL;
    xd->chroma_above_mbmi = chroma_above_mi;

    MB_MODE_INFO *chroma_left_mi =
        xd->chroma_left_available ? base_mi[ss_y * xd->mi_stride - 1] : NULL;
    xd->chroma_left_mbmi = chroma_left_mi;
  }

  xd->height = bh;
  xd->width = bw;

  // Flag the last vertical rectangle of the enclosing square, i.e. when the
  // block's right edge is aligned to its height.
  xd->is_last_vertical_rect = 0;
  if (xd->width < xd->height) {
    if (!((mi_col + xd->width) & (xd->height - 1))) {
      xd->is_last_vertical_rect = 1;
    }
  }

  // Flag the first horizontal rectangle of the enclosing square, i.e. when
  // the block's top edge is aligned to its width.
  xd->is_first_horizontal_rect = 0;
  if (xd->width > xd->height)
    if (!(mi_row & (xd->width - 1))) xd->is_first_horizontal_rect = 1;
}
1423
get_y_mode_cdf(FRAME_CONTEXT * tile_ctx,const MB_MODE_INFO * above_mi,const MB_MODE_INFO * left_mi)1424 static inline aom_cdf_prob *get_y_mode_cdf(FRAME_CONTEXT *tile_ctx,
1425 const MB_MODE_INFO *above_mi,
1426 const MB_MODE_INFO *left_mi) {
1427 const PREDICTION_MODE above = av1_above_block_mode(above_mi);
1428 const PREDICTION_MODE left = av1_left_block_mode(left_mi);
1429 const int above_ctx = intra_mode_context[above];
1430 const int left_ctx = intra_mode_context[left];
1431 return tile_ctx->kf_y_cdf[above_ctx][left_ctx];
1432 }
1433
// Stamps the partition context of 'subsize' across the width and height of
// 'bsize' in the above/left context arrays.
static inline void update_partition_context(MACROBLOCKD *xd, int mi_row,
                                            int mi_col, BLOCK_SIZE subsize,
                                            BLOCK_SIZE bsize) {
  PARTITION_CONTEXT *const above = xd->above_partition_context + mi_col;
  PARTITION_CONTEXT *const left =
      xd->left_partition_context + (mi_row & MAX_MIB_MASK);
  memset(above, partition_context_lookup[subsize].above, mi_size_wide[bsize]);
  memset(left, partition_context_lookup[subsize].left, mi_size_high[bsize]);
}
1446
// Returns 1 when the block at (mi_row, mi_col) carries chroma information:
// per dimension, its position is odd, its size is even, or the plane is not
// sub-sampled.
static inline int is_chroma_reference(int mi_row, int mi_col, BLOCK_SIZE bsize,
                                      int subsampling_x, int subsampling_y) {
  assert(bsize < BLOCK_SIZES_ALL);
  const int bw = mi_size_wide[bsize];
  const int bh = mi_size_high[bsize];
  const int row_ok = (mi_row & 0x01) || !(bh & 0x01) || !subsampling_y;
  const int col_ok = (mi_col & 0x01) || !(bw & 0x01) || !subsampling_x;
  return row_ok && col_ok;
}
1456
cdf_element_prob(const aom_cdf_prob * cdf,size_t element)1457 static inline aom_cdf_prob cdf_element_prob(const aom_cdf_prob *cdf,
1458 size_t element) {
1459 assert(cdf != NULL);
1460 return (element > 0 ? cdf[element - 1] : CDF_PROB_TOP) - cdf[element];
1461 }
1462
// Collapses the full partition CDF 'in' into a binary CDF 'out' separating
// "splits horizontally" partitions from the rest. 'bsize' matters because
// PARTITION_HORZ_4 does not exist for BLOCK_128X128.
static inline void partition_gather_horz_alike(aom_cdf_prob *out,
                                               const aom_cdf_prob *const in,
                                               BLOCK_SIZE bsize) {
  // Note: the previous '(void)bsize;' was dead code -- 'bsize' is used below.
  out[0] = CDF_PROB_TOP;
  out[0] -= cdf_element_prob(in, PARTITION_HORZ);
  out[0] -= cdf_element_prob(in, PARTITION_SPLIT);
  out[0] -= cdf_element_prob(in, PARTITION_HORZ_A);
  out[0] -= cdf_element_prob(in, PARTITION_HORZ_B);
  out[0] -= cdf_element_prob(in, PARTITION_VERT_A);
  // BLOCK_128X128 has no PARTITION_HORZ_4 symbol.
  if (bsize != BLOCK_128X128) out[0] -= cdf_element_prob(in, PARTITION_HORZ_4);
  out[0] = AOM_ICDF(out[0]);
  out[1] = AOM_ICDF(CDF_PROB_TOP);
}
1477
// Collapses the full partition CDF 'in' into a binary CDF 'out' separating
// "splits vertically" partitions from the rest. 'bsize' matters because
// PARTITION_VERT_4 does not exist for BLOCK_128X128.
static inline void partition_gather_vert_alike(aom_cdf_prob *out,
                                               const aom_cdf_prob *const in,
                                               BLOCK_SIZE bsize) {
  // Note: the previous '(void)bsize;' was dead code -- 'bsize' is used below.
  out[0] = CDF_PROB_TOP;
  out[0] -= cdf_element_prob(in, PARTITION_VERT);
  out[0] -= cdf_element_prob(in, PARTITION_SPLIT);
  out[0] -= cdf_element_prob(in, PARTITION_HORZ_A);
  out[0] -= cdf_element_prob(in, PARTITION_VERT_A);
  out[0] -= cdf_element_prob(in, PARTITION_VERT_B);
  // BLOCK_128X128 has no PARTITION_VERT_4 symbol.
  if (bsize != BLOCK_128X128) out[0] -= cdf_element_prob(in, PARTITION_VERT_4);
  out[0] = AOM_ICDF(out[0]);
  out[1] = AOM_ICDF(CDF_PROB_TOP);
}
1492
// Updates the above/left partition contexts after coding a block of size
// 'bsize' at (mi_row, mi_col) that was split by 'partition' into sub-blocks
// of size 'subsize'. The AB/BA partitions update their two halves separately.
static inline void update_ext_partition_context(MACROBLOCKD *xd, int mi_row,
                                                int mi_col, BLOCK_SIZE subsize,
                                                BLOCK_SIZE bsize,
                                                PARTITION_TYPE partition) {
  if (bsize >= BLOCK_8X8) {
    const int hbs = mi_size_wide[bsize] / 2;  // Half the block size in mi.
    BLOCK_SIZE bsize2 = get_partition_subsize(bsize, PARTITION_SPLIT);
    switch (partition) {
      case PARTITION_SPLIT:
        // Splits above 8x8 recurse; the sub-blocks update their own context.
        // Only the terminal 8x8 split is handled here.
        if (bsize != BLOCK_8X8) break;
        AOM_FALLTHROUGH_INTENDED;
      case PARTITION_NONE:
      case PARTITION_HORZ:
      case PARTITION_VERT:
      case PARTITION_HORZ_4:
      case PARTITION_VERT_4:
        update_partition_context(xd, mi_row, mi_col, subsize, bsize);
        break;
      case PARTITION_HORZ_A:
        update_partition_context(xd, mi_row, mi_col, bsize2, subsize);
        update_partition_context(xd, mi_row + hbs, mi_col, subsize, subsize);
        break;
      case PARTITION_HORZ_B:
        update_partition_context(xd, mi_row, mi_col, subsize, subsize);
        update_partition_context(xd, mi_row + hbs, mi_col, bsize2, subsize);
        break;
      case PARTITION_VERT_A:
        update_partition_context(xd, mi_row, mi_col, bsize2, subsize);
        update_partition_context(xd, mi_row, mi_col + hbs, subsize, subsize);
        break;
      case PARTITION_VERT_B:
        update_partition_context(xd, mi_row, mi_col, subsize, subsize);
        update_partition_context(xd, mi_row, mi_col + hbs, bsize2, subsize);
        break;
      default: assert(0 && "Invalid partition type");
    }
  }
}
1531
// Derives the partition-symbol context for a block of size 'bsize' at
// (mi_row, mi_col) from the above and left partition contexts.
static inline int partition_plane_context(const MACROBLOCKD *xd, int mi_row,
                                          int mi_col, BLOCK_SIZE bsize) {
  const PARTITION_CONTEXT *above_ctx = xd->above_partition_context + mi_col;
  const PARTITION_CONTEXT *left_ctx =
      xd->left_partition_context + (mi_row & MAX_MIB_MASK);
  // Minimum partition point is 8x8. Offset the bsl accordingly.
  const int bsl = mi_size_wide_log2[bsize] - mi_size_wide_log2[BLOCK_8X8];
  assert(mi_size_wide_log2[bsize] == mi_size_high_log2[bsize]);
  assert(bsl >= 0);

  const int above = (*above_ctx >> bsl) & 1;
  const int left = (*left_ctx >> bsl) & 1;
  return bsl * PARTITION_PLOFFSET + 2 * left + above;
}
1546
1547 // Return the number of elements in the partition CDF when
1548 // partitioning the (square) block with luma block size of bsize.
partition_cdf_length(BLOCK_SIZE bsize)1549 static inline int partition_cdf_length(BLOCK_SIZE bsize) {
1550 if (bsize <= BLOCK_8X8)
1551 return PARTITION_TYPES;
1552 else if (bsize == BLOCK_128X128)
1553 return EXT_PARTITION_TYPES - 2;
1554 else
1555 return EXT_PARTITION_TYPES;
1556 }
1557
// Width of the visible part of the block in mi units for 'plane', clamped
// against the right frame edge.
static inline int max_block_wide(const MACROBLOCKD *xd, BLOCK_SIZE bsize,
                                 int plane) {
  assert(bsize < BLOCK_SIZES_ALL);
  int width = block_size_wide[bsize];
  // mb_to_right_edge is negative (in subpel units) when the block overhangs
  // the frame; shrink the width accordingly for this plane's subsampling.
  if (xd->mb_to_right_edge < 0)
    width += xd->mb_to_right_edge >> (3 + xd->plane[plane].subsampling_x);
  // Convert from samples to mi units.
  return width >> MI_SIZE_LOG2;
}
1571
// Height of the visible part of the block in mi units for 'plane', clamped
// against the bottom frame edge.
static inline int max_block_high(const MACROBLOCKD *xd, BLOCK_SIZE bsize,
                                 int plane) {
  // Guard the block_size_high[] lookup, mirroring max_block_wide().
  assert(bsize < BLOCK_SIZES_ALL);
  int max_blocks_high = block_size_high[bsize];

  // mb_to_bottom_edge is negative (in subpel units) when the block overhangs
  // the frame; shrink the height accordingly for this plane's subsampling.
  if (xd->mb_to_bottom_edge < 0) {
    const struct macroblockd_plane *const pd = &xd->plane[plane];
    max_blocks_high += xd->mb_to_bottom_edge >> (3 + pd->subsampling_y);
  }

  // Scale the height in the transform block unit.
  return max_blocks_high >> MI_SIZE_LOG2;
}
1584
// Clears the above-context arrays (entropy, partition, txfm) for 'tile_row'
// over mi columns [mi_col_start, mi_col_end), widened to whole superblocks.
// Raises AOM_CODEC_CORRUPT_FRAME if chroma context buffers are missing while
// chroma planes are coded.
static inline void av1_zero_above_context(AV1_COMMON *const cm,
                                          const MACROBLOCKD *xd,
                                          int mi_col_start, int mi_col_end,
                                          const int tile_row) {
  const SequenceHeader *const seq_params = cm->seq_params;
  const int num_planes = av1_num_planes(cm);
  const int width = mi_col_end - mi_col_start;
  const int aligned_width =
      ALIGN_POWER_OF_TWO(width, seq_params->mib_size_log2);
  const int offset_y = mi_col_start;
  const int width_y = aligned_width;
  // Chroma contexts are horizontally subsampled.
  const int offset_uv = offset_y >> seq_params->subsampling_x;
  const int width_uv = width_y >> seq_params->subsampling_x;
  CommonContexts *const above_contexts = &cm->above_contexts;

  av1_zero_array(above_contexts->entropy[0][tile_row] + offset_y, width_y);
  if (num_planes > 1) {
    if (above_contexts->entropy[1][tile_row] &&
        above_contexts->entropy[2][tile_row]) {
      av1_zero_array(above_contexts->entropy[1][tile_row] + offset_uv,
                     width_uv);
      av1_zero_array(above_contexts->entropy[2][tile_row] + offset_uv,
                     width_uv);
    } else {
      aom_internal_error(xd->error_info, AOM_CODEC_CORRUPT_FRAME,
                         "Invalid value of planes");
    }
  }

  av1_zero_array(above_contexts->partition[tile_row] + mi_col_start,
                 aligned_width);

  // The txfm context resets to the largest transform size, not to zero.
  memset(above_contexts->txfm[tile_row] + mi_col_start,
         tx_size_wide[TX_SIZES_LARGEST], aligned_width * sizeof(TXFM_CONTEXT));
}
1620
// Resets xd's per-superblock left contexts: zeros for the entropy and
// partition arrays, and the largest transform size for the txfm buffer.
static inline void av1_zero_left_context(MACROBLOCKD *const xd) {
  av1_zero(xd->left_partition_context);
  av1_zero(xd->left_entropy_context);
  memset(xd->left_txfm_context_buffer, tx_size_high[TX_SIZES_LARGEST],
         sizeof(xd->left_txfm_context_buffer));
}
1628
// Fills 'len' txfm-context entries with transform size 'txs'.
static inline void set_txfm_ctx(TXFM_CONTEXT *txfm_ctx, uint8_t txs, int len) {
  for (int i = 0; i < len; ++i) {
    txfm_ctx[i] = txs;
  }
}
1633
// Writes the above/left txfm contexts for a (n4_w x n4_h) mi-unit block.
// Skipped blocks record the full block extent instead of the transform size.
static inline void set_txfm_ctxs(TX_SIZE tx_size, int n4_w, int n4_h, int skip,
                                 const MACROBLOCKD *xd) {
  const uint8_t bw = skip ? n4_w * MI_SIZE : tx_size_wide[tx_size];
  const uint8_t bh = skip ? n4_h * MI_SIZE : tx_size_high[tx_size];
  set_txfm_ctx(xd->above_txfm_context, bw, n4_w);
  set_txfm_ctx(xd->left_txfm_context, bh, n4_h);
}
1647
get_mi_grid_idx(const CommonModeInfoParams * const mi_params,int mi_row,int mi_col)1648 static inline int get_mi_grid_idx(const CommonModeInfoParams *const mi_params,
1649 int mi_row, int mi_col) {
1650 return mi_row * mi_params->mi_stride + mi_col;
1651 }
1652
get_alloc_mi_idx(const CommonModeInfoParams * const mi_params,int mi_row,int mi_col)1653 static inline int get_alloc_mi_idx(const CommonModeInfoParams *const mi_params,
1654 int mi_row, int mi_col) {
1655 const int mi_alloc_size_1d = mi_size_wide[mi_params->mi_alloc_bsize];
1656 const int mi_alloc_row = mi_row / mi_alloc_size_1d;
1657 const int mi_alloc_col = mi_col / mi_alloc_size_1d;
1658
1659 return mi_alloc_row * mi_params->mi_alloc_stride + mi_alloc_col;
1660 }
1661
1662 // For this partition block, set pointers in mi_params->mi_grid_base and xd->mi.
set_mi_offsets(const CommonModeInfoParams * const mi_params,MACROBLOCKD * const xd,int mi_row,int mi_col)1663 static inline void set_mi_offsets(const CommonModeInfoParams *const mi_params,
1664 MACROBLOCKD *const xd, int mi_row,
1665 int mi_col) {
1666 // 'mi_grid_base' should point to appropriate memory in 'mi'.
1667 const int mi_grid_idx = get_mi_grid_idx(mi_params, mi_row, mi_col);
1668 const int mi_alloc_idx = get_alloc_mi_idx(mi_params, mi_row, mi_col);
1669 mi_params->mi_grid_base[mi_grid_idx] = &mi_params->mi_alloc[mi_alloc_idx];
1670 // 'xd->mi' should point to an offset in 'mi_grid_base';
1671 xd->mi = mi_params->mi_grid_base + mi_grid_idx;
1672 // 'xd->tx_type_map' should point to an offset in 'mi_params->tx_type_map'.
1673 xd->tx_type_map = mi_params->tx_type_map + mi_grid_idx;
1674 xd->tx_type_map_stride = mi_params->mi_stride;
1675 }
1676
// Records the chosen transform dimensions over the whole txb area in the
// above and left txfm-context arrays.
static inline void txfm_partition_update(TXFM_CONTEXT *above_ctx,
                                         TXFM_CONTEXT *left_ctx,
                                         TX_SIZE tx_size, TX_SIZE txb_size) {
  const BLOCK_SIZE bsize = txsize_to_bsize[txb_size];
  const uint8_t txw = tx_size_wide[tx_size];
  const uint8_t txh = tx_size_high[tx_size];
  for (int j = 0; j < mi_size_high[bsize]; ++j) left_ctx[j] = txh;
  for (int j = 0; j < mi_size_wide[bsize]; ++j) above_ctx[j] = txw;
}
1689
get_sqr_tx_size(int tx_dim)1690 static inline TX_SIZE get_sqr_tx_size(int tx_dim) {
1691 switch (tx_dim) {
1692 case 128:
1693 case 64: return TX_64X64; break;
1694 case 32: return TX_32X32; break;
1695 case 16: return TX_16X16; break;
1696 case 8: return TX_8X8; break;
1697 default: return TX_4X4;
1698 }
1699 }
1700
get_tx_size(int width,int height)1701 static inline TX_SIZE get_tx_size(int width, int height) {
1702 if (width == height) {
1703 return get_sqr_tx_size(width);
1704 }
1705 if (width < height) {
1706 if (width + width == height) {
1707 switch (width) {
1708 case 4: return TX_4X8; break;
1709 case 8: return TX_8X16; break;
1710 case 16: return TX_16X32; break;
1711 case 32: return TX_32X64; break;
1712 }
1713 } else {
1714 switch (width) {
1715 case 4: return TX_4X16; break;
1716 case 8: return TX_8X32; break;
1717 case 16: return TX_16X64; break;
1718 }
1719 }
1720 } else {
1721 if (height + height == width) {
1722 switch (height) {
1723 case 4: return TX_8X4; break;
1724 case 8: return TX_16X8; break;
1725 case 16: return TX_32X16; break;
1726 case 32: return TX_64X32; break;
1727 }
1728 } else {
1729 switch (height) {
1730 case 4: return TX_16X4; break;
1731 case 8: return TX_32X8; break;
1732 case 16: return TX_64X16; break;
1733 }
1734 }
1735 }
1736 assert(0);
1737 return TX_4X4;
1738 }
1739
// Derives the entropy-coding context for the txfm-partition symbol from the
// above/left txfm contexts, the block size, and the candidate tx size.
static inline int txfm_partition_context(const TXFM_CONTEXT *const above_ctx,
                                         const TXFM_CONTEXT *const left_ctx,
                                         BLOCK_SIZE bsize, TX_SIZE tx_size) {
  // dummy return, not used by others.
  if (tx_size <= TX_4X4) return 0;

  // Neighbor contributions: whether each neighbor used a transform smaller
  // than the candidate along the shared edge.
  const int above = *above_ctx < tx_size_wide[tx_size];
  const int left = *left_ctx < tx_size_high[tx_size];

  const int max_dim = AOMMAX(block_size_wide[bsize], block_size_high[bsize]);
  const TX_SIZE max_tx_size = get_sqr_tx_size(max_dim);

  int category = TXFM_PARTITION_CONTEXTS;
  if (max_tx_size >= TX_8X8) {
    const int is_split =
        txsize_sqr_up_map[tx_size] != max_tx_size && max_tx_size > TX_8X8;
    category = is_split + (TX_SIZES - 1 - max_tx_size) * 2;
  }
  assert(category != TXFM_PARTITION_CONTEXTS);
  return category * 3 + above + left;
}
1763
1764 // Compute the next partition in the direction of the sb_type stored in the mi
1765 // array, starting with bsize.
static inline PARTITION_TYPE get_partition(const AV1_COMMON *const cm,
                                           int mi_row, int mi_col,
                                           BLOCK_SIZE bsize) {
  const CommonModeInfoParams *const mi_params = &cm->mi_params;
  // Out-of-frame positions carry no partition information.
  if (mi_row >= mi_params->mi_rows || mi_col >= mi_params->mi_cols)
    return PARTITION_INVALID;

  const int offset = mi_row * mi_params->mi_stride + mi_col;
  MB_MODE_INFO **mi = mi_params->mi_grid_base + offset;
  // The top-left mi of the block records the size actually coded there.
  const BLOCK_SIZE subsize = mi[0]->bsize;

  assert(bsize < BLOCK_SIZES_ALL);

  if (subsize == bsize) return PARTITION_NONE;

  // Block and sub-block dimensions in mi units.
  const int bhigh = mi_size_high[bsize];
  const int bwide = mi_size_wide[bsize];
  const int sshigh = mi_size_high[subsize];
  const int sswide = mi_size_wide[subsize];

  // NOTE(review): 'bwide' is compared against mi_rows and 'bhigh' against
  // mi_cols, which looks swapped. When bsize is square, bwide == bhigh and
  // the result is unaffected — confirm callers never pass a non-square
  // bsize before relying on this for rectangular blocks.
  if (bsize > BLOCK_8X8 && mi_row + bwide / 2 < mi_params->mi_rows &&
      mi_col + bhigh / 2 < mi_params->mi_cols) {
    // In this case, the block might be using an extended partition
    // type.
    const MB_MODE_INFO *const mbmi_right = mi[bwide / 2];
    const MB_MODE_INFO *const mbmi_below = mi[bhigh / 2 * mi_params->mi_stride];

    if (sswide == bwide) {
      // Smaller height but same width. Is PARTITION_HORZ_4, PARTITION_HORZ or
      // PARTITION_HORZ_B. To distinguish the latter two, check if the lower
      // half was split.
      if (sshigh * 4 == bhigh) return PARTITION_HORZ_4;
      assert(sshigh * 2 == bhigh);

      if (mbmi_below->bsize == subsize)
        return PARTITION_HORZ;
      else
        return PARTITION_HORZ_B;
    } else if (sshigh == bhigh) {
      // Smaller width but same height. Is PARTITION_VERT_4, PARTITION_VERT or
      // PARTITION_VERT_B. To distinguish the latter two, check if the right
      // half was split.
      if (sswide * 4 == bwide) return PARTITION_VERT_4;
      assert(sswide * 2 == bwide);

      if (mbmi_right->bsize == subsize)
        return PARTITION_VERT;
      else
        return PARTITION_VERT_B;
    } else {
      // Smaller width and smaller height. Might be PARTITION_SPLIT or could be
      // PARTITION_HORZ_A or PARTITION_VERT_A. If subsize isn't halved in both
      // dimensions, we immediately know this is a split (which will recurse to
      // get to subsize). Otherwise look down and to the right. With
      // PARTITION_VERT_A, the right block will have height bhigh; with
      // PARTITION_HORZ_A, the lower block with have width bwide. Otherwise
      // it's PARTITION_SPLIT.
      if (sswide * 2 != bwide || sshigh * 2 != bhigh) return PARTITION_SPLIT;

      if (mi_size_wide[mbmi_below->bsize] == bwide) return PARTITION_HORZ_A;
      if (mi_size_high[mbmi_right->bsize] == bhigh) return PARTITION_VERT_A;

      return PARTITION_SPLIT;
    }
  }
  // Near the frame edge the extended types cannot be probed; classify by
  // which dimensions shrank.
  const int vert_split = sswide < bwide;
  const int horz_split = sshigh < bhigh;
  const int split_idx = (vert_split << 1) | horz_split;
  assert(split_idx != 0);

  static const PARTITION_TYPE base_partitions[4] = {
    PARTITION_INVALID, PARTITION_HORZ, PARTITION_VERT, PARTITION_SPLIT
  };

  return base_partitions[split_idx];
}
1842
// Stores the superblock size in the sequence header along with its derived
// dimension in mi units and the log2 of that dimension.
static inline void set_sb_size(SequenceHeader *const seq_params,
                               BLOCK_SIZE sb_size) {
  const int mib_size = mi_size_wide[sb_size];
  const int mib_size_log2 = mi_size_wide_log2[sb_size];
  seq_params->sb_size = sb_size;
  seq_params->mib_size = mib_size;
  seq_params->mib_size_log2 = mib_size_log2;
}
1849
1850 // Returns true if the frame is fully lossless at the coded resolution.
1851 // Note: If super-resolution is used, such a frame will still NOT be lossless at
1852 // the upscaled resolution.
is_coded_lossless(const AV1_COMMON * cm,const MACROBLOCKD * xd)1853 static inline int is_coded_lossless(const AV1_COMMON *cm,
1854 const MACROBLOCKD *xd) {
1855 int coded_lossless = 1;
1856 if (cm->seg.enabled) {
1857 for (int i = 0; i < MAX_SEGMENTS; ++i) {
1858 if (!xd->lossless[i]) {
1859 coded_lossless = 0;
1860 break;
1861 }
1862 }
1863 } else {
1864 coded_lossless = xd->lossless[0];
1865 }
1866 return coded_lossless;
1867 }
1868
// Returns nonzero if 'seq_level_idx' is a defined level or the special
// SEQ_LEVEL_MAX value.
static inline int is_valid_seq_level_idx(AV1_LEVEL seq_level_idx) {
  if (seq_level_idx == SEQ_LEVEL_MAX) return 1;
  if (seq_level_idx >= SEQ_LEVELS) return 0;
  switch (seq_level_idx) {
    // The following levels are currently undefined.
    case SEQ_LEVEL_2_2:
    case SEQ_LEVEL_2_3:
    case SEQ_LEVEL_3_2:
    case SEQ_LEVEL_3_3:
    case SEQ_LEVEL_4_2:
    case SEQ_LEVEL_4_3:
#if !CONFIG_CWG_C013
    case SEQ_LEVEL_7_0:
    case SEQ_LEVEL_7_1:
    case SEQ_LEVEL_7_2:
    case SEQ_LEVEL_7_3:
    case SEQ_LEVEL_8_0:
    case SEQ_LEVEL_8_1:
    case SEQ_LEVEL_8_2:
    case SEQ_LEVEL_8_3:
#endif
      return 0;
    default: return 1;
  }
}
1884
1885 /*!\endcond */
1886
1887 #ifdef __cplusplus
1888 } // extern "C"
1889 #endif
1890
1891 #endif // AOM_AV1_COMMON_AV1_COMMON_INT_H_
1892