/*
 * Copyright (c) 2016, Alliance for Open Media. All rights reserved.
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#include "av1/common/av1_common_int.h"
#include "av1/common/cfl.h"
#include "av1/common/common_data.h"

#include "config/av1_rtcd.h"

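// Initializes the CfL context: clears the luma reconstruction and AC buffers
// and records the chroma subsampling from the sequence header.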
void cfl_init(CFL_CTX *cfl, const SequenceHeader *seq_params) {
  assert(block_size_wide[CFL_MAX_BLOCK_SIZE] == CFL_BUF_LINE);
  assert(block_size_high[CFL_MAX_BLOCK_SIZE] == CFL_BUF_LINE);

  memset(&cfl->recon_buf_q3, 0, sizeof(cfl->recon_buf_q3));
  memset(&cfl->ac_buf_q3, 0, sizeof(cfl->ac_buf_q3));
  cfl->subsampling_x = seq_params->subsampling_x;
  cfl->subsampling_y = seq_params->subsampling_y;
  cfl->are_parameters_computed = 0;
  cfl->store_y = 0;
  // The DC_PRED cache is disabled by default and is only enabled in
  // cfl_rd_pick_alpha.
  clear_cfl_dc_pred_cache_flags(cfl);
}

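// Caches one row of DC_PRED output for the given chroma plane so that
// cfl_rd_pick_alpha can reuse it. High bit depth buffers copy width 16-bit
// samples (width << 1 bytes) instead of width bytes.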
void cfl_store_dc_pred(MACROBLOCKD *const xd, const uint8_t *input,
                       CFL_PRED_TYPE pred_plane, int width) {
  assert(pred_plane < CFL_PRED_PLANES);
  assert(width <= CFL_BUF_LINE);

  if (is_cur_buf_hbd(xd)) {
    uint16_t *const input_16 = CONVERT_TO_SHORTPTR(input);
    memcpy(xd->cfl.dc_pred_cache[pred_plane], input_16, width << 1);
    return;
  }

  memcpy(xd->cfl.dc_pred_cache[pred_plane], input, width);
}

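// Replicates the cached DC prediction row over every row of the destination
// block (low bit depth variant; the high bit depth variant follows).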
static void cfl_load_dc_pred_lbd(const int16_t *dc_pred_cache, uint8_t *dst,
                                 int dst_stride, int width, int height) {
  for (int j = 0; j < height; j++) {
    memcpy(dst, dc_pred_cache, width);
    dst += dst_stride;
  }
}

static void cfl_load_dc_pred_hbd(const int16_t *dc_pred_cache, uint16_t *dst,
                                 int dst_stride, int width, int height) {
  const size_t num_bytes = width << 1;
  for (int j = 0; j < height; j++) {
    memcpy(dst, dc_pred_cache, num_bytes);
    dst += dst_stride;
  }
}
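
// Loads the cached DC prediction into the destination block, dispatching on
// the bit depth of the current buffer.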
void cfl_load_dc_pred(MACROBLOCKD *const xd, uint8_t *dst, int dst_stride,
                      TX_SIZE tx_size, CFL_PRED_TYPE pred_plane) {
  const int width = tx_size_wide[tx_size];
  const int height = tx_size_high[tx_size];
  assert(pred_plane < CFL_PRED_PLANES);
  assert(width <= CFL_BUF_LINE);
  assert(height <= CFL_BUF_LINE);
  if (is_cur_buf_hbd(xd)) {
    uint16_t *dst_16 = CONVERT_TO_SHORTPTR(dst);
    cfl_load_dc_pred_hbd(xd->cfl.dc_pred_cache[pred_plane], dst_16, dst_stride,
                         width, height);
    return;
  }
  cfl_load_dc_pred_lbd(xd->cfl.dc_pred_cache[pred_plane], dst, dst_stride,
                       width, height);
}

// Due to frame boundary issues, it is possible that the total area covered by
// chroma exceeds that of luma. When this happens, we fill the missing pixels
// by repeating the last columns and/or rows.
static inline void cfl_pad(CFL_CTX *cfl, int width, int height) {
  const int diff_width = width - cfl->buf_width;
  const int diff_height = height - cfl->buf_height;

  if (diff_width > 0) {
    const int min_height = height - diff_height;
    uint16_t *recon_buf_q3 = cfl->recon_buf_q3 + (width - diff_width);
    for (int j = 0; j < min_height; j++) {
      const uint16_t last_pixel = recon_buf_q3[-1];
      assert(recon_buf_q3 + diff_width <= cfl->recon_buf_q3 + CFL_BUF_SQUARE);
      for (int i = 0; i < diff_width; i++) {
        recon_buf_q3[i] = last_pixel;
      }
      recon_buf_q3 += CFL_BUF_LINE;
    }
    cfl->buf_width = width;
  }
  if (diff_height > 0) {
    uint16_t *recon_buf_q3 =
        cfl->recon_buf_q3 + ((height - diff_height) * CFL_BUF_LINE);
    for (int j = 0; j < diff_height; j++) {
      const uint16_t *last_row_q3 = recon_buf_q3 - CFL_BUF_LINE;
      assert(recon_buf_q3 + width <= cfl->recon_buf_q3 + CFL_BUF_SQUARE);
      for (int i = 0; i < width; i++) {
        recon_buf_q3[i] = last_row_q3[i];
      }
      recon_buf_q3 += CFL_BUF_LINE;
    }
    cfl->buf_height = height;
  }
}

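// Subtracts the block average of the reconstructed luma values (in Q3),
// leaving the zero-mean AC contribution used by the CfL prediction.
// round_offset is added to the sum so the average is rounded rather than
// truncated; num_pel_log2 is log2 of the number of pixels in the block.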
static void subtract_average_c(const uint16_t *src, int16_t *dst, int width,
                               int height, int round_offset, int num_pel_log2) {
  int sum = round_offset;
  const uint16_t *recon = src;
  for (int j = 0; j < height; j++) {
    for (int i = 0; i < width; i++) {
      sum += recon[i];
    }
    recon += CFL_BUF_LINE;
  }
  const int avg = sum >> num_pel_log2;
  for (int j = 0; j < height; j++) {
    for (int i = 0; i < width; i++) {
      dst[i] = src[i] - avg;
    }
    src += CFL_BUF_LINE;
    dst += CFL_BUF_LINE;
  }
}

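// CFL_SUB_AVG_FN (see cfl.h) instantiates the per-transform-size
// subtract-average wrappers around subtract_average_c.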
CFL_SUB_AVG_FN(c)

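// Converts the jointly coded alpha index and sign into a signed Q3 alpha for
// the requested chroma plane. A zero sign maps to alpha 0; otherwise the
// magnitude is the coded index plus one, negated for negative signs.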
static inline int cfl_idx_to_alpha(uint8_t alpha_idx, int8_t joint_sign,
                                   CFL_PRED_TYPE pred_type) {
  const int alpha_sign = (pred_type == CFL_PRED_U) ? CFL_SIGN_U(joint_sign)
                                                   : CFL_SIGN_V(joint_sign);
  if (alpha_sign == CFL_SIGN_ZERO) return 0;
  const int abs_alpha_q3 =
      (pred_type == CFL_PRED_U) ? CFL_IDX_U(alpha_idx) : CFL_IDX_V(alpha_idx);
  return (alpha_sign == CFL_SIGN_POS) ? abs_alpha_q3 + 1 : -abs_alpha_q3 - 1;
}

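// CfL prediction: each chroma pixel is the DC prediction already present in
// dst plus alpha_q3 times the luma AC contribution, rounded back to pixel
// precision and clipped to the valid range. The CFL_PREDICT_FN macros below
// instantiate the per-transform-size wrappers.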
static inline void cfl_predict_lbd_c(const int16_t *ac_buf_q3, uint8_t *dst,
                                     int dst_stride, int alpha_q3, int width,
                                     int height) {
  for (int j = 0; j < height; j++) {
    for (int i = 0; i < width; i++) {
      dst[i] = clip_pixel(get_scaled_luma_q0(alpha_q3, ac_buf_q3[i]) + dst[i]);
    }
    dst += dst_stride;
    ac_buf_q3 += CFL_BUF_LINE;
  }
}

CFL_PREDICT_FN(c, lbd)

#if CONFIG_AV1_HIGHBITDEPTH
static inline void cfl_predict_hbd_c(const int16_t *ac_buf_q3, uint16_t *dst,
                                     int dst_stride, int alpha_q3,
                                     int bit_depth, int width, int height) {
  for (int j = 0; j < height; j++) {
    for (int i = 0; i < width; i++) {
      dst[i] = clip_pixel_highbd(
          get_scaled_luma_q0(alpha_q3, ac_buf_q3[i]) + dst[i], bit_depth);
    }
    dst += dst_stride;
    ac_buf_q3 += CFL_BUF_LINE;
  }
}

CFL_PREDICT_FN(c, hbd)
#endif

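// Pads the stored luma surface to the transform block size and computes the
// zero-mean AC buffer. Must only be called once per stored block.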
static void cfl_compute_parameters(MACROBLOCKD *const xd, TX_SIZE tx_size) {
  CFL_CTX *const cfl = &xd->cfl;
  // Do not call cfl_compute_parameters multiple times on the same values.
  assert(cfl->are_parameters_computed == 0);

  cfl_pad(cfl, tx_size_wide[tx_size], tx_size_high[tx_size]);
  cfl_get_subtract_average_fn(tx_size)(cfl->recon_buf_q3, cfl->ac_buf_q3);
  cfl->are_parameters_computed = 1;
}

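// Applies the CfL prediction to one chroma transform block: computes the CfL
// parameters if needed, derives the signed alpha for the requested plane, and
// adds the scaled luma AC contribution on top of the DC prediction in dst.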
void av1_cfl_predict_block(MACROBLOCKD *const xd, uint8_t *dst, int dst_stride,
                           TX_SIZE tx_size, int plane) {
  CFL_CTX *const cfl = &xd->cfl;
  MB_MODE_INFO *mbmi = xd->mi[0];
  assert(is_cfl_allowed(xd));

  if (!cfl->are_parameters_computed) cfl_compute_parameters(xd, tx_size);

  const int alpha_q3 =
      cfl_idx_to_alpha(mbmi->cfl_alpha_idx, mbmi->cfl_alpha_signs, plane - 1);
  assert((tx_size_high[tx_size] - 1) * CFL_BUF_LINE + tx_size_wide[tx_size] <=
         CFL_BUF_SQUARE);
#if CONFIG_AV1_HIGHBITDEPTH
  if (is_cur_buf_hbd(xd)) {
    uint16_t *dst_16 = CONVERT_TO_SHORTPTR(dst);
    cfl_get_predict_hbd_fn(tx_size)(cfl->ac_buf_q3, dst_16, dst_stride,
                                    alpha_q3, xd->bd);
    return;
  }
#endif
  cfl_get_predict_lbd_fn(tx_size)(cfl->ac_buf_q3, dst, dst_stride, alpha_q3);
}

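// The subsampling helpers below downsample the reconstructed luma plane into
// the Q3 CfL buffer: each output value is eight times the average of the
// co-located luma samples (a 2x2 sum << 1 for 4:2:0, a horizontal pair << 2
// for 4:2:2, and a single sample << 3 for 4:4:4).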
static void cfl_luma_subsampling_420_lbd_c(const uint8_t *input,
                                           int input_stride,
                                           uint16_t *output_q3, int width,
                                           int height) {
  for (int j = 0; j < height; j += 2) {
    for (int i = 0; i < width; i += 2) {
      const int bot = i + input_stride;
      output_q3[i >> 1] =
          (input[i] + input[i + 1] + input[bot] + input[bot + 1]) << 1;
    }
    input += input_stride << 1;
    output_q3 += CFL_BUF_LINE;
  }
}

static void cfl_luma_subsampling_422_lbd_c(const uint8_t *input,
                                           int input_stride,
                                           uint16_t *output_q3, int width,
                                           int height) {
  assert((height - 1) * CFL_BUF_LINE + width <= CFL_BUF_SQUARE);
  for (int j = 0; j < height; j++) {
    for (int i = 0; i < width; i += 2) {
      output_q3[i >> 1] = (input[i] + input[i + 1]) << 2;
    }
    input += input_stride;
    output_q3 += CFL_BUF_LINE;
  }
}

static void cfl_luma_subsampling_444_lbd_c(const uint8_t *input,
                                           int input_stride,
                                           uint16_t *output_q3, int width,
                                           int height) {
  assert((height - 1) * CFL_BUF_LINE + width <= CFL_BUF_SQUARE);
  for (int j = 0; j < height; j++) {
    for (int i = 0; i < width; i++) {
      output_q3[i] = input[i] << 3;
    }
    input += input_stride;
    output_q3 += CFL_BUF_LINE;
  }
}

#if CONFIG_AV1_HIGHBITDEPTH
static void cfl_luma_subsampling_420_hbd_c(const uint16_t *input,
                                           int input_stride,
                                           uint16_t *output_q3, int width,
                                           int height) {
  for (int j = 0; j < height; j += 2) {
    for (int i = 0; i < width; i += 2) {
      const int bot = i + input_stride;
      output_q3[i >> 1] =
          (input[i] + input[i + 1] + input[bot] + input[bot + 1]) << 1;
    }
    input += input_stride << 1;
    output_q3 += CFL_BUF_LINE;
  }
}

static void cfl_luma_subsampling_422_hbd_c(const uint16_t *input,
                                           int input_stride,
                                           uint16_t *output_q3, int width,
                                           int height) {
  assert((height - 1) * CFL_BUF_LINE + width <= CFL_BUF_SQUARE);
  for (int j = 0; j < height; j++) {
    for (int i = 0; i < width; i += 2) {
      output_q3[i >> 1] = (input[i] + input[i + 1]) << 2;
    }
    input += input_stride;
    output_q3 += CFL_BUF_LINE;
  }
}

static void cfl_luma_subsampling_444_hbd_c(const uint16_t *input,
                                           int input_stride,
                                           uint16_t *output_q3, int width,
                                           int height) {
  assert((height - 1) * CFL_BUF_LINE + width <= CFL_BUF_SQUARE);
  for (int j = 0; j < height; j++) {
    for (int i = 0; i < width; i++) {
      output_q3[i] = input[i] << 3;
    }
    input += input_stride;
    output_q3 += CFL_BUF_LINE;
  }
}
#endif

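// CFL_GET_SUBSAMPLE_FUNCTION (see cfl.h) instantiates the per-transform-size
// subsampling wrappers and the getters used below.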
CFL_GET_SUBSAMPLE_FUNCTION(c)

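// Selects the luma subsampling function that matches the chroma subsampling
// of the sequence (4:2:0, 4:2:2 or 4:4:4) for the given transform size.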
#if CONFIG_AV1_HIGHBITDEPTH
static inline cfl_subsample_hbd_fn cfl_subsampling_hbd(TX_SIZE tx_size,
                                                       int sub_x, int sub_y) {
  if (sub_x == 1) {
    if (sub_y == 1) {
      return cfl_get_luma_subsampling_420_hbd(tx_size);
    }
    return cfl_get_luma_subsampling_422_hbd(tx_size);
  }
  return cfl_get_luma_subsampling_444_hbd(tx_size);
}
#endif

static inline cfl_subsample_lbd_fn cfl_subsampling_lbd(TX_SIZE tx_size,
                                                       int sub_x, int sub_y) {
  if (sub_x == 1) {
    if (sub_y == 1) {
      return cfl_get_luma_subsampling_420_lbd(tx_size);
    }
    return cfl_get_luma_subsampling_422_lbd(tx_size);
  }
  return cfl_get_luma_subsampling_444_lbd(tx_size);
}

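// Subsamples a reconstructed luma block into the CfL buffer. (row, col) give
// the position of the transform block within the current block, in MI units;
// the stored surface dimensions are tracked so that cfl_pad can handle chroma
// overrun at frame boundaries.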
static void cfl_store(CFL_CTX *cfl, const uint8_t *input, int input_stride,
                      int row, int col, TX_SIZE tx_size, int use_hbd) {
  const int width = tx_size_wide[tx_size];
  const int height = tx_size_high[tx_size];
  const int tx_off_log2 = MI_SIZE_LOG2;
  const int sub_x = cfl->subsampling_x;
  const int sub_y = cfl->subsampling_y;
  const int store_row = row << (tx_off_log2 - sub_y);
  const int store_col = col << (tx_off_log2 - sub_x);
  const int store_height = height >> sub_y;
  const int store_width = width >> sub_x;

  // Invalidate current parameters.
  cfl->are_parameters_computed = 0;

  // Store the extent of the pixel buffer that was written to, so that we can
  // manage chroma overrun (e.g. when the chroma surface goes beyond the frame
  // boundary).
  if (col == 0 && row == 0) {
    cfl->buf_width = store_width;
    cfl->buf_height = store_height;
  } else {
    cfl->buf_width = OD_MAXI(store_col + store_width, cfl->buf_width);
    cfl->buf_height = OD_MAXI(store_row + store_height, cfl->buf_height);
  }

  // Check that we will remain inside the pixel buffer.
  assert(store_row + store_height <= CFL_BUF_LINE);
  assert(store_col + store_width <= CFL_BUF_LINE);

  // Store the input into the CfL pixel buffer.
  uint16_t *recon_buf_q3 =
      cfl->recon_buf_q3 + (store_row * CFL_BUF_LINE + store_col);
#if CONFIG_AV1_HIGHBITDEPTH
  if (use_hbd) {
    cfl_subsampling_hbd(tx_size, sub_x, sub_y)(CONVERT_TO_SHORTPTR(input),
                                               input_stride, recon_buf_q3);
  } else {
    cfl_subsampling_lbd(tx_size, sub_x, sub_y)(input, input_stride,
                                               recon_buf_q3);
  }
#else
  (void)use_hbd;
  cfl_subsampling_lbd(tx_size, sub_x, sub_y)(input, input_stride, recon_buf_q3);
#endif
}

// Adjust the row and column of blocks smaller than 8x8, as chroma-referenced
// and non-chroma-referenced blocks are stored together in the CfL buffer.
static inline void sub8x8_adjust_offset(const CFL_CTX *cfl, int mi_row,
                                        int mi_col, int *row_out,
                                        int *col_out) {
  // Increment row index for bottom: 8x4, 16x4 or both bottom 4x4s.
  if ((mi_row & 0x01) && cfl->subsampling_y) {
    assert(*row_out == 0);
    (*row_out)++;
  }

  // Increment col index for right: 4x8, 4x16 or both right 4x4s.
  if ((mi_col & 0x01) && cfl->subsampling_x) {
    assert(*col_out == 0);
    (*col_out)++;
  }
}

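// Stores the reconstructed luma samples of one transform block so that CfL
// can later predict the co-located chroma blocks from them.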
void cfl_store_tx(MACROBLOCKD *const xd, int row, int col, TX_SIZE tx_size,
                  BLOCK_SIZE bsize) {
  CFL_CTX *const cfl = &xd->cfl;
  struct macroblockd_plane *const pd = &xd->plane[AOM_PLANE_Y];
  uint8_t *dst = &pd->dst.buf[(row * pd->dst.stride + col) << MI_SIZE_LOG2];

  if (block_size_high[bsize] == 4 || block_size_wide[bsize] == 4) {
    // Only dimensions of size 4 can have an odd offset.
    assert(!((col & 1) && tx_size_wide[tx_size] != 4));
    assert(!((row & 1) && tx_size_high[tx_size] != 4));
    sub8x8_adjust_offset(cfl, xd->mi_row, xd->mi_col, &row, &col);
  }
  cfl_store(cfl, dst, pd->dst.stride, row, col, tx_size, is_cur_buf_hbd(xd));
}

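// Width/height of the intra prediction block for the given plane, clamped to
// the frame boundary and rounded up to a multiple of the transform dimension.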
static inline int max_intra_block_width(const MACROBLOCKD *xd,
                                        BLOCK_SIZE plane_bsize, int plane,
                                        TX_SIZE tx_size) {
  const int max_blocks_wide = max_block_wide(xd, plane_bsize, plane)
                              << MI_SIZE_LOG2;
  return ALIGN_POWER_OF_TWO(max_blocks_wide, tx_size_wide_log2[tx_size]);
}

static inline int max_intra_block_height(const MACROBLOCKD *xd,
                                         BLOCK_SIZE plane_bsize, int plane,
                                         TX_SIZE tx_size) {
  const int max_blocks_high = max_block_high(xd, plane_bsize, plane)
                              << MI_SIZE_LOG2;
  return ALIGN_POWER_OF_TWO(max_blocks_high, tx_size_high_log2[tx_size]);
}

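// Stores the luma samples of an entire prediction block for CfL. The stored
// area is clamped to the frame boundary and mapped to a matching transform
// size before being passed to cfl_store.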
void cfl_store_block(MACROBLOCKD *const xd, BLOCK_SIZE bsize, TX_SIZE tx_size) {
  CFL_CTX *const cfl = &xd->cfl;
  struct macroblockd_plane *const pd = &xd->plane[AOM_PLANE_Y];
  int row = 0;
  int col = 0;

  if (block_size_high[bsize] == 4 || block_size_wide[bsize] == 4) {
    sub8x8_adjust_offset(cfl, xd->mi_row, xd->mi_col, &row, &col);
  }
  const int width = max_intra_block_width(xd, bsize, AOM_PLANE_Y, tx_size);
  const int height = max_intra_block_height(xd, bsize, AOM_PLANE_Y, tx_size);
  tx_size = get_tx_size(width, height);
  cfl_store(cfl, pd->dst.buf, pd->dst.stride, row, col, tx_size,
            is_cur_buf_hbd(xd));
}