/*
 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include <assert.h>
#include <limits.h>
#include <stdio.h>

#include "./vp9_rtcd.h"
#include "./vpx_dsp_rtcd.h"
#include "./vpx_scale_rtcd.h"

#include "vpx_mem/vpx_mem.h"
#include "vpx_ports/system_state.h"
#include "vpx_ports/vpx_once.h"
#include "vpx_ports/vpx_timer.h"
#include "vpx_scale/vpx_scale.h"
#include "vpx_util/vpx_pthread.h"
#include "vpx_util/vpx_thread.h"

#include "vp9/common/vp9_alloccommon.h"
#include "vp9/common/vp9_loopfilter.h"
#include "vp9/common/vp9_onyxc_int.h"
#if CONFIG_VP9_POSTPROC
#include "vp9/common/vp9_postproc.h"
#endif
#include "vp9/common/vp9_quant_common.h"
#include "vp9/common/vp9_reconintra.h"

#include "vp9/decoder/vp9_decodeframe.h"
#include "vp9/decoder/vp9_decoder.h"
#include "vp9/decoder/vp9_detokenize.h"

static void initialize_dec(void) {
  static volatile int init_done = 0;

  if (!init_done) {
    vp9_rtcd();
    vpx_dsp_rtcd();
    vpx_scale_rtcd();
    vp9_init_intra_predictors();
    init_done = 1;
  }
}

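// Point cm->mi and cm->mi_grid_visible just inside the allocated base
// arrays: the "+ cm->mi_stride + 1" offset skips one row and one column of
// border mode-info units kept above and to the left of the visible area.
// The grid is cleared so stale pointers from a previous frame are never
// dereferenced.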
static void vp9_dec_setup_mi(VP9_COMMON *cm) {
  cm->mi = cm->mip + cm->mi_stride + 1;
  cm->mi_grid_visible = cm->mi_grid_base + cm->mi_stride + 1;
  memset(cm->mi_grid_base, 0,
         cm->mi_stride * (cm->mi_rows + 1) * sizeof(*cm->mi_grid_base));
}

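// Allocate the row-multithreading work buffers: per-job synchronization
// primitives, per-plane dqcoeff/eob scratch sized per superblock, the
// partition and recon maps, and (once) the per-thread data array. On
// allocation failure CHECK_MEM_ERROR() reports through cm->error, which
// longjmps to the caller's error handler, so cm->error.jmp must be set up
// before this is called.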
void vp9_dec_alloc_row_mt_mem(RowMTWorkerData *row_mt_worker_data,
                              VP9_COMMON *cm, int num_sbs, int max_threads,
                              int num_jobs) {
  int plane;
  const size_t dqcoeff_size = (num_sbs << DQCOEFFS_PER_SB_LOG2) *
                              sizeof(*row_mt_worker_data->dqcoeff[0]);
  row_mt_worker_data->num_jobs = num_jobs;
#if CONFIG_MULTITHREAD
  {
    int i;
    CHECK_MEM_ERROR(
        &cm->error, row_mt_worker_data->recon_sync_mutex,
        vpx_malloc(sizeof(*row_mt_worker_data->recon_sync_mutex) * num_jobs));
    if (row_mt_worker_data->recon_sync_mutex) {
      for (i = 0; i < num_jobs; ++i) {
        pthread_mutex_init(&row_mt_worker_data->recon_sync_mutex[i], NULL);
      }
    }

    CHECK_MEM_ERROR(
        &cm->error, row_mt_worker_data->recon_sync_cond,
        vpx_malloc(sizeof(*row_mt_worker_data->recon_sync_cond) * num_jobs));
    if (row_mt_worker_data->recon_sync_cond) {
      for (i = 0; i < num_jobs; ++i) {
        pthread_cond_init(&row_mt_worker_data->recon_sync_cond[i], NULL);
      }
    }
  }
#endif
  row_mt_worker_data->num_sbs = num_sbs;
  for (plane = 0; plane < 3; ++plane) {
    CHECK_MEM_ERROR(&cm->error, row_mt_worker_data->dqcoeff[plane],
                    vpx_memalign(32, dqcoeff_size));
    memset(row_mt_worker_data->dqcoeff[plane], 0, dqcoeff_size);
    CHECK_MEM_ERROR(&cm->error, row_mt_worker_data->eob[plane],
                    vpx_calloc(num_sbs << EOBS_PER_SB_LOG2,
                               sizeof(*row_mt_worker_data->eob[plane])));
  }
  CHECK_MEM_ERROR(&cm->error, row_mt_worker_data->partition,
                  vpx_calloc(num_sbs * PARTITIONS_PER_SB,
                             sizeof(*row_mt_worker_data->partition)));
  CHECK_MEM_ERROR(&cm->error, row_mt_worker_data->recon_map,
                  vpx_calloc(num_sbs, sizeof(*row_mt_worker_data->recon_map)));

  // Allocate memory for thread_data.
  if (row_mt_worker_data->thread_data == NULL) {
    const size_t thread_size =
        max_threads * sizeof(*row_mt_worker_data->thread_data);
    CHECK_MEM_ERROR(&cm->error, row_mt_worker_data->thread_data,
                    vpx_memalign(32, thread_size));
  }
}

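// Release everything allocated by vp9_dec_alloc_row_mt_mem(). vpx_free()
// tolerates NULL, so a partially initialized structure is freed safely, and
// every pointer is reset to NULL afterwards.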
void vp9_dec_free_row_mt_mem(RowMTWorkerData *row_mt_worker_data) {
  if (row_mt_worker_data != NULL) {
    int plane;
#if CONFIG_MULTITHREAD
    int i;
    if (row_mt_worker_data->recon_sync_mutex != NULL) {
      for (i = 0; i < row_mt_worker_data->num_jobs; ++i) {
        pthread_mutex_destroy(&row_mt_worker_data->recon_sync_mutex[i]);
      }
      vpx_free(row_mt_worker_data->recon_sync_mutex);
      row_mt_worker_data->recon_sync_mutex = NULL;
    }
    if (row_mt_worker_data->recon_sync_cond != NULL) {
      for (i = 0; i < row_mt_worker_data->num_jobs; ++i) {
        pthread_cond_destroy(&row_mt_worker_data->recon_sync_cond[i]);
      }
      vpx_free(row_mt_worker_data->recon_sync_cond);
      row_mt_worker_data->recon_sync_cond = NULL;
    }
#endif
    for (plane = 0; plane < 3; ++plane) {
      vpx_free(row_mt_worker_data->eob[plane]);
      row_mt_worker_data->eob[plane] = NULL;
      vpx_free(row_mt_worker_data->dqcoeff[plane]);
      row_mt_worker_data->dqcoeff[plane] = NULL;
    }
    vpx_free(row_mt_worker_data->partition);
    row_mt_worker_data->partition = NULL;
    vpx_free(row_mt_worker_data->recon_map);
    row_mt_worker_data->recon_map = NULL;
    vpx_free(row_mt_worker_data->thread_data);
    row_mt_worker_data->thread_data = NULL;
  }
}

static int vp9_dec_alloc_mi(VP9_COMMON *cm, int mi_size) {
  cm->mip = vpx_calloc(mi_size, sizeof(*cm->mip));
  if (!cm->mip) return 1;
  cm->mi_alloc_size = mi_size;
  cm->mi_grid_base = (MODE_INFO **)vpx_calloc(mi_size, sizeof(MODE_INFO *));
  if (!cm->mi_grid_base) return 1;
  return 0;
}

static void vp9_dec_free_mi(VP9_COMMON *cm) {
#if CONFIG_VP9_POSTPROC
  // MFQE allocates an additional mip and swaps it with cm->mip.
  vpx_free(cm->postproc_state.prev_mip);
  cm->postproc_state.prev_mip = NULL;
#endif
  vpx_free(cm->mip);
  cm->mip = NULL;
  vpx_free(cm->mi_grid_base);
  cm->mi_grid_base = NULL;
  cm->mi_alloc_size = 0;
}

VP9Decoder *vp9_decoder_create(BufferPool *const pool) {
  VP9Decoder *volatile const pbi = vpx_memalign(32, sizeof(*pbi));
  VP9_COMMON *volatile const cm = pbi ? &pbi->common : NULL;

  if (!cm) return NULL;

  vp9_zero(*pbi);

  if (setjmp(cm->error.jmp)) {
    cm->error.setjmp = 0;
    vp9_decoder_remove(pbi);
    return NULL;
  }

  cm->error.setjmp = 1;

  CHECK_MEM_ERROR(&cm->error, cm->fc,
                  (FRAME_CONTEXT *)vpx_calloc(1, sizeof(*cm->fc)));
  CHECK_MEM_ERROR(
      &cm->error, cm->frame_contexts,
      (FRAME_CONTEXT *)vpx_calloc(FRAME_CONTEXTS, sizeof(*cm->frame_contexts)));

  pbi->need_resync = 1;
  once(initialize_dec);

  // Initialize the references to not point to any frame buffers.
  memset(&cm->ref_frame_map, -1, sizeof(cm->ref_frame_map));
  memset(&cm->next_ref_frame_map, -1, sizeof(cm->next_ref_frame_map));

  init_frame_indexes(cm);
  pbi->ready_for_new_data = 1;
  pbi->common.buffer_pool = pool;

  cm->bit_depth = VPX_BITS_8;
  cm->dequant_bit_depth = VPX_BITS_8;

  cm->alloc_mi = vp9_dec_alloc_mi;
  cm->free_mi = vp9_dec_free_mi;
  cm->setup_mi = vp9_dec_setup_mi;

  vp9_loop_filter_init(cm);

  cm->error.setjmp = 0;

  vpx_get_worker_interface()->init(&pbi->lf_worker);
  pbi->lf_worker.thread_name = "vpx lf worker";

  return pbi;
}
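
/* A minimal lifecycle sketch (illustrative only; the buffer pool setup is
 * assumed to exist in the caller, and error handling is elided):
 *
 *   BufferPool *pool = ...;                  // owned by the caller
 *   VP9Decoder *pbi = vp9_decoder_create(pool);
 *   if (pbi == NULL) { ... handle allocation failure ... }
 *   // ... feed frames via vp9_receive_compressed_data() ...
 *   vp9_decoder_remove(pbi);                 // frees pbi; pool is untouched
 */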

void vp9_decoder_remove(VP9Decoder *pbi) {
  int i;

  if (!pbi) return;

  vpx_get_worker_interface()->end(&pbi->lf_worker);
  vpx_free(pbi->lf_worker.data1);

  for (i = 0; i < pbi->num_tile_workers; ++i) {
    VPxWorker *const worker = &pbi->tile_workers[i];
    vpx_get_worker_interface()->end(worker);
  }

  vpx_free(pbi->tile_worker_data);
  vpx_free(pbi->tile_workers);

  if (pbi->num_tile_workers > 0) {
    vp9_loop_filter_dealloc(&pbi->lf_row_sync);
  }

  if (pbi->row_mt == 1) {
    vp9_dec_free_row_mt_mem(pbi->row_mt_worker_data);
    if (pbi->row_mt_worker_data != NULL) {
      vp9_jobq_deinit(&pbi->row_mt_worker_data->jobq);
      vpx_free(pbi->row_mt_worker_data->jobq_buf);
#if CONFIG_MULTITHREAD
      pthread_mutex_destroy(&pbi->row_mt_worker_data->recon_done_mutex);
#endif
    }
    vpx_free(pbi->row_mt_worker_data);
  }

  vp9_remove_common(&pbi->common);
  vpx_free(pbi);
}

static int equal_dimensions(const YV12_BUFFER_CONFIG *a,
                            const YV12_BUFFER_CONFIG *b) {
  return a->y_height == b->y_height && a->y_width == b->y_width &&
         a->uv_height == b->uv_height && a->uv_width == b->uv_width;
}

vpx_codec_err_t vp9_copy_reference_dec(VP9Decoder *pbi,
                                       VP9_REFFRAME ref_frame_flag,
                                       YV12_BUFFER_CONFIG *sd) {
  VP9_COMMON *cm = &pbi->common;

  /* TODO(jkoleszar): The decoder doesn't have any real knowledge of what the
   * encoder is using the frame buffers for. This is just a stub to keep the
   * vpxenc --test-decode functionality working, and will be replaced in a
   * later commit that adds VP9-specific controls for this functionality.
   */
  if (ref_frame_flag == VP9_LAST_FLAG) {
    const YV12_BUFFER_CONFIG *const cfg = get_ref_frame(cm, 0);
    if (cfg == NULL) {
      vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
                         "No 'last' reference frame");
      return VPX_CODEC_ERROR;
    }
    if (!equal_dimensions(cfg, sd))
      vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
                         "Incorrect buffer dimensions");
    else
      vpx_yv12_copy_frame(cfg, sd);
  } else {
    vpx_internal_error(&cm->error, VPX_CODEC_ERROR, "Invalid reference frame");
  }

  return cm->error.error_code;
}

vpx_codec_err_t vp9_set_reference_dec(VP9_COMMON *cm,
                                      VP9_REFFRAME ref_frame_flag,
                                      YV12_BUFFER_CONFIG *sd) {
  int idx;
  YV12_BUFFER_CONFIG *ref_buf = NULL;

  // TODO(jkoleszar): The decoder doesn't have any real knowledge of what the
  // encoder is using the frame buffers for. This is just a stub to keep the
  // vpxenc --test-decode functionality working, and will be replaced in a
  // later commit that adds VP9-specific controls for this functionality.
  // (Yunqing) The set_reference control depends on the following setting in
  // encoder.
  // cpi->lst_fb_idx = 0;
  // cpi->gld_fb_idx = 1;
  // cpi->alt_fb_idx = 2;
  if (ref_frame_flag == VP9_LAST_FLAG) {
    idx = cm->ref_frame_map[0];
  } else if (ref_frame_flag == VP9_GOLD_FLAG) {
    idx = cm->ref_frame_map[1];
  } else if (ref_frame_flag == VP9_ALT_FLAG) {
    idx = cm->ref_frame_map[2];
  } else {
    vpx_internal_error(&cm->error, VPX_CODEC_ERROR, "Invalid reference frame");
    return cm->error.error_code;
  }

  if (idx < 0 || idx >= FRAME_BUFFERS) {
    vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
                       "Invalid reference frame map");
    return cm->error.error_code;
  }

  // Get the destination reference buffer.
  ref_buf = &cm->buffer_pool->frame_bufs[idx].buf;

  if (!equal_dimensions(ref_buf, sd)) {
    vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
                       "Incorrect buffer dimensions");
  } else {
    // Overwrite the reference frame buffer.
    vpx_yv12_copy_frame(sd, ref_buf);
  }

  return cm->error.error_code;
}

/* If any buffer updating is signaled it should be done here. */
static void swap_frame_buffers(VP9Decoder *pbi) {
  int ref_index = 0, mask;
  VP9_COMMON *const cm = &pbi->common;
  BufferPool *const pool = cm->buffer_pool;
  RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs;

  for (mask = pbi->refresh_frame_flags; mask; mask >>= 1) {
    const int old_idx = cm->ref_frame_map[ref_index];
    // The current thread releases its hold on the reference frame.
    decrease_ref_count(old_idx, frame_bufs, pool);

    // Release the reference held by the reference map if this slot is
    // being refreshed.
    if (mask & 1) {
      decrease_ref_count(old_idx, frame_bufs, pool);
    }
    cm->ref_frame_map[ref_index] = cm->next_ref_frame_map[ref_index];
    ++ref_index;
  }

  // The current thread releases its hold on the remaining reference frames.
  for (; ref_index < REF_FRAMES && !cm->show_existing_frame; ++ref_index) {
    const int old_idx = cm->ref_frame_map[ref_index];
    decrease_ref_count(old_idx, frame_bufs, pool);
    cm->ref_frame_map[ref_index] = cm->next_ref_frame_map[ref_index];
  }
  pbi->hold_ref_buf = 0;
  cm->frame_to_show = get_frame_new_buffer(cm);

  --frame_bufs[cm->new_fb_idx].ref_count;

  // Invalidate these references until the next frame starts.
  for (ref_index = 0; ref_index < 3; ref_index++)
    cm->frame_refs[ref_index].idx = -1;
}

static void release_fb_on_decoder_exit(VP9Decoder *pbi) {
  const VPxWorkerInterface *const winterface = vpx_get_worker_interface();
  VP9_COMMON *volatile const cm = &pbi->common;
  BufferPool *volatile const pool = cm->buffer_pool;
  RefCntBuffer *volatile const frame_bufs = cm->buffer_pool->frame_bufs;
  int i;

  // Synchronize all threads immediately as a subsequent decode call may
  // cause a resize invalidating some allocations.
  winterface->sync(&pbi->lf_worker);
  for (i = 0; i < pbi->num_tile_workers; ++i) {
    winterface->sync(&pbi->tile_workers[i]);
  }

  // Release all the reference buffers if a worker thread is holding them.
  if (pbi->hold_ref_buf == 1) {
    int ref_index = 0, mask;
    for (mask = pbi->refresh_frame_flags; mask; mask >>= 1) {
      const int old_idx = cm->ref_frame_map[ref_index];
      // The current thread releases its hold on the reference frame.
      decrease_ref_count(old_idx, frame_bufs, pool);

      // Release the reference held by the reference map if this slot is
      // being refreshed.
      if (mask & 1) {
        decrease_ref_count(old_idx, frame_bufs, pool);
      }
      ++ref_index;
    }

    // The current thread releases its hold on the remaining reference frames.
    for (; ref_index < REF_FRAMES && !cm->show_existing_frame; ++ref_index) {
      const int old_idx = cm->ref_frame_map[ref_index];
      decrease_ref_count(old_idx, frame_bufs, pool);
    }
    pbi->hold_ref_buf = 0;
  }
}

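// Decode one compressed frame. Returns 0 on success and -1 if the frame
// decode longjmps back through cm->error.jmp; if no free frame buffer can
// be found, the vpx_codec error code is returned instead. On success,
// *psource is advanced past the consumed data by vp9_decode_frame().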
int vp9_receive_compressed_data(VP9Decoder *pbi, size_t size,
                                const uint8_t **psource) {
  VP9_COMMON *volatile const cm = &pbi->common;
  BufferPool *volatile const pool = cm->buffer_pool;
  RefCntBuffer *volatile const frame_bufs = cm->buffer_pool->frame_bufs;
  const uint8_t *source = *psource;
  int retcode = 0;
  cm->error.error_code = VPX_CODEC_OK;

  if (size == 0) {
    // This is used to signal that we are missing frames.
    // We do not know if the missing frame(s) were supposed to update
    // any of the reference buffers, but we act conservatively and
    // mark only the last buffer as corrupted.
    //
    // TODO(jkoleszar): Error concealment is undefined and non-normative
    // at this point, but if it becomes so, [0] may not always be the correct
    // thing to do here.
    if (cm->frame_refs[0].idx > 0) {
      assert(cm->frame_refs[0].buf != NULL);
      cm->frame_refs[0].buf->corrupted = 1;
    }
  }

  pbi->ready_for_new_data = 0;

  // Check if the previous frame was a frame without any references to it.
  if (cm->new_fb_idx >= 0 && frame_bufs[cm->new_fb_idx].ref_count == 0 &&
      !frame_bufs[cm->new_fb_idx].released) {
    pool->release_fb_cb(pool->cb_priv,
                        &frame_bufs[cm->new_fb_idx].raw_frame_buffer);
    frame_bufs[cm->new_fb_idx].released = 1;
  }

  // Find a free frame buffer. Return an error if none can be found.
  cm->new_fb_idx = get_free_fb(cm);
  if (cm->new_fb_idx == INVALID_IDX) {
    pbi->ready_for_new_data = 1;
    release_fb_on_decoder_exit(pbi);
    vpx_clear_system_state();
    vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
                       "Unable to find free frame buffer");
    return cm->error.error_code;
  }

  // Assign a MV array to the frame buffer.
  cm->cur_frame = &pool->frame_bufs[cm->new_fb_idx];

  pbi->hold_ref_buf = 0;
  pbi->cur_buf = &frame_bufs[cm->new_fb_idx];

  if (setjmp(cm->error.jmp)) {
    cm->error.setjmp = 0;
    pbi->ready_for_new_data = 1;
    release_fb_on_decoder_exit(pbi);
    // Release the current frame.
    decrease_ref_count(cm->new_fb_idx, frame_bufs, pool);
    vpx_clear_system_state();
    return -1;
  }

  cm->error.setjmp = 1;
  vp9_decode_frame(pbi, source, source + size, psource);

  swap_frame_buffers(pbi);

  vpx_clear_system_state();

  if (!cm->show_existing_frame) {
    cm->last_show_frame = cm->show_frame;
    cm->prev_frame = cm->cur_frame;
    if (cm->seg.enabled) vp9_swap_current_and_last_seg_map(cm);
  }

  if (cm->show_frame) cm->cur_show_frame_fb_idx = cm->new_fb_idx;

  // Update progress in frame parallel decode.
  cm->last_width = cm->width;
  cm->last_height = cm->height;
  if (cm->show_frame) {
    cm->current_video_frame++;
  }

  cm->error.setjmp = 0;
  return retcode;
}

int vp9_get_raw_frame(VP9Decoder *pbi, YV12_BUFFER_CONFIG *sd,
                      vp9_ppflags_t *flags) {
  VP9_COMMON *const cm = &pbi->common;
  int ret = -1;
#if !CONFIG_VP9_POSTPROC
  (void)*flags;
#endif

  if (pbi->ready_for_new_data == 1) return ret;

  pbi->ready_for_new_data = 1;

  /* No raw frame to show. */
  if (!cm->show_frame) return ret;

#if CONFIG_VP9_POSTPROC
  if (!cm->show_existing_frame) {
    ret = vp9_post_proc_frame(cm, sd, flags, cm->width);
  } else {
    *sd = *cm->frame_to_show;
    ret = 0;
  }
#else
  *sd = *cm->frame_to_show;
  ret = 0;
#endif /* !CONFIG_VP9_POSTPROC */
  vpx_clear_system_state();
  return ret;
}

vpx_codec_err_t vp9_parse_superframe_index(const uint8_t *data, size_t data_sz,
                                           uint32_t sizes[8], int *count,
                                           vpx_decrypt_cb decrypt_cb,
                                           void *decrypt_state) {
  // A chunk whose last byte carries the superframe marker pattern
  // ((byte & 0xe0) == 0xc0) is an invalid chunk unless it is a superframe
  // index. If the last byte of real video compression data happens to match
  // the marker pattern, the encoder must append a 0 byte. If we have the
  // marker but not the matching marker byte at the front of the index, we
  // have an invalid bitstream and need to return an error.
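  //
  // Index layout, as implied by the parsing below:
  //   [marker][size_0 .. size_{frames-1}][marker]
  // where marker = 0xc0 | ((mag - 1) << 3) | (frames - 1), each size is
  // `mag` bytes stored little-endian, and the same marker byte appears at
  // both ends. For example, a two-frame superframe with 1-byte sizes 0x20
  // and 0x40 ends with the four index bytes: 0xc1 0x20 0x40 0xc1.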

  uint8_t marker;

  assert(data_sz);
  marker = read_marker(decrypt_cb, decrypt_state, data + data_sz - 1);
  *count = 0;

  if ((marker & 0xe0) == 0xc0) {
    const uint32_t frames = (marker & 0x7) + 1;
    const uint32_t mag = ((marker >> 3) & 0x3) + 1;
    const size_t index_sz = 2 + mag * frames;

    // This chunk is marked as having a superframe index but doesn't have
    // enough data for it, thus it's an invalid superframe index.
    if (data_sz < index_sz) return VPX_CODEC_CORRUPT_FRAME;

    {
      const uint8_t marker2 =
          read_marker(decrypt_cb, decrypt_state, data + data_sz - index_sz);

      // This chunk is marked as having a superframe index but doesn't have
      // the matching marker byte at the front of the index, therefore it's
      // an invalid chunk.
      if (marker != marker2) return VPX_CODEC_CORRUPT_FRAME;
    }

    {
      // Found a valid superframe index.
      uint32_t i, j;
      const uint8_t *x = &data[data_sz - index_sz + 1];

      // frames has a maximum value of 8 and mag a maximum of 4.
      uint8_t clear_buffer[32];
      assert(sizeof(clear_buffer) >= frames * mag);
      if (decrypt_cb) {
        decrypt_cb(decrypt_state, x, clear_buffer, frames * mag);
        x = clear_buffer;
      }

      for (i = 0; i < frames; ++i) {
        uint32_t this_sz = 0;

        for (j = 0; j < mag; ++j) this_sz |= ((uint32_t)(*x++)) << (j * 8);
        sizes[i] = this_sz;
      }
      *count = frames;
    }
  }
  return VPX_CODEC_OK;
}
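
/* A minimal caller-side sketch (illustrative only; `data`, `data_sz`, and
 * the decoder instance `pbi` are assumed to come from the caller, and bounds
 * checking of the reported sizes is elided): split a chunk using the index
 * above, then feed each sub-frame to vp9_receive_compressed_data().
 *
 *   uint32_t sizes[8];
 *   int count = 0;
 *   if (vp9_parse_superframe_index(data, data_sz, sizes, &count, NULL,
 *                                  NULL) == VPX_CODEC_OK &&
 *       count > 0) {
 *     const uint8_t *frame = data;
 *     int i;
 *     for (i = 0; i < count; ++i) {
 *       const uint8_t *src = frame;
 *       if (vp9_receive_compressed_data(pbi, sizes[i], &src)) break;
 *       frame += sizes[i];
 *     }
 *   }
 */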