/*
 * Copyright (c) 2014 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "vp9/common/vp9_thread_common.h"
#include "vp9/encoder/vp9_bitstream.h"
#include "vp9/encoder/vp9_encodeframe.h"
#include "vp9/encoder/vp9_encoder.h"
#include "vp9/encoder/vp9_ethread.h"
#include "vp9/encoder/vp9_firstpass.h"
#include "vp9/encoder/vp9_multi_thread.h"
#include "vp9/encoder/vp9_temporal_filter.h"
#include "vpx_dsp/vpx_dsp_common.h"
#include "vpx_util/vpx_pthread.h"

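// Merge the rate-distortion counts accumulated by a worker thread (td_t)
// into the main thread's counts (td).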
static void accumulate_rd_opt(ThreadData *td, ThreadData *td_t) {
  int i, j, k, l, m, n;

  for (i = 0; i < REFERENCE_MODES; i++)
    td->rd_counts.comp_pred_diff[i] += td_t->rd_counts.comp_pred_diff[i];

  for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++)
    td->rd_counts.filter_diff[i] += td_t->rd_counts.filter_diff[i];

  for (i = 0; i < TX_SIZES; i++)
    for (j = 0; j < PLANE_TYPES; j++)
      for (k = 0; k < REF_TYPES; k++)
        for (l = 0; l < COEF_BANDS; l++)
          for (m = 0; m < COEFF_CONTEXTS; m++)
            for (n = 0; n < ENTROPY_TOKENS; n++)
              td->rd_counts.coef_counts[i][j][k][l][m][n] +=
                  td_t->rd_counts.coef_counts[i][j][k][l][m][n];
}

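// Worker hook for tile-based encoding: each worker encodes every
// cpi->num_workers-th tile, walking the tiles in raster order.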
static int enc_worker_hook(void *arg1, void *unused) {
  EncWorkerData *const thread_data = (EncWorkerData *)arg1;
  VP9_COMP *const cpi = thread_data->cpi;
  const VP9_COMMON *const cm = &cpi->common;
  const int tile_cols = 1 << cm->log2_tile_cols;
  const int tile_rows = 1 << cm->log2_tile_rows;
  int t;

  (void)unused;

  for (t = thread_data->start; t < tile_rows * tile_cols;
       t += cpi->num_workers) {
    int tile_row = t / tile_cols;
    int tile_col = t % tile_cols;

    vp9_encode_tile(cpi, thread_data->td, tile_row, tile_col);
  }

  return 1;
}

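// Compute the maximum number of tile columns the encoder may use, honoring
// the requested tile configuration and, when the target level is chosen
// automatically, the tile limit implied by the picture size.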
static int get_max_tile_cols(VP9_COMP *cpi) {
  const int aligned_width = ALIGN_POWER_OF_TWO(cpi->oxcf.width, MI_SIZE_LOG2);
  int mi_cols = aligned_width >> MI_SIZE_LOG2;
  int min_log2_tile_cols, max_log2_tile_cols;
  int log2_tile_cols;

  vp9_get_tile_n_bits(mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);
  log2_tile_cols =
      clamp(cpi->oxcf.tile_columns, min_log2_tile_cols, max_log2_tile_cols);
  if (cpi->oxcf.target_level == LEVEL_AUTO) {
    const int level_tile_cols =
        log_tile_cols_from_picsize_level(cpi->common.width, cpi->common.height);
    if (log2_tile_cols > level_tile_cols) {
      log2_tile_cols = VPXMAX(level_tile_cols, min_log2_tile_cols);
    }
  }
  return (1 << log2_tile_cols);
}

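// (Re)create the pool of encode workers. The last worker slot is the main
// thread itself, which reuses the thread data embedded in cpi.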
static void create_enc_workers(VP9_COMP *cpi, int num_workers) {
  VP9_COMMON *const cm = &cpi->common;
  const VPxWorkerInterface *const winterface = vpx_get_worker_interface();
  int i;
  // When using SVC, allocate threads according to the highest resolution.
  // When row-based multithreading is enabled, it is OK to allocate more
  // threads than the maximum number of tile columns.
  if (cpi->use_svc && !cpi->row_mt) {
    int max_tile_cols = get_max_tile_cols(cpi);
    num_workers = VPXMIN(cpi->oxcf.max_threads, max_tile_cols);
  }
  assert(num_workers > 0);
  if (num_workers == cpi->num_workers) return;
  vp9_loop_filter_dealloc(&cpi->lf_row_sync);
  vp9_bitstream_encode_tiles_buffer_dealloc(cpi);
  vp9_encode_free_mt_data(cpi);

  CHECK_MEM_ERROR(&cm->error, cpi->workers,
                  vpx_malloc(num_workers * sizeof(*cpi->workers)));

  CHECK_MEM_ERROR(&cm->error, cpi->tile_thr_data,
                  vpx_calloc(num_workers, sizeof(*cpi->tile_thr_data)));

  for (i = 0; i < num_workers; i++) {
    VPxWorker *const worker = &cpi->workers[i];
    EncWorkerData *thread_data = &cpi->tile_thr_data[i];

    ++cpi->num_workers;
    winterface->init(worker);
    worker->thread_name = "vpx enc worker";

    if (i < num_workers - 1) {
      thread_data->cpi = cpi;

      // Allocate thread data.
      CHECK_MEM_ERROR(&cm->error, thread_data->td,
                      vpx_memalign(32, sizeof(*thread_data->td)));
      vp9_zero(*thread_data->td);

      // Set up pc_tree.
      thread_data->td->leaf_tree = NULL;
      thread_data->td->pc_tree = NULL;
      vp9_setup_pc_tree(cm, thread_data->td);

      // Allocate frame counters in thread data.
      CHECK_MEM_ERROR(&cm->error, thread_data->td->counts,
                      vpx_calloc(1, sizeof(*thread_data->td->counts)));

      // Create the thread.
      if (!winterface->reset(worker))
        vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
                           "Tile encoder thread creation failed");
    } else {
      // Main thread acts as a worker and uses the thread data in cpi.
      thread_data->cpi = cpi;
      thread_data->td = &cpi->td;
    }
    winterface->sync(worker);
  }
}

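// Assign the hook and per-worker data, then run all workers. The last
// worker executes synchronously on the calling thread; the rest are
// launched asynchronously and joined with sync().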
static void launch_enc_workers(VP9_COMP *cpi, VPxWorkerHook hook, void *data2,
                               int num_workers) {
  const VPxWorkerInterface *const winterface = vpx_get_worker_interface();
  int i;

  for (i = 0; i < num_workers; i++) {
    VPxWorker *const worker = &cpi->workers[i];
    worker->hook = hook;
    worker->data1 = &cpi->tile_thr_data[i];
    worker->data2 = data2;
  }

  // Encode a frame.
  for (i = 0; i < num_workers; i++) {
    VPxWorker *const worker = &cpi->workers[i];
    EncWorkerData *const thread_data = (EncWorkerData *)worker->data1;

    // Set the starting tile for each thread.
    thread_data->start = i;

    if (i == cpi->num_workers - 1)
      winterface->execute(worker);
    else
      winterface->launch(worker);
  }

  // Wait for all workers to finish encoding.
  for (i = 0; i < num_workers; i++) {
    VPxWorker *const worker = &cpi->workers[i];
    winterface->sync(worker);
  }
}

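// Shut down the encode worker pool and free all per-worker resources.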
void vp9_encode_free_mt_data(struct VP9_COMP *cpi) {
  int t;
  for (t = 0; t < cpi->num_workers; ++t) {
    VPxWorker *const worker = &cpi->workers[t];
    EncWorkerData *const thread_data = &cpi->tile_thr_data[t];

    // Shut down the worker thread.
    vpx_get_worker_interface()->end(worker);

    // Free the thread data allocated for worker threads (the last slot
    // belongs to the main thread and points into cpi).
    if (t < cpi->num_workers - 1) {
      vpx_free(thread_data->td->counts);
      vp9_free_pc_tree(thread_data->td);
      vpx_free(thread_data->td);
    }
  }
  vpx_free(cpi->tile_thr_data);
  cpi->tile_thr_data = NULL;
  vpx_free(cpi->workers);
  cpi->workers = NULL;
  cpi->num_workers = 0;
}

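// Encode the frame with one worker per tile (tile-based multithreading).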
void vp9_encode_tiles_mt(VP9_COMP *cpi) {
  VP9_COMMON *const cm = &cpi->common;
  const int tile_cols = 1 << cm->log2_tile_cols;
  const int num_workers = VPXMIN(cpi->oxcf.max_threads, tile_cols);
  int i;

  vp9_init_tile_data(cpi);

  create_enc_workers(cpi, num_workers);

  for (i = 0; i < num_workers; i++) {
    EncWorkerData *const thread_data = &cpi->tile_thr_data[i];

    // Before encoding a frame, copy the thread data from cpi.
    if (thread_data->td != &cpi->td) {
      thread_data->td->mb = cpi->td.mb;
      thread_data->td->rd_counts = cpi->td.rd_counts;
    }
    if (thread_data->td->counts != &cpi->common.counts) {
      memcpy(thread_data->td->counts, &cpi->common.counts,
             sizeof(cpi->common.counts));
    }

    // Handle use_nonrd_pick_mode case.
    if (cpi->sf.use_nonrd_pick_mode) {
      MACROBLOCK *const x = &thread_data->td->mb;
      MACROBLOCKD *const xd = &x->e_mbd;
      struct macroblock_plane *const p = x->plane;
      struct macroblockd_plane *const pd = xd->plane;
      PICK_MODE_CONTEXT *ctx = &thread_data->td->pc_root->none;
      int j;

      for (j = 0; j < MAX_MB_PLANE; ++j) {
        p[j].coeff = ctx->coeff_pbuf[j][0];
        p[j].qcoeff = ctx->qcoeff_pbuf[j][0];
        pd[j].dqcoeff = ctx->dqcoeff_pbuf[j][0];
        p[j].eobs = ctx->eobs_pbuf[j][0];
      }
    }
  }

  launch_enc_workers(cpi, enc_worker_hook, NULL, num_workers);

  for (i = 0; i < num_workers; i++) {
    VPxWorker *const worker = &cpi->workers[i];
    EncWorkerData *const thread_data = (EncWorkerData *)worker->data1;

    // Accumulate counters.
    if (i < cpi->num_workers - 1) {
      vp9_accumulate_frame_counts(&cm->counts, thread_data->td->counts, 0);
      accumulate_rd_opt(&cpi->td, thread_data->td);
    }
  }
}

#if !CONFIG_REALTIME_ONLY
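// Merge the first-pass stats gathered for tile_data_t into tile_data.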
static void accumulate_fp_tile_stat(TileDataEnc *tile_data,
                                    TileDataEnc *tile_data_t) {
  tile_data->fp_data.intra_factor += tile_data_t->fp_data.intra_factor;
  tile_data->fp_data.brightness_factor +=
      tile_data_t->fp_data.brightness_factor;
  tile_data->fp_data.coded_error += tile_data_t->fp_data.coded_error;
  tile_data->fp_data.sr_coded_error += tile_data_t->fp_data.sr_coded_error;
  tile_data->fp_data.frame_noise_energy +=
      tile_data_t->fp_data.frame_noise_energy;
  tile_data->fp_data.intra_error += tile_data_t->fp_data.intra_error;
  tile_data->fp_data.intercount += tile_data_t->fp_data.intercount;
  tile_data->fp_data.second_ref_count += tile_data_t->fp_data.second_ref_count;
  tile_data->fp_data.neutral_count += tile_data_t->fp_data.neutral_count;
  tile_data->fp_data.intra_count_low += tile_data_t->fp_data.intra_count_low;
  tile_data->fp_data.intra_count_high += tile_data_t->fp_data.intra_count_high;
  tile_data->fp_data.intra_skip_count += tile_data_t->fp_data.intra_skip_count;
  tile_data->fp_data.mvcount += tile_data_t->fp_data.mvcount;
  tile_data->fp_data.new_mv_count += tile_data_t->fp_data.new_mv_count;
  tile_data->fp_data.sum_mvr += tile_data_t->fp_data.sum_mvr;
  tile_data->fp_data.sum_mvr_abs += tile_data_t->fp_data.sum_mvr_abs;
  tile_data->fp_data.sum_mvc += tile_data_t->fp_data.sum_mvc;
  tile_data->fp_data.sum_mvc_abs += tile_data_t->fp_data.sum_mvc_abs;
  tile_data->fp_data.sum_mvrs += tile_data_t->fp_data.sum_mvrs;
  tile_data->fp_data.sum_mvcs += tile_data_t->fp_data.sum_mvcs;
  tile_data->fp_data.sum_in_vectors += tile_data_t->fp_data.sum_in_vectors;
  tile_data->fp_data.intra_smooth_count +=
      tile_data_t->fp_data.intra_smooth_count;
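  // If either tile has not yet seen image data (image_data_start_row still
  // INVALID_ROW), take the other tile's value via VPXMAX; otherwise keep the
  // earlier row.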
  tile_data->fp_data.image_data_start_row =
      VPXMIN(tile_data->fp_data.image_data_start_row,
             tile_data_t->fp_data.image_data_start_row) == INVALID_ROW
          ? VPXMAX(tile_data->fp_data.image_data_start_row,
                   tile_data_t->fp_data.image_data_start_row)
          : VPXMIN(tile_data->fp_data.image_data_start_row,
                   tile_data_t->fp_data.image_data_start_row);
}
#endif  // !CONFIG_REALTIME_ONLY

// Allocate memory for row synchronization.
void vp9_row_mt_sync_mem_alloc(VP9RowMTSync *row_mt_sync, VP9_COMMON *cm,
                               int rows) {
  row_mt_sync->rows = rows;
#if CONFIG_MULTITHREAD
  {
    int i;

    CHECK_MEM_ERROR(&cm->error, row_mt_sync->mutex,
                    vpx_malloc(sizeof(*row_mt_sync->mutex) * rows));
    if (row_mt_sync->mutex) {
      for (i = 0; i < rows; ++i) {
        pthread_mutex_init(&row_mt_sync->mutex[i], NULL);
      }
    }

    CHECK_MEM_ERROR(&cm->error, row_mt_sync->cond,
                    vpx_malloc(sizeof(*row_mt_sync->cond) * rows));
    if (row_mt_sync->cond) {
      for (i = 0; i < rows; ++i) {
        pthread_cond_init(&row_mt_sync->cond[i], NULL);
      }
    }
  }
#endif  // CONFIG_MULTITHREAD

  CHECK_MEM_ERROR(&cm->error, row_mt_sync->cur_col,
                  vpx_malloc(sizeof(*row_mt_sync->cur_col) * rows));

  // Set up nsync: how far ahead (in columns) the row above must be before
  // the next row may proceed.
  row_mt_sync->sync_range = 1;
}

// Deallocate the mutexes, condition variables, and column counters used for
// row-based multithreading synchronization.
void vp9_row_mt_sync_mem_dealloc(VP9RowMTSync *row_mt_sync) {
  if (row_mt_sync != NULL) {
#if CONFIG_MULTITHREAD
    int i;

    if (row_mt_sync->mutex != NULL) {
      for (i = 0; i < row_mt_sync->rows; ++i) {
        pthread_mutex_destroy(&row_mt_sync->mutex[i]);
      }
      vpx_free(row_mt_sync->mutex);
    }
    if (row_mt_sync->cond != NULL) {
      for (i = 0; i < row_mt_sync->rows; ++i) {
        pthread_cond_destroy(&row_mt_sync->cond[i]);
      }
      vpx_free(row_mt_sync->cond);
    }
#endif  // CONFIG_MULTITHREAD
    vpx_free(row_mt_sync->cur_col);
    // Clear the structure: this call may be triggered by a dynamic change in
    // the tile configuration, in which case it will be followed by an
    // _alloc() that may fail.
    vp9_zero(*row_mt_sync);
  }
}

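// Wavefront read barrier: before encoding block column c of row r, wait
// until row r - 1 has advanced to at least column c + nsync - 1.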
void vp9_row_mt_sync_read(VP9RowMTSync *const row_mt_sync, int r, int c) {
#if CONFIG_MULTITHREAD
  const int nsync = row_mt_sync->sync_range;

  if (r && !(c & (nsync - 1))) {
    pthread_mutex_t *const mutex = &row_mt_sync->mutex[r - 1];
    pthread_mutex_lock(mutex);

    while (c > row_mt_sync->cur_col[r - 1] - nsync + 1) {
      pthread_cond_wait(&row_mt_sync->cond[r - 1], mutex);
    }
    pthread_mutex_unlock(mutex);
  }
#else
  (void)row_mt_sync;
  (void)r;
  (void)c;
#endif  // CONFIG_MULTITHREAD
}

void vp9_row_mt_sync_read_dummy(VP9RowMTSync *const row_mt_sync, int r, int c) {
  (void)row_mt_sync;
  (void)r;
  (void)c;
  return;
}

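// Wavefront write: publish that row r has completed through column c and
// signal any thread waiting on this row; the last column publishes a value
// past the end of the row so all waiters are released.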
void vp9_row_mt_sync_write(VP9RowMTSync *const row_mt_sync, int r, int c,
                           const int cols) {
#if CONFIG_MULTITHREAD
  const int nsync = row_mt_sync->sync_range;
  int cur;
  // Only signal when there are enough encoded blocks for the next row to run.
  int sig = 1;

  if (c < cols - 1) {
    cur = c;
    if (c % nsync != nsync - 1) sig = 0;
  } else {
    cur = cols + nsync;
  }

  if (sig) {
    pthread_mutex_lock(&row_mt_sync->mutex[r]);

    row_mt_sync->cur_col[r] = cur;

    pthread_cond_signal(&row_mt_sync->cond[r]);
    pthread_mutex_unlock(&row_mt_sync->mutex[r]);
  }
#else
  (void)row_mt_sync;
  (void)r;
  (void)c;
  (void)cols;
#endif  // CONFIG_MULTITHREAD
}

void vp9_row_mt_sync_write_dummy(VP9RowMTSync *const row_mt_sync, int r, int c,
                                 const int cols) {
  (void)row_mt_sync;
  (void)r;
  (void)c;
  (void)cols;
  return;
}

#if !CONFIG_REALTIME_ONLY
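// First-pass worker hook: repeatedly pull (tile, macroblock row) jobs from
// the job queue and collect first-pass stats for that row, until every tile
// reports completion.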
static int first_pass_worker_hook(void *arg1, void *arg2) {
  EncWorkerData *const thread_data = (EncWorkerData *)arg1;
  MultiThreadHandle *multi_thread_ctxt = (MultiThreadHandle *)arg2;
  VP9_COMP *const cpi = thread_data->cpi;
  const VP9_COMMON *const cm = &cpi->common;
  const int tile_cols = 1 << cm->log2_tile_cols;
  int tile_row, tile_col;
  TileDataEnc *this_tile;
  int end_of_frame;
  int thread_id = thread_data->thread_id;
  int cur_tile_id = multi_thread_ctxt->thread_id_to_tile_id[thread_id];
  JobNode *proc_job = NULL;
  FIRSTPASS_DATA fp_acc_data;
  MV zero_mv = { 0, 0 };
  MV best_ref_mv;
  int mb_row;

  end_of_frame = 0;
  while (0 == end_of_frame) {
    // Get the next job in the queue.
    proc_job =
        (JobNode *)vp9_enc_grp_get_next_job(multi_thread_ctxt, cur_tile_id);
    if (NULL == proc_job) {
      // Query for the status of other tiles.
      end_of_frame = vp9_get_tiles_proc_status(
          multi_thread_ctxt, thread_data->tile_completion_status, &cur_tile_id,
          tile_cols);
    } else {
      tile_col = proc_job->tile_col_id;
      tile_row = proc_job->tile_row_id;

      this_tile = &cpi->tile_data[tile_row * tile_cols + tile_col];
      mb_row = proc_job->vert_unit_row_num;

      best_ref_mv = zero_mv;
      vp9_zero(fp_acc_data);
      fp_acc_data.image_data_start_row = INVALID_ROW;
      vp9_first_pass_encode_tile_mb_row(cpi, thread_data->td, &fp_acc_data,
                                        this_tile, &best_ref_mv, mb_row);
    }
  }
  return 1;
}

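// Row-based multithreaded first pass: reallocate the row-MT context if the
// tile configuration changed, run the first-pass job queue on the worker
// pool, then fold the per-tile stats into the first tile column.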
void vp9_encode_fp_row_mt(VP9_COMP *cpi) {
  VP9_COMMON *const cm = &cpi->common;
  const int tile_cols = 1 << cm->log2_tile_cols;
  const int tile_rows = 1 << cm->log2_tile_rows;
  MultiThreadHandle *multi_thread_ctxt = &cpi->multi_thread_ctxt;
  TileDataEnc *first_tile_col;
  int num_workers = VPXMAX(cpi->oxcf.max_threads, 1);
  int i;

  if (multi_thread_ctxt->allocated_tile_cols < tile_cols ||
      multi_thread_ctxt->allocated_tile_rows < tile_rows ||
      multi_thread_ctxt->allocated_vert_unit_rows < cm->mb_rows) {
    vp9_row_mt_mem_dealloc(cpi);
    vp9_init_tile_data(cpi);
    vp9_row_mt_mem_alloc(cpi);
  } else {
    vp9_init_tile_data(cpi);
  }

  create_enc_workers(cpi, num_workers);

  vp9_assign_tile_to_thread(multi_thread_ctxt, tile_cols, cpi->num_workers);

  vp9_prepare_job_queue(cpi, FIRST_PASS_JOB);

  vp9_multi_thread_tile_init(cpi);

  for (i = 0; i < num_workers; i++) {
    EncWorkerData *thread_data;
    thread_data = &cpi->tile_thr_data[i];

    // Before encoding a frame, copy the thread data from cpi.
    if (thread_data->td != &cpi->td) {
      thread_data->td->mb = cpi->td.mb;
    }
  }

  launch_enc_workers(cpi, first_pass_worker_hook, multi_thread_ctxt,
                     num_workers);

  first_tile_col = &cpi->tile_data[0];
  for (i = 1; i < tile_cols; i++) {
    TileDataEnc *this_tile = &cpi->tile_data[i];
    accumulate_fp_tile_stat(first_tile_col, this_tile);
  }
}

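// Temporal-filter (ARNR) worker hook: pull (tile, macroblock row) jobs from
// the job queue and filter one macroblock row at a time.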
static int temporal_filter_worker_hook(void *arg1, void *arg2) {
  EncWorkerData *const thread_data = (EncWorkerData *)arg1;
  MultiThreadHandle *multi_thread_ctxt = (MultiThreadHandle *)arg2;
  VP9_COMP *const cpi = thread_data->cpi;
  const VP9_COMMON *const cm = &cpi->common;
  const int tile_cols = 1 << cm->log2_tile_cols;
  int tile_row, tile_col;
  int mb_col_start, mb_col_end;
  TileDataEnc *this_tile;
  int end_of_frame;
  int thread_id = thread_data->thread_id;
  int cur_tile_id = multi_thread_ctxt->thread_id_to_tile_id[thread_id];
  JobNode *proc_job = NULL;
  int mb_row;

  end_of_frame = 0;
  while (0 == end_of_frame) {
    // Get the next job in the queue.
    proc_job =
        (JobNode *)vp9_enc_grp_get_next_job(multi_thread_ctxt, cur_tile_id);
    if (NULL == proc_job) {
      // Query for the status of other tiles.
      end_of_frame = vp9_get_tiles_proc_status(
          multi_thread_ctxt, thread_data->tile_completion_status, &cur_tile_id,
          tile_cols);
    } else {
      tile_col = proc_job->tile_col_id;
      tile_row = proc_job->tile_row_id;
      this_tile = &cpi->tile_data[tile_row * tile_cols + tile_col];
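      // Convert the tile's mi column range to macroblock units for the
      // temporal filter; TF_ROUND rounds the end up so that a partial block
      // at the tile edge is included.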
      mb_col_start = (this_tile->tile_info.mi_col_start) >> TF_SHIFT;
      mb_col_end = (this_tile->tile_info.mi_col_end + TF_ROUND) >> TF_SHIFT;
      mb_row = proc_job->vert_unit_row_num;

      vp9_temporal_filter_iterate_row_c(cpi, thread_data->td, mb_row,
                                        mb_col_start, mb_col_end);
    }
  }
  return 1;
}

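// Row-based multithreaded temporal filter: reuse the encode worker pool to
// run ARNR filtering jobs over all macroblock rows.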
void vp9_temporal_filter_row_mt(VP9_COMP *cpi) {
  VP9_COMMON *const cm = &cpi->common;
  const int tile_cols = 1 << cm->log2_tile_cols;
  const int tile_rows = 1 << cm->log2_tile_rows;
  MultiThreadHandle *multi_thread_ctxt = &cpi->multi_thread_ctxt;
  int num_workers = cpi->num_workers ? cpi->num_workers : 1;
  int i;

  if (multi_thread_ctxt->allocated_tile_cols < tile_cols ||
      multi_thread_ctxt->allocated_tile_rows < tile_rows ||
      multi_thread_ctxt->allocated_vert_unit_rows < cm->mb_rows) {
    vp9_row_mt_mem_dealloc(cpi);
    vp9_init_tile_data(cpi);
    vp9_row_mt_mem_alloc(cpi);
  } else {
    vp9_init_tile_data(cpi);
  }

  create_enc_workers(cpi, num_workers);

  vp9_assign_tile_to_thread(multi_thread_ctxt, tile_cols, cpi->num_workers);

  vp9_prepare_job_queue(cpi, ARNR_JOB);

  for (i = 0; i < num_workers; i++) {
    EncWorkerData *thread_data;
    thread_data = &cpi->tile_thr_data[i];

    // Before encoding a frame, copy the thread data from cpi.
    if (thread_data->td != &cpi->td) {
      thread_data->td->mb = cpi->td.mb;
    }
  }

  launch_enc_workers(cpi, temporal_filter_worker_hook, multi_thread_ctxt,
                     num_workers);
}
#endif  // !CONFIG_REALTIME_ONLY

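// Row-based encode worker hook: pull (tile, superblock row) jobs from the
// job queue and encode one superblock row at a time.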
static int enc_row_mt_worker_hook(void *arg1, void *arg2) {
  EncWorkerData *const thread_data = (EncWorkerData *)arg1;
  MultiThreadHandle *multi_thread_ctxt = (MultiThreadHandle *)arg2;
  VP9_COMP *const cpi = thread_data->cpi;
  const VP9_COMMON *const cm = &cpi->common;
  const int tile_cols = 1 << cm->log2_tile_cols;
  int tile_row, tile_col;
  int end_of_frame;
  int thread_id = thread_data->thread_id;
  int cur_tile_id = multi_thread_ctxt->thread_id_to_tile_id[thread_id];
  JobNode *proc_job = NULL;
  int mi_row;

  end_of_frame = 0;
  while (0 == end_of_frame) {
    // Get the next job in the queue.
    proc_job =
        (JobNode *)vp9_enc_grp_get_next_job(multi_thread_ctxt, cur_tile_id);
    if (NULL == proc_job) {
      // Query for the status of other tiles.
      end_of_frame = vp9_get_tiles_proc_status(
          multi_thread_ctxt, thread_data->tile_completion_status, &cur_tile_id,
          tile_cols);
    } else {
      tile_col = proc_job->tile_col_id;
      tile_row = proc_job->tile_row_id;
      mi_row = proc_job->vert_unit_row_num * MI_BLOCK_SIZE;

      vp9_encode_sb_row(cpi, thread_data->td, tile_row, tile_col, mi_row);
    }
  }
  return 1;
}

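// Encode the frame with row-based multithreading inside each tile.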
void vp9_encode_tiles_row_mt(VP9_COMP *cpi) {
  VP9_COMMON *const cm = &cpi->common;
  const int tile_cols = 1 << cm->log2_tile_cols;
  const int tile_rows = 1 << cm->log2_tile_rows;
  MultiThreadHandle *multi_thread_ctxt = &cpi->multi_thread_ctxt;
  int num_workers = VPXMAX(cpi->oxcf.max_threads, 1);
  int i;

  if (multi_thread_ctxt->allocated_tile_cols < tile_cols ||
      multi_thread_ctxt->allocated_tile_rows < tile_rows ||
      multi_thread_ctxt->allocated_vert_unit_rows < cm->mb_rows) {
    vp9_row_mt_mem_dealloc(cpi);
    vp9_init_tile_data(cpi);
    vp9_row_mt_mem_alloc(cpi);
  } else {
    vp9_init_tile_data(cpi);
  }

  create_enc_workers(cpi, num_workers);

  vp9_assign_tile_to_thread(multi_thread_ctxt, tile_cols, cpi->num_workers);

  vp9_prepare_job_queue(cpi, ENCODE_JOB);

  vp9_multi_thread_tile_init(cpi);

  for (i = 0; i < num_workers; i++) {
    EncWorkerData *thread_data;
    thread_data = &cpi->tile_thr_data[i];
    // Before encoding a frame, copy the thread data from cpi.
    if (thread_data->td != &cpi->td) {
      thread_data->td->mb = cpi->td.mb;
      thread_data->td->rd_counts = cpi->td.rd_counts;
    }
    if (thread_data->td->counts != &cpi->common.counts) {
      memcpy(thread_data->td->counts, &cpi->common.counts,
             sizeof(cpi->common.counts));
    }

    // Handle use_nonrd_pick_mode case.
    if (cpi->sf.use_nonrd_pick_mode) {
      MACROBLOCK *const x = &thread_data->td->mb;
      MACROBLOCKD *const xd = &x->e_mbd;
      struct macroblock_plane *const p = x->plane;
      struct macroblockd_plane *const pd = xd->plane;
      PICK_MODE_CONTEXT *ctx = &thread_data->td->pc_root->none;
      int j;

      for (j = 0; j < MAX_MB_PLANE; ++j) {
        p[j].coeff = ctx->coeff_pbuf[j][0];
        p[j].qcoeff = ctx->qcoeff_pbuf[j][0];
        pd[j].dqcoeff = ctx->dqcoeff_pbuf[j][0];
        p[j].eobs = ctx->eobs_pbuf[j][0];
      }
    }
  }

  launch_enc_workers(cpi, enc_row_mt_worker_hook, multi_thread_ctxt,
                     num_workers);

  for (i = 0; i < num_workers; i++) {
    VPxWorker *const worker = &cpi->workers[i];
    EncWorkerData *const thread_data = (EncWorkerData *)worker->data1;

    // Accumulate counters.
    if (i < cpi->num_workers - 1) {
      vp9_accumulate_frame_counts(&cm->counts, thread_data->td->counts, 0);
      accumulate_rd_opt(&cpi->td, thread_data->td);
    }
  }
}