1 /* $Id: render.c,v 1.17 2005/04/14 17:37:54 titer Exp $
3 This file is part of the HandBrake source code.
4 Homepage: <http://handbrake.fr/>.
5 It may be used under the terms of the GNU General Public License. */
9 #include "ffmpeg/avcodec.h"
10 #include "ffmpeg/swscale.h"
12 struct hb_work_private_s
16 struct SwsContext * context;
18 AVPicture pic_tmp_crop;
19 AVPicture pic_tmp_out;
20 hb_buffer_t * buf_scale;
21 hb_fifo_t * subtitle_queue;
22 hb_fifo_t * delay_queue;
25 uint64_t last_start[4];
26 uint64_t last_stop[4];
27 uint64_t lost_time[4];
28 uint64_t total_lost_time;
29 uint64_t total_gained_time;
34 int renderInit( hb_work_object_t *, hb_job_t * );
35 int renderWork( hb_work_object_t *, hb_buffer_t **, hb_buffer_t ** );
36 void renderClose( hb_work_object_t * );
38 hb_work_object_t hb_render =
/*
 * Utility function that finds where the U is in the YUV sub-picture.
 *
 * The Y data is at the top, followed by U and V, but the U and V
 * are half the width of the Y, i.e. each chroma element covers 2x2
 * of the Y pixels: planar YUV 4:2:0.
 *
 * (height is only needed to skip the Y plane; the parameter list is
 * kept identical to getV's.)
 */
static uint8_t *getU(uint8_t *data, int width, int height, int x, int y)
{
    /* Skip the full-resolution Y plane (width*height bytes), then index
       into the half-resolution U plane at (x/2, y/2). */
    return &data[((y / 2) * (width / 2)) + (x / 2) + (width * height)];
}
/*
 * Like getU, but for the V plane: it sits after the Y plane (width*height
 * bytes) and the U plane (a quarter of that, (width*height)/4 bytes).
 */
static uint8_t *getV(uint8_t *data, int width, int height, int x, int y)
{
    return &data[((y / 2) * (width / 2)) + (x / 2) + (width * height) +
                 ((width * height) / 4)];
}
67 static void ApplySub( hb_job_t * job, hb_buffer_t * buf,
70 hb_buffer_t * sub = *_sub;
71 hb_title_t * title = job->title;
72 int i, j, offset_top, offset_left, margin_top, margin_percent;
73 uint8_t * lum, * alpha, * out, * sub_chromaU, * sub_chromaV;
76 * Percent of height of picture that form a margin that subtitles
77 * should not be displayed within.
87 * If necessary, move the subtitle so it is not in a cropped zone.
88 * When it won't fit, we center it so we lose as much on both ends.
89 * Otherwise we try to leave a 20px or 2% margin around it.
91 margin_top = ( ( title->height - job->crop[0] - job->crop[1] ) *
92 margin_percent ) / 100;
97 * A maximum margin of 20px regardless of height of the picture.
102 if( sub->height > title->height - job->crop[0] - job->crop[1] -
106 * The subtitle won't fit in the cropped zone, so center
107 * it vertically so we fit in as much as we can.
109 offset_top = job->crop[0] + ( title->height - job->crop[0] -
110 job->crop[1] - sub->height ) / 2;
112 else if( sub->y < job->crop[0] + margin_top )
115 * The subtitle fits in the cropped zone, but is currently positioned
116 * within our top margin, so move it outside of our margin.
118 offset_top = job->crop[0] + margin_top;
120 else if( sub->y > title->height - job->crop[1] - margin_top - sub->height )
123 * The subtitle fits in the cropped zone, and is not within the top
124 * margin but is within the bottom margin, so move it to be above
127 offset_top = title->height - job->crop[1] - margin_top - sub->height;
132 * The subtitle is fine where it is.
137 if( sub->width > title->width - job->crop[2] - job->crop[3] - 40 )
138 offset_left = job->crop[2] + ( title->width - job->crop[2] -
139 job->crop[3] - sub->width ) / 2;
140 else if( sub->x < job->crop[2] + 20 )
141 offset_left = job->crop[2] + 20;
142 else if( sub->x > title->width - job->crop[3] - 20 - sub->width )
143 offset_left = title->width - job->crop[3] - 20 - sub->width;
145 offset_left = sub->x;
148 alpha = lum + sub->width * sub->height;
149 sub_chromaU = alpha + sub->width * sub->height;
150 sub_chromaV = sub_chromaU + sub->width * sub->height;
152 out = buf->data + offset_top * title->width + offset_left;
154 for( i = 0; i < sub->height; i++ )
156 if( offset_top + i >= 0 && offset_top + i < title->height )
158 for( j = 0; j < sub->width; j++ )
160 if( offset_left + j >= 0 && offset_left + j < title->width )
162 uint8_t *chromaU, *chromaV;
165 * Merge the luminance and alpha with the picture
167 out[j] = ( (uint16_t) out[j] * ( 16 - (uint16_t) alpha[j] ) +
168 (uint16_t) lum[j] * (uint16_t) alpha[j] ) >> 4;
170 * Set the chroma (colour) based on whether there is
171 * any alpha at all. Don't try to blend with the picture.
173 chromaU = getU(buf->data, title->width, title->height,
174 offset_left+j, offset_top+i);
176 chromaV = getV(buf->data, title->width, title->height,
177 offset_left+j, offset_top+i);
182 * Add the chroma from the sub-picture, as this is
183 * not a transparent element.
185 *chromaU = sub_chromaU[j];
186 *chromaV = sub_chromaV[j];
194 sub_chromaU += sub->width;
195 sub_chromaV += sub->width;
199 hb_buffer_close( _sub );
/*
 * renderWork: per-frame render stage.
 *
 * For each input frame: queue its subtitle (so subtitles stay in sync if
 * frames are delayed/dropped), run the job's filter chain, burn in the
 * subtitle, crop/scale into a freshly allocated render buffer, and — when
 * doing VFR — hold frames in delay_queue so their start/stop times can be
 * rewritten to absorb the duration of frames dropped by detelecine.
 * Returns the output frame (or an empty buffer at end of stream) in
 * *buf_out.
 */
202 int renderWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
203 hb_buffer_t ** buf_out )
205 hb_work_private_t * pv = w->private_data;
206 hb_job_t * job = pv->job;
207 hb_title_t * title = job->title;
208 hb_buffer_t * in = *buf_in, * buf_tmp_in = *buf_in;
/* ivtc_buffer aliases *buf_out when VFR timestamp rewriting is needed. */
209 hb_buffer_t * ivtc_buffer = NULL;
213 /* If the input buffer is end of stream, send out an empty one
214 * to the next stage as well. Note that this will result in us
215 * losing the current contents of the delay queue.
217 *buf_out = job->indepth_scan? NULL : hb_buffer_init(0);
222 * During the indepth_scan ditch the buffers here before applying filters or attempting to
225 if( job->indepth_scan )
231 /* Push subtitles onto queue just in case we need to delay a frame */
234 hb_fifo_push( pv->subtitle_queue, in->sub );
/* No subtitle for this frame: push an empty placeholder so the queue
   stays one-entry-per-frame. */
238 hb_fifo_push( pv->subtitle_queue, hb_buffer_init(0) );
241 /* If there's a chapter mark remember it in case we delay or drop its frame */
242 if( in->new_chap && job->vfr )
244 pv->chapter_time = in->start;
245 pv->chapter_val = in->new_chap;
249 /* Setup render buffer */
/* 3/2 * w * h: one YUV 4:2:0 frame at the output dimensions. */
250 hb_buffer_t * buf_render = hb_buffer_init( 3 * job->width * job->height / 2 );
255 int filter_count = hb_list_count( job->filters );
258 for( i = 0; i < filter_count; i++ )
260 hb_filter_object_t * filter = hb_list_item( job->filters, i );
267 hb_buffer_t * buf_tmp_out = NULL;
269 int result = filter->work( buf_tmp_in,
274 filter->private_data );
277 * FILTER_OK: set temp buffer to filter buffer, continue
278 * FILTER_DELAY: set temp buffer to NULL, abort
279 * FILTER_DROP: set temp buffer to NULL, pop subtitle, abort
280 * FILTER_FAILED: leave temp buffer alone, continue
282 if( result == FILTER_OK )
284 buf_tmp_in = buf_tmp_out;
286 else if( result == FILTER_DELAY )
291 else if( result == FILTER_DROP )
295 /* We need to compensate for the time lost by dropping this frame.
296 Spread its duration out in quarters, because usually dropped frames
297 maintain a 1-out-of-5 pattern and this spreads it out amongst the remaining ones.
298 Store these in the lost_time array, which has 4 slots in it.
299 Because not every frame duration divides evenly by 4, and we can't lose the
300 remainder, we have to go through an awkward process to preserve it in the 4th array index. */
301 uint64_t temp_duration = buf_tmp_out->stop - buf_tmp_out->start;
302 pv->lost_time[0] += (temp_duration / 4);
303 pv->lost_time[1] += (temp_duration / 4);
304 pv->lost_time[2] += (temp_duration / 4);
/* 4th slot absorbs the division remainder so no time is lost. */
305 pv->lost_time[3] += ( temp_duration - (temp_duration / 4) - (temp_duration / 4) - (temp_duration / 4) );
307 pv->total_lost_time += temp_duration;
308 pv->dropped_frames++;
310 /* Pop the frame's subtitle and dispose of it. */
311 hb_buffer_t * subtitles = hb_fifo_get( pv->subtitle_queue );
312 hb_buffer_close( &subtitles );
319 buf_tmp_in = buf_tmp_out;
327 /* Cache frame start and stop times, so we can renumber
328 time stamps if dropping frames for VFR. */
330 for( i = 3; i >= 1; i-- )
332 pv->last_start[i] = pv->last_start[i-1];
333 pv->last_stop[i] = pv->last_stop[i-1];
336 /* In order to make sure we have continuous time stamps, store
337 the current frame's duration as starting when the last one stopped. */
338 pv->last_start[0] = pv->last_stop[1];
339 pv->last_stop[0] = pv->last_start[0] + (in->stop - in->start);
342 /* Apply subtitles */
345 hb_buffer_t * subtitles = hb_fifo_get( pv->subtitle_queue );
348 ApplySub( job, buf_tmp_in, &subtitles );
352 /* Apply crop/scale if specified */
353 if( buf_tmp_in && pv->context )
355 avpicture_fill( &pv->pic_tmp_in, buf_tmp_in->data,
357 title->width, title->height );
359 avpicture_fill( &pv->pic_tmp_out, buf_render->data,
361 job->width, job->height );
363 // Crop; this alters the pointer to the data to point to the correct place for cropped frame
364 av_picture_crop( &pv->pic_tmp_crop, &pv->pic_tmp_in, PIX_FMT_YUV420P,
365 job->crop[0], job->crop[2] );
367 // Scale pic_crop into pic_render according to the context set up in renderInit
368 sws_scale(pv->context,
369 pv->pic_tmp_crop.data, pv->pic_tmp_crop.linesize,
370 0, title->height - (job->crop[0] + job->crop[1]),
371 pv->pic_tmp_out.data, pv->pic_tmp_out.linesize);
373 hb_buffer_copy_settings( buf_render, buf_tmp_in );
375 buf_tmp_in = buf_render;
378 /* Set output to render buffer */
379 (*buf_out) = buf_render;
/* buf_tmp_in == NULL means a filter delayed or dropped this frame. */
381 if( buf_tmp_in == NULL )
383 /* Teardown and cleanup buffers if we are emitting NULL */
384 if( buf_in && *buf_in )
386 hb_buffer_close( buf_in );
389 if( buf_out && *buf_out )
391 hb_buffer_close( buf_out );
/* Filters ran but crop/scale didn't copy into buf_render, so do it here. */
395 else if( buf_tmp_in != buf_render )
397 /* Copy temporary results and settings into render buffer */
398 memcpy( buf_render->data, buf_tmp_in->data, buf_render->size );
399 hb_buffer_copy_settings( buf_render, buf_tmp_in );
402 if (*buf_out && job->vfr)
404 hb_fifo_push( pv->delay_queue, *buf_out );
409 * Keep the last three frames in our queue, this ensures that we have the last
410 * two always in there should we need to rewrite the durations on them.
415 if( hb_fifo_size( pv->delay_queue ) >= 3 )
417 *buf_out = hb_fifo_get( pv->delay_queue );
421 if( *buf_out && job->vfr)
423 /* The current frame exists. That means it hasn't been dropped by a filter.
424 Make it accessible as ivtc_buffer so we can edit its duration if needed. */
425 ivtc_buffer = *buf_out;
427 if( pv->lost_time[3] > 0 )
430 * A frame's been dropped earlier by VFR detelecine.
431 * Gotta make up the lost time. This will also
432 * slow down the video.
433 * The dropped frame has to be accounted for, so
434 * divvy it up amongst the 4 frames left behind.
435 * This is what the delay_queue is for;
436 * telecined sequences start 2 frames before
437 * the dropped frame, so to slow down the right
438 * ones you need a 2 frame delay between
439 * reading input and writing output.
442 /* We want to extend the outputted frame's duration by the value
443 stored in the 4th slot of the lost_time array. Because we need
444 to adjust all the values in the array so they're contiguous,
445 extend the duration inside the array first, before applying
446 it to the current frame buffer. */
447 pv->last_stop[3] += pv->lost_time[3];
449 /* Log how much time has been added back in to the video. */
450 pv->total_gained_time += pv->lost_time[3];
452 /* We've pulled the 4th value from the lost_time array
453 and added it to the last_stop array's 4th slot. Now, rotate the
454 lost_time array so the 4th slot now holds the 3rd's value, and
455 so on down the line, and set the 0 index to a value of 0. */
457 for( i=2; i >= 0; i--)
459 pv->lost_time[i+1] = pv->lost_time[i];
461 pv->lost_time[0] = 0;
463 /* Log how many frames have had their durations extended. */
464 pv->extended_frames++;
467 /* We can't use the given time stamps. Previous frames
468 might already have been extended, throwing off the
469 raw values fed to render.c. Instead, their
470 stop and start times are stored in arrays.
471 The 4th cached frame will be the one to use.
472 If it needed its duration extended to make up
473 lost time, it will have happened above. */
474 ivtc_buffer->start = pv->last_start[3];
475 ivtc_buffer->stop = pv->last_stop[3];
477 /* Set the 3rd cached frame to start when this one stops,
478 and so on down the line. If any of them need to be
479 extended as well to make up lost time, it'll be handled
480 on the next loop through the renderer. */
482 for (i = 2; i >= 0; i--)
/* NOTE(review): last_stop/last_start are uint64_t, so this difference
   is narrowed to int — fine for real frame durations but worth
   widening to uint64_t for safety. TODO confirm. */
484 int temp_duration = pv->last_stop[i] - pv->last_start[i];
485 pv->last_start[i] = pv->last_stop[i+1];
486 pv->last_stop[i] = pv->last_start[i] + temp_duration;
489 /* If we have a pending chapter mark and this frame is at
490 or after the time of the mark, mark this frame & clear
492 if( pv->chapter_time && pv->chapter_time <= ivtc_buffer->start )
494 ivtc_buffer->new_chap = pv->chapter_val;
495 pv->chapter_time = 0;
503 void renderClose( hb_work_object_t * w )
505 hb_work_private_t * pv = w->private_data;
507 hb_log("render: lost time: %lld (%i frames)", pv->total_lost_time, pv->dropped_frames);
508 hb_log("render: gained time: %lld (%i frames) (%lld not accounted for)", pv->total_gained_time, pv->extended_frames, pv->total_lost_time - pv->total_gained_time);
509 if (pv->dropped_frames)
510 hb_log("render: average dropped frame duration: %lld", (pv->total_lost_time / pv->dropped_frames) );
512 /* Cleanup subtitle queue */
513 if( pv->subtitle_queue )
515 hb_fifo_close( &pv->subtitle_queue );
518 if( pv->delay_queue )
520 hb_fifo_close( &pv->delay_queue );
523 /* Cleanup render work structure */
525 w->private_data = NULL;
528 int renderInit( hb_work_object_t * w, hb_job_t * job )
530 /* Allocate new private work object */
531 hb_work_private_t * pv = calloc( 1, sizeof( hb_work_private_t ) );
533 w->private_data = pv;
536 swsflags = SWS_LANCZOS;
538 swsflags |= SWS_ACCURATE_RND;
539 #endif /* __x86_64__ */
541 /* Get title and title size */
542 hb_title_t * title = job->title;
544 /* If crop or scale is specified, setup rescale context */
545 if( job->crop[0] || job->crop[1] || job->crop[2] || job->crop[3] ||
546 job->width != title->width || job->height != title->height )
548 pv->context = sws_getContext(title->width - (job->crop[2] + job->crop[3]),
549 title->height - (job->crop[0] + job->crop[1]),
551 job->width, job->height, PIX_FMT_YUV420P,
552 swsflags, NULL, NULL, NULL);
555 /* Setup FIFO queue for subtitle cache */
556 pv->subtitle_queue = hb_fifo_init( 8 );
557 pv->delay_queue = hb_fifo_init( 8 );
559 /* VFR IVTC needs a bunch of time-keeping variables to track
560 how many frames are dropped, how many are extended, what the
561 last 4 start and stop times were (so they can be modified),
562 how much time has been lost and gained overall, how much time
563 the latest 4 frames should be extended by, and where chapter
564 markers are (so they can be saved if their frames are dropped.) */
565 pv->dropped_frames = 0;
566 pv->extended_frames = 0;
567 pv->last_start[0] = 0;
568 pv->last_stop[0] = 0;
569 pv->total_lost_time = 0;
570 pv->total_gained_time = 0;
571 pv->lost_time[0] = 0; pv->lost_time[1] = 0; pv->lost_time[2] = 0; pv->lost_time[3] = 0;
572 pv->chapter_time = 0;
576 /* TODO: Move to work.c? */
579 int filter_count = hb_list_count( job->filters );
582 for( i = 0; i < filter_count; i++ )
584 hb_filter_object_t * filter = hb_list_item( job->filters, i );
586 if( !filter ) continue;
588 filter->private_data = filter->init( PIX_FMT_YUV420P,