|
发表于 2013-9-30 14:03:25
|
显示全部楼层
- if (oc->oformat->flags & AVFMT_RAWPICTURE) {
- /* Raw video case - directly store the picture in the packet */
- AVPacket pkt;
- av_init_packet(&pkt);
- pkt.flags |= AV_PKT_FLAG_KEY;
- pkt.stream_index = st->index;
- pkt.data = dst_picture.data[0];
- pkt.size = sizeof(AVPicture);
- ret = av_interleaved_write_frame(oc, &pkt);
- } else {
- AVPacket pkt = { 0 };
- int got_packet;
- av_init_packet(&pkt);
- /* encode the image */
- ret = avcodec_encode_video2(c, &pkt, frame, &got_packet); ////看这里
- if (ret < 0) {
- fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));
- exit(1);
- }
- /* If size is zero, it means the image was buffered. */
- if (!ret && got_packet && pkt.size) {
- pkt.stream_index = st->index;
- /* Write the compressed frame to the media file. */
- ret = av_interleaved_write_frame(oc, &pkt);
- } else {
- ret = 0;
- }
- }
复制代码 根据上面的代码可以知道,pkt 的 pts 是在 encode 的时候计算出来的;当然,你也可以在 encode 之后再去修改 pts。
那么 encode 是怎么得到这个 pkt 的呢?
可以看encode里面的具体实现- int attribute_align_arg avcodec_encode_video2(AVCodecContext *avctx,
- AVPacket *avpkt,
- const AVFrame *frame,
- int *got_packet_ptr)
- {
- int ret;
- AVPacket user_pkt = *avpkt;
- int needs_realloc = !user_pkt.data;
- *got_packet_ptr = 0;
- if(CONFIG_FRAME_THREAD_ENCODER &&
- avctx->internal->frame_thread_encoder && (avctx->active_thread_type&FF_THREAD_FRAME))
- return ff_thread_video_encode_frame(avctx, avpkt, frame, got_packet_ptr);
- if ((avctx->flags&CODEC_FLAG_PASS1) && avctx->stats_out)
- avctx->stats_out[0] = '\0';
- if (!(avctx->codec->capabilities & CODEC_CAP_DELAY) && !frame) {
- av_free_packet(avpkt);
- av_init_packet(avpkt);
- avpkt->size = 0;
- return 0;
- }
- if (av_image_check_size(avctx->width, avctx->height, 0, avctx))
- return AVERROR(EINVAL);
- av_assert0(avctx->codec->encode2);
- ret = avctx->codec->encode2(avctx, avpkt, frame, got_packet_ptr); ///////看这里
- av_assert0(ret <= 0);
- if (avpkt->data && avpkt->data == avctx->internal->byte_buffer) {
- needs_realloc = 0;
- if (user_pkt.data) {
- if (user_pkt.size >= avpkt->size) {
- memcpy(user_pkt.data, avpkt->data, avpkt->size);
- } else {
- av_log(avctx, AV_LOG_ERROR, "Provided packet is too small, needs to be %d\n", avpkt->size);
- avpkt->size = user_pkt.size;
- ret = -1;
- }
- avpkt->buf = user_pkt.buf;
复制代码 这个encode代码是在libavcodec/utils.c里面
在ffmpeg里面这个utils是一个主要的switcher,每一个codec都是以模块的形式加载的,通过这个switcher进入对应的编解码模块中
下面可以假设是mpeg4编码,那么就进入如下接口中
- /* Registration record for the MPEG-4 part 2 video encoder.
-  * It is added to libavcodec's codec list during register-all, and
-  * avcodec_encode_video2() later dispatches into the .encode2 callback below. */
- AVCodec ff_mpeg4_encoder = {
- .name = "mpeg4",
- .type = AVMEDIA_TYPE_VIDEO,
- .id = AV_CODEC_ID_MPEG4,
- .priv_data_size = sizeof(MpegEncContext),
- .init = encode_init,
- .encode2 = ff_MPV_encode_picture, ////// control enters through this callback; it was registered at register-all time
- .close = ff_MPV_encode_end,
- .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
- .capabilities = CODEC_CAP_DELAY | CODEC_CAP_SLICE_THREADS,
- .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2"),
- .priv_class = &mpeg4enc_class,
- };
复制代码 如果使用的是 mpeg4 编码,就可以顺着前面的 .encode2 回调跟进到这个 ff_MPV_encode_picture 中
- pkt->pts = s->current_picture.f.pts;
- if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
- if (!s->current_picture.f.coded_picture_number)
- pkt->dts = pkt->pts - s->dts_delta;
- else
- pkt->dts = s->reordered_pts;
- s->reordered_pts = pkt->pts;
- } else
- pkt->dts = pkt->pts;
- if (s->current_picture.f.key_frame)
- pkt->flags |= AV_PKT_FLAG_KEY;
- if (s->mb_info)
- av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size);
复制代码 从这段代码可以看出,pkt 的 pts 是从 s->current_picture.f.pts 中获得的;dts 则根据图像是否有延迟(low_delay)来判断,必要时通过 pts - dts_delta 计算得来。
这个 s->current_picture.f.pts 即为 AVFrame 的 pts。至于这个 AVFrame 的 pts 是怎么得到的,可以找到如下实现,它也是在 mpeg4 编码流程里面计算得到的。
- /* Queue one user-supplied frame into the encoder's input picture buffer.
-  * Validates (or synthesizes) the frame's pts, stores the frame data into an
-  * internal Picture — by reference when possible, otherwise by copy — and
-  * records the pts on pic->f.pts for later use when the packet is emitted.
-  * Returns 0 on success, a negative AVERROR on failure. */
- static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
- {
- Picture *pic = NULL;
- int64_t pts;
- int i, display_picture_number = 0, ret;
- /* Frames are delayed by the number of B-frames; with no B-frames and
-  * low_delay unset there is still a single reordering slot. */
- const int encoding_delay = s->max_b_frames ? s->max_b_frames :
- (s->low_delay ? 0 : 1);
- int direct = 1;
- if (pic_arg) {
- pts = pic_arg->pts;
- display_picture_number = s->input_picture_number++;
- if (pts != AV_NOPTS_VALUE) {
- /* Caller provided a pts: enforce strict monotonicity against the
-  * previously supplied pts. */
- if (s->user_specified_pts != AV_NOPTS_VALUE) {
- int64_t last = s->user_specified_pts;
- if (pts <= last) {
- av_log(s->avctx, AV_LOG_ERROR,
- "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
- pts, last);
- return AVERROR(EINVAL);
- }
- /* dts_delta is derived once, from the gap between the first two
-  * frames, and is later used to compute dts for the first packet. */
- if (!s->low_delay && display_picture_number == 1)
- s->dts_delta = pts - last;
- }
- s->user_specified_pts = pts;
- } else {
- /* No pts on the frame: guess last+1 if we have history, otherwise
-  * fall back to the display picture number. */
- if (s->user_specified_pts != AV_NOPTS_VALUE) {
- s->user_specified_pts =
- pts = s->user_specified_pts + 1;
- av_log(s->avctx, AV_LOG_INFO,
- "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
- pts);
- } else {
- pts = display_picture_number;
- }
- }
- }
- if (pic_arg) {
- /* "direct" means we can reference the caller's buffer instead of
-  * copying: it must be refcounted and its strides must match ours. */
- if (!pic_arg->buf[0])
- direct = 0;
- if (pic_arg->linesize[0] != s->linesize)
- direct = 0;
- if (pic_arg->linesize[1] != s->uvlinesize)
- direct = 0;
- if (pic_arg->linesize[2] != s->uvlinesize)
- direct = 0;
- av_dlog(s->avctx, "%d %d %d %d\n", pic_arg->linesize[0],
- pic_arg->linesize[1], s->linesize, s->uvlinesize);
- if (direct) {
- i = ff_find_unused_picture(s, 1);
- if (i < 0)
- return i;
- pic = &s->picture[i];
- pic->reference = 3;
- if ((ret = av_frame_ref(&pic->f, pic_arg)) < 0)
- return ret;
- if (ff_alloc_picture(s, pic, 1) < 0) {
- return -1;
- }
- } else {
- i = ff_find_unused_picture(s, 0);
- if (i < 0)
- return i;
- pic = &s->picture[i];
- pic->reference = 3;
- if (ff_alloc_picture(s, pic, 0) < 0) {
- return -1;
- }
- /* If the caller's planes already point into our buffer (at the
-  * in-place offset) no copy is needed; otherwise copy plane by plane. */
- if (pic->f.data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
- pic->f.data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
- pic->f.data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
- // empty
- } else {
- int h_chroma_shift, v_chroma_shift;
- av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
- &h_chroma_shift,
- &v_chroma_shift);
- for (i = 0; i < 3; i++) {
- int src_stride = pic_arg->linesize[i];
- int dst_stride = i ? s->uvlinesize : s->linesize;
- int h_shift = i ? h_chroma_shift : 0;
- int v_shift = i ? v_chroma_shift : 0;
- int w = s->width >> h_shift;
- int h = s->height >> v_shift;
- uint8_t *src = pic_arg->data[i];
- uint8_t *dst = pic->f.data[i];
- if (s->codec_id == AV_CODEC_ID_AMV && !(s->avctx->flags & CODEC_FLAG_EMU_EDGE)) {
- /* AMV: copy full 16-pixel-aligned height unless edge emulation is on. */
- h = ((s->height + 15)/16*16) >> v_shift;
- }
- if (!s->avctx->rc_buffer_size)
- dst += INPLACE_OFFSET;
- if (src_stride == dst_stride)
- memcpy(dst, src, src_stride * h);
- else {
- /* Strides differ: copy row by row. */
- int h2 = h;
- uint8_t *dst2 = dst;
- while (h2--) {
- memcpy(dst2, src, w);
- dst2 += dst_stride;
- src += src_stride;
- }
- }
- if ((s->width & 15) || (s->height & 15)) {
- /* Non-macroblock-aligned dimensions: extend the bottom edge. */
- s->dsp.draw_edges(dst, dst_stride,
- w, h,
- 16>>h_shift,
- 16>>v_shift,
- EDGE_BOTTOM);
- }
- }
- }
- }
- ret = av_frame_copy_props(&pic->f, pic_arg);
- if (ret < 0)
- return ret;
- pic->f.display_picture_number = display_picture_number;
- pic->f.pts = pts; // we set this here to avoid modifiying pic_arg
- }
- /* shift buffer entries */
- for (i = 1; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
- s->input_picture[i - 1] = s->input_picture[i];
- s->input_picture[encoding_delay] = (Picture*) pic;
- return 0;
- }
复制代码 从这段代码中可以看到 AVFrame 的 pts 是如何获得的
至于你说的音视频同步,是通过一个flag
video_sync_method或者audio_sync_method
- format_video_sync = video_sync_method;
- if (format_video_sync == VSYNC_AUTO)
- format_video_sync = (s->oformat->flags & AVFMT_VARIABLE_FPS) ? ((s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
- switch (format_video_sync) {
- case VSYNC_CFR:
- // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
- if (delta < -1.1)
- nb_frames = 0;
- else if (delta > 1.1)
- nb_frames = lrintf(delta);
- break;
- case VSYNC_VFR:
- if (delta <= -0.6)
- nb_frames = 0;
- else if (delta > 0.6)
- ost->sync_opts = lrint(sync_ipts);
- break;
- case VSYNC_DROP:
- case VSYNC_PASSTHROUGH:
- ost->sync_opts = lrint(sync_ipts);
- break;
- default:
- av_assert0(0);
- }
复制代码 这样来判断的
|
|