新学ffmpeg,参照论坛上的代码写了一个按时间切割视频的程序。从第二个得到的视频开始就会出现前几秒整个图像模糊。纠结了两天了也不知道是什么问题
只能劳烦大家有空看一看。谢谢
#include "stdio.h"
#include <string.h>
extern "C"
{
#include <libavformat/avformat.h>
#include "libavcodec/avcodec.h"
#include "libavfilter/avfiltergraph.h"
#include "libavfilter/buffersink.h"
#include "libavfilter/buffersrc.h"
#include "libavutil/avutil.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libswresample/swresample.h"
#include "libavutil/fifo.h"
#include "libavutil/audio_fifo.h"
#pragma comment(lib, "avcodec.lib")
#pragma comment(lib, "avformat.lib")
#pragma comment(lib, "avutil.lib")
//#pragma comment(lib, "avdevice.lib")
#pragma comment(lib, "avfilter.lib")
//#pragma comment(lib, "postproc.lib")
#pragma comment(lib, "swresample.lib")
//#pragma comment(lib, "swscale.lib")
};
#define MAX 1000
int main(int argc, char* argv[]){
if(argc < 2)
{
printf("no input file!\n");
return -1;
}
av_register_all();
int ret;
AVFormatContext *ifmt_ctx = NULL; //input file context
if ((ret = avformat_open_input(&ifmt_ctx, argv[1], NULL, NULL)) < 0)
{
printf("can not open the input file format context!\n");
return -1;
}
if ((ret = avformat_find_stream_info(ifmt_ctx, NULL)) < 0)
{
printf("can not find the input stream info!\n");
return -1;
}
double a[MAX];
for(int i=0;i<MAX;i++){ //initial split time num
a[i] = -1;
}
a[0]=0;a[1]=0.9;a[2]=2.4;a[3]=5.9;a[4]=10;a[5]=12.5;a[6]=13.6;//a is time
AVStream *out_vstream[MAX]; //
AVStream *out_astream[MAX];
AVFormatContext *ofmt_ctx[MAX];
for(int j=0;;j++){ //initial ofmt_ctx
if(a[j]==-1){
break;
}
char str_out_filename[10];
sprintf(str_out_filename, "keyframe%d.flv", j);
avformat_alloc_output_context2(&(ofmt_ctx[j]), NULL, NULL, str_out_filename);
if (!ofmt_ctx[j])
{
printf( "Could not create output1 context\n");
ret = AVERROR_UNKNOWN;
return -1;
}
}
int inVideo_StreamIndex = -1,inAudio_StreamIndex = -1;
for (int i = 0; i < ifmt_ctx->nb_streams; i++)
{
if (ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
{
inVideo_StreamIndex = i;
//open decoder
if(0 > avcodec_open2(ifmt_ctx->streams[i]->codec, avcodec_find_decoder(ifmt_ctx->streams[i]->codec->codec_id), NULL))
{
printf("can not find or open video decoder!\n");
return -1;
}
for(int j=0;;j++){
if(a[j]==-1){
break;
}
out_vstream[j] = avformat_new_stream(ofmt_ctx[j], NULL);
if (!out_vstream[j])
{
printf("Failed allocating output1 video stream\n");
ret = AVERROR_UNKNOWN;
return -1;
}
else
{
//copy the settings of AVCodecContext;
if (avcodec_copy_context(out_vstream[j]->codec, ifmt_ctx->streams[i]->codec) < 0)
{
printf( "Failed to copy context from input to output stream codec context\n");
return -1;
}
out_vstream[j]->codec->codec_tag = 0;
if(ofmt_ctx[j]->oformat->flags & AVFMT_GLOBALHEADER)
{
out_vstream[j]->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
}
}
}
}else if (ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO)
{
inAudio_StreamIndex = i;
for(int j=0;;j++)
{
if(a[j]==-1)
{
break;
}
out_astream[j] = avformat_new_stream(ofmt_ctx[j], NULL);
if (!out_astream[j])
{
printf("Failed allocating output1 audio stream\n");
ret = AVERROR_UNKNOWN;
return -1;
}
else
{
//copy the settings of AVCodecContext;
if (avcodec_copy_context(out_astream[j]->codec, ifmt_ctx->streams[i]->codec) < 0)
{
printf( "Failed to copy context from input to output stream codec context\n");
return -1;
}
out_astream[j]->codec->codec_tag = 0;
if(ofmt_ctx[j]->oformat->flags & AVFMT_GLOBALHEADER)
{
out_astream[j]->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
}
}
}
}
}
//av_dump_format(ifmt_ctx, 0, argv[1], 0);
for(int j=0;;j++)
{
if(a[j]==-1)
{
break;
}
//av_dump_format(ofmt_ctx[j], 0, str_out_filename[j], 1);
//open output1 file
if (!(ofmt_ctx[j]->oformat->flags & AVFMT_NOFILE))
{
printf( "Could not open output file \n");
char str_out_filename[10];
sprintf(str_out_filename, "keyframe%d.flv", j);
if (avio_open(&ofmt_ctx[j]->pb, str_out_filename, AVIO_FLAG_WRITE) < 0)
{
printf( "Could not open output file");
return -1;
}
}
//write out 1 file header
if (avformat_write_header(ofmt_ctx[j], NULL) < 0)
{
printf( "Error occurred when opening video output file\n");
return -1;
}
}
int splitPtsV[MAX];//the real split video pts
int splitDtsV[MAX];
int splitPtsA[MAX];//the real split audio pts
int splitDtsA[MAX];
int videoIndex = 0;//the real video index
splitPtsV[0] = 0;
splitDtsV[0] = 0;
splitPtsA[0] = 0;
splitDtsA[0] = 0;
double splitTime = 30;//the split time (sec)
AVPacket pkt;
while(1)
{
AVFormatContext *ofmt_ctxi;
AVStream *in_stream, *out_stream;
int index; //ofmt index
if (av_read_frame(ifmt_ctx, &pkt) < 0)
{
break;
}
in_stream = ifmt_ctx->streams[pkt.stream_index];
if (pkt.stream_index == inVideo_StreamIndex)
{
videoIndex++;
double time = pkt.pts * (((double)in_stream->time_base.num) / ((double)in_stream->time_base.den));
//printf("time = %d\n", time);
//printf("time = %d pts = %d", time, pkt.pts);
for(int j=0;;j++)
{
if(a[j]==-1)
{
break;
}
if( (time >= a[j] && time < a[j+1]) || (time >= a[j] && a[j+1] == -1))
//if(time == a[j] )
{
splitTime = a[j];
//if(time == a[j]){
splitPtsV[j+1] = pkt.pts;
splitDtsV[j+1] = pkt.dts;
//}
index = j;
out_stream = ofmt_ctx[j]->streams[pkt.stream_index];
ofmt_ctxi = ofmt_ctx[j];
printf("video:time = %d pts = %d\n", time, pkt.pts);
//printf("splitTime = %d\n", splitTime);
break;
}
}
pkt.pts = pkt.pts - splitPtsV[index];
pkt.dts = pkt.dts - splitDtsV[index];
}
else if (pkt.stream_index == inAudio_StreamIndex)
{
double time = pkt.pts * (((double)in_stream->time_base.num) / ((double)in_stream->time_base.den));
for(int j=0;;j++)
{
if(a[j]==-1)
{
break;
}
if( (time >= a[j] && time < a[j+1]) || (time >= a[j] && a[j+1] == -1))
//if(time == a[j])
{
splitTime = a[j];
index = j;
//if(time == a[j]){
splitPtsA[j+1] = pkt.pts;
splitDtsA[j+1] = pkt.dts;
//}
out_stream = ofmt_ctx[j]->streams[pkt.stream_index];
//printf("audio:time = %d pts = %d index=%d\n", time, pkt.pts, index);
ofmt_ctxi = ofmt_ctx[j];
break;
}
}
pkt.pts = pkt.pts - splitPtsA[index];
pkt.dts = pkt.dts - splitDtsA[index];
}
pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
//printf("pts=%d,dts=%d,duration=%d\n",pkt.pts,pkt.dts,pkt.duration);
pkt.pos = -1;
//write into file
if (av_interleaved_write_frame(ofmt_ctxi, &pkt) < 0)
{
//printf( "Error muxing packet\n");
//break;
}
av_free_packet(&pkt);
}
for(int j=0;;j++)
{
if(a[j]==-1)
{
break;
}
av_write_trailer(ofmt_ctx[j]);
}
avformat_close_input(&ifmt_ctx);
for(int j=0;;j++)
{
if(a[j]==-1)
{
break;
}
/* close output */
if (ofmt_ctx[j] && !(ofmt_ctx[j]->oformat->flags & AVFMT_NOFILE))
avio_close(ofmt_ctx[j]->pb);
avformat_free_context(ofmt_ctx[j]);
}
return 0;
}