/*
* Copyright (c) 2013 Stefano Sabatini
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
/**
* @file Push a still JPEG image as a live H.264/FLV stream over RTMP.
*
* The image is decoded, scaled to YUV420P, re-encoded and muxed in a loop.
* The license header and the log_packet() helper come from FFmpeg's remuxing
* example; unlike that example, this program transcodes the input.
*/
#include <libavutil/timestamp.h>
#include <libavformat/avformat.h>
#include <libavutil/time.h>
#include <libavutil/rational.h>
#include <libavdevice/avdevice.h>
#include <libavcodec/avcodec.h>
#include <libswscale/swscale.h>
#include <libavutil/pixfmt.h>
#include <libswresample/swresample.h>
#include <libavutil/imgutils.h>
#include <libavutil/opt.h>
#include <libavutil/avutil.h>
#include <libavutil/audio_fifo.h>
#include <libavutil/mathematics.h>
#include <stdint.h>
#include <stdio.h>   // printf
#include <string.h>  // strcpy
#include <unistd.h>  // usleep
#include <signal.h>
#define MAX_NAME_LEN 1024
AVFormatContext *vFormatContextInput;
AVCodecContext *vCodecContextInput;
AVFormatContext *vFormatContextOutput;
AVCodecContext *vCodecContextOutput;
enum AVPixelFormat vPixelFormatOutput;
AVFormatContext *jpegFormatContext;
AVCodecContext *jpegCodecContext;
struct SwsContext *vSWSContext;
struct SwsContext *jpegSWSContext;
AVFrame *videoFrame;
AVStream *videoStream;
char inputFile[MAX_NAME_LEN] = "/media/1.jpg";
char output[MAX_NAME_LEN] ="rtmp://47.92.75.80:1935/live/58H";
const int fps = 60;
const int width = 800;
const int height = 600;
#define CAP_SCREEN 1
#define CAP_CAMERA 2
#define CAP_JPEG 3
#define CAP_DEVICE CAP_JPEG
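// Debug helper taken from FFmpeg's remuxing example: prints a packet's
// pts/dts/duration both as raw ticks and as seconds in the stream's time base.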
static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt, const char *tag)
{
AVRational *time_base = &fmt_ctx->streams[pkt->stream_index]->time_base;
printf("%s: pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n",
tag,
av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, time_base),
av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, time_base),
av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, time_base),
pkt->stream_index);
}
int g_need_seek;
int64_t g_seek_ts;
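// Allocate the demuxer context for the JPEG input; the file itself is opened
// (and closed again) once per captured frame in openCodecContextJPEG().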
int initInput()
{
jpegFormatContext = avformat_alloc_context();
if (!jpegFormatContext)
return -1;
return 0;
}
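// Allocate the reusable output frame and back it with a manually allocated
// YUV420P buffer at the target resolution (the buffer is not owned by the
// frame, so av_frame_free() will not release it).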
void initFrame()
{
int buffLen = av_image_get_buffer_size(vPixelFormatOutput, width, height, 1);
uint8_t *buffer = (uint8_t *)av_malloc(buffLen);
videoFrame = av_frame_alloc();
av_image_fill_arrays(videoFrame->data, videoFrame->linesize, buffer, vPixelFormatOutput, width, height, 1);
}
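// Set up the output side: an FLV muxer writing to the RTMP URL, one video
// stream, and an H.264 encoder context (bitrate, GOP, time base, quantizer
// range), then write the stream header.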
int initOutput()
{
char URL[MAX_NAME_LEN] = {0};
strcpy(URL, output);
vFormatContextOutput = NULL;
if (avformat_alloc_output_context2(&vFormatContextOutput, NULL, "flv", URL) < 0)
{
printf("[FFMPEG_TEST]initOutput avformat_alloc_output_context2 vFormatContextOutput\n");
return -1;
}
if (avio_open(&(vFormatContextOutput->pb), URL, AVIO_FLAG_WRITE) < 0)
{
printf("[FFMPEG_TEST]initOutput avio_open vFormatContextOutput\n");
return -1;
}
videoStream = avformat_new_stream(vFormatContextOutput, NULL);
vCodecContextOutput = avcodec_alloc_context3(NULL);
avcodec_parameters_to_context(vCodecContextOutput, videoStream->codecpar);
vCodecContextOutput->codec_id = vFormatContextOutput->oformat->video_codec;
vCodecContextOutput->codec_type = AVMEDIA_TYPE_VIDEO;
vCodecContextOutput->pix_fmt = vPixelFormatOutput;
vCodecContextOutput->width = width;
vCodecContextOutput->height = height;
vCodecContextOutput->bit_rate = 1 * 1024 * 1024;
vCodecContextOutput->gop_size = 10;
vCodecContextOutput->time_base.num = 1;
vCodecContextOutput->time_base.den = fps;
vCodecContextOutput->qmin = 10; // x264 default quantizer range
vCodecContextOutput->qmax = 51;
// vCodecContextOutput->max_b_frames = 3; // not supported by FLV
// FLV expects the H.264 SPS/PPS as global extradata
if (vFormatContextOutput->oformat->flags & AVFMT_GLOBALHEADER)
vCodecContextOutput->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
AVDictionary *vOptionsOutput = NULL;
// H.264
if (vCodecContextOutput->codec_id == AV_CODEC_ID_H264)
{
av_dict_set(&vOptionsOutput, "preset", "slow", 0);
av_dict_set(&vOptionsOutput, "tune", "zerolatency", 0);
// av_dict_set(&vOptionsOutput, "profile", "main", 0);
}
// H.265
if (vCodecContextOutput->codec_id == AV_CODEC_ID_H265)
{
av_dict_set(&vOptionsOutput, "preset", "ultrafast", 0);
av_dict_set(&vOptionsOutput, "tune", "zero-latency", 0);
}
AVCodec *vCodecOutput = avcodec_find_encoder(vCodecContextOutput->codec_id);
if (!vCodecOutput || avcodec_open2(vCodecContextOutput, vCodecOutput, &vOptionsOutput) != 0)
{
printf("[FFMPEG_TEST]initOutput avcodec_open2 vCodecContextOutput\n");
return -1;
}
// copy the encoder parameters back into the stream; without this the muxer has
// no codec parameters (dimensions, extradata) when the header is written
avcodec_parameters_from_context(videoStream->codecpar, vCodecContextOutput);
if (avformat_write_header(vFormatContextOutput, NULL) < 0)
{
printf("[FFMPEG_TEST]initOutput avformat_write_header vFormatContextOutput\n");
return -1;
}
return 0;
}
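// Initialization order: pick the output pixel format, init networking, prepare
// the JPEG input context, the scratch frame, and finally the RTMP output.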
int start()
{
vPixelFormatOutput = AV_PIX_FMT_YUV420P;
// av_register_all();
avformat_network_init();
if (initInput() < 0)
{
avformat_close_input(&jpegFormatContext); // also frees the allocated context
return -1;
}
initFrame();
if (initOutput() < 0)
{
av_frame_free(&videoFrame);
if (vFormatContextOutput) // initOutput() may fail before the context exists
{
avcodec_free_context(&vCodecContextOutput);
avio_closep(&vFormatContextOutput->pb);
avformat_free_context(vFormatContextOutput);
}
return -1;
}
return 0;
}
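// Tear down in reverse order: finalize the FLV stream with a trailer, then
// release the frame, the encoder, the output I/O and the input context.
// Note: with the infinite capture loop below, this function is never reached.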
void stop()
{
av_write_trailer(vFormatContextOutput);
av_frame_free(&videoFrame);
avcodec_free_context(&vCodecContextOutput);
avio_closep(&vFormatContextOutput->pb);
avformat_free_context(vFormatContextOutput);
avformat_close_input(&jpegFormatContext);
}
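// Open the JPEG file, find its video stream, open an MJPEG decoder for it and
// create the swscale context that converts the decoded picture to YUV420P at
// the output resolution.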
int openCodecContextJPEG()
{
AVDictionary *vOptionsInput = NULL;
char c_fps[8] = {0};
sprintf(c_fps, "%d", fps);
// the image2 demuxer's option is named "framerate", and the dictionary has to
// be passed to avformat_open_input() to take effect
if (av_dict_set(&vOptionsInput, "framerate", c_fps, 0) < 0)
{
printf("[FFMPEG_TEST]openCodecContextJPEG av_dict_set vOptionsInput\n");
return -1;
}
if (avformat_open_input(&jpegFormatContext, inputFile, NULL, &vOptionsInput) != 0)
{
av_dict_free(&vOptionsInput);
printf("[FFMPEG_TEST]openCodecContextJPEG avformat_open_input jpegFormatContext\n");
return -1;
}
av_dict_free(&vOptionsInput);
if (avformat_find_stream_info(jpegFormatContext, NULL) < 0)
{
printf("[FFMPEG_TEST]openCodecContextJPEG avformat_find_stream_info jpegFormatContext\n");
return -1;
}
int videoIndex = -1;
for (int i = 0; i < jpegFormatContext->nb_streams; ++i)
{
if (jpegFormatContext->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
{
videoIndex = i;
break;
}
}
jpegCodecContext = avcodec_alloc_context3(NULL);
avcodec_parameters_to_context(jpegCodecContext, jpegFormatContext->streams[videoIndex]->codecpar);
AVCodec *vCodecInput = avcodec_find_decoder(jpegCodecContext->codec_id);
if (avcodec_open2(jpegCodecContext, vCodecInput, NULL) != 0)
{
printf("[FFMPEG_TEST]openCodecContextJPEG avcodec_open2 jpegCodecContext\n");
return -1;
}
jpegSWSContext = sws_getContext(jpegCodecContext->width, jpegCodecContext->height, jpegCodecContext->pix_fmt,
width, height, vPixelFormatOutput, SWS_BILINEAR, NULL, NULL, NULL);
return 0;
}
void closeCodecContextJPEG()
{
sws_freeContext(jpegSWSContext);
avcodec_free_context(&jpegCodecContext); // avcodec_close() alone would leak the context on every frame
avformat_close_input(&jpegFormatContext);
}
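// One captured frame: open the JPEG, read and decode its single packet, scale
// the picture into videoFrame, encode it as H.264, mux the packet to the RTMP
// output, then close the JPEG again. Reopening and decoding the same file every
// frame is simple but costly; caching the decoded picture would avoid it.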
int catchJPEGFrame(int frameIndex)
{
if (openCodecContextJPEG() < 0)
{
return -1;
}
AVPacket *packetIn = av_packet_alloc(); // ALLOC_PACKET_IN
AVFrame *frameIn = av_frame_alloc(); // ALLOC_FRAME_IN
if (av_read_frame(jpegFormatContext, packetIn) < 0)
{
av_frame_free(&frameIn); // FREE_FRAME_IN
av_packet_free(&packetIn); // FREE_PACKET_IN
closeCodecContextJPEG();
printf("[FFMPEG_TEST]catchJPEGFrame av_read_frame jpegFormatContext\n");
return -1;
}
int ret;
// avcodec_decode_video2(jpegCodecContext, frameIn, &gotPtr, packetIn); // old API
if (avcodec_send_packet(jpegCodecContext, packetIn) < 0 || (ret = avcodec_receive_frame(jpegCodecContext, frameIn)) < 0)
{
av_frame_free(&frameIn); // FREE_FRAME_IN
av_packet_free(&packetIn); // FREE_PACKET_IN
closeCodecContextJPEG();
printf("[FFMPEG_TEST]catchJPEGFrame avcodec_send_packet/avcodec_receive_frame jpegCodecContext\n");
return -1;
}
sws_scale(jpegSWSContext, (const uint8_t *const *)frameIn->data, frameIn->linesize, 0, jpegCodecContext->height,
videoFrame->data, videoFrame->linesize);
videoFrame->format = vCodecContextOutput->pix_fmt;
videoFrame->width = width;
videoFrame->height = height;
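// pts is computed directly in the output stream's time base, so the encoded
// packet's timestamps are already in muxer units and are not rescaled here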
videoFrame->pts = frameIndex * (videoStream->time_base.den) / ((videoStream->time_base.num) * fps);
AVPacket *packetOut = av_packet_alloc(); // ALLOC_PACKET_OUT
// avcodec_encode_video2(vCodecContextOutput, packetOut, videoFrame, &gotPtr); // old API
// avcodec_receive_packet() may also return AVERROR(EAGAIN) while the encoder
// buffers frames; with the zerolatency tune this one-in/one-out pattern usually
// holds, but a robust loop would keep feeding frames until a packet comes out.
if (avcodec_send_frame(vCodecContextOutput, videoFrame) < 0 || (ret = avcodec_receive_packet(vCodecContextOutput, packetOut)) < 0)
{
av_frame_free(&frameIn); // FREE_FRAME_IN
av_packet_free(&packetIn); // FREE_PACKET_IN
av_packet_free(&packetOut); // FREE_PACKET_OUT
closeCodecContextJPEG();
printf("[FFMPEG_TEST]catchJPEGFrame avcodec_send_frame/avcodec_receive_packet vCodecContextOutput\n");
return -1;
}
packetOut->stream_index = videoStream->index;
av_write_frame(vFormatContextOutput, packetOut);
log_packet(vFormatContextOutput, packetOut, "out");
printf("[FFMPEG_TEST]Frame = %05d, PacketOutSize = %d\n", frameIndex, packetOut->size);
av_frame_free(&frameIn); // FREE_FRAME_IN
av_packet_free(&packetIn); // FREE_PACKET_IN
av_packet_free(&packetOut); // FREE_PACKET_OUT
closeCodecContextJPEG();
return 0;
}
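// Main capture loop: push one frame per iteration and sleep roughly one frame
// interval. The crude sleep does not account for encode time, so the real
// output rate will drift below the nominal fps.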
void catchVideoStart()
{
if (start() < 0) return;
int frameIndex = 0;
while (1)
{
catchJPEGFrame(frameIndex);
usleep((unsigned int)(1000000.0f / fps)); // usleep() takes microseconds (~16.7 ms per frame at 60 fps)
++frameIndex;
}
stop();
}
int main(int argc, char **argv)
{
catchVideoStart();
return 0;
}