If you have already implemented recording video to a local file with FFMPEG, turning it into an RTMP push stream only takes a few changed lines of code.
The code that pushes to an RTMP server is essentially the same as the code that saves to a local file; the main difference is the output address. For local recording the output is a local file name; for RTMP streaming you simply replace that file name with the RTMP server address and force the FLV muxer (RTMP transports FLV-packaged data), as shown below.
Complete project code download (it builds and runs as downloaded; feel free to message me if anything is unclear): https://download.csdn.net/download/xiaolong1126626497/19323232
The main code changes:
filename="rtmp://js.live-send.acg.tv/live-js/?xxxxxxxx" avformat_alloc_output_context2(&oc,nullptr,"flv",filename); //文件名称替换成网络地址 //指定编码器 fmt->video_codec=AV_CODEC_ID_H264; fmt->audio_codec=AV_CODEC_ID_AAC;
The earlier article on saving video to a local file with FFMPEG:
https://blog.csdn.net/xiaolong1126626497/article/details/104858499
The complete code of video_audio_encode.cpp:
#include "video_audio_encode.h" class VideoAudioEncode videoaudioencode; Thread_VideoAudioEncode thread_VideoenCode; //视频音频编码的线程 char audio_buffer[AUDIO_BUFFER_MAX_SIZE]; //音频缓存 int audio_buffer_r_count=0; int audio_buffer_w_count=0; //音频相关参数设置 #define AUDIO_RATE_SET 44100 //音频采样率 #define AUDIO_BIT_RATE_SET 64000 //设置码率 #define AUDIO_CHANNEL_SET AV_CH_LAYOUT_MONO //AV_CH_LAYOUT_MONO 单声道 AV_CH_LAYOUT_STEREO 立体声 #define STREAM_DURATION 60*10.0 //录制时间秒单位 #define STREAM_FRAME_RATE 30 /* 30 images/s 图片是多少帧1秒*/ #define STREAM_PIX_FMT AV_PIX_FMT_YUV420P /* default pix_fmt 视频图像格式 */ #define SCALE_FLAGS SWS_BICUBIC // 单个输出AVStream的包装器 typedef struct OutputStream { AVStream *st; AVCodecContext *enc; /* 下一帧的点数*/ int64_t next_pts; int samples_count; AVFrame *frame; AVFrame *tmp_frame; float t, tincr, tincr2; struct SwsContext *sws_ctx; struct SwrContext *swr_ctx; } OutputStream; static int write_frame(AVFormatContext *fmt_ctx, const AVRational *time_base, AVStream *st, AVPacket *pkt) { /*将输出数据包时间戳值从编解码器重新调整为流时基 */ av_packet_rescale_ts(pkt, *time_base, st->time_base); pkt->stream_index = st->index; /*将压缩的帧写入媒体文件*/ return av_interleaved_write_frame(fmt_ctx, pkt); } /* 添加输出流。 */ static void add_stream(OutputStream *ost, AVFormatContext *oc, AVCodec **codec, enum AVCodecID codec_id) { AVCodecContext *c; int i; /* find the encoder */ *codec = avcodec_find_encoder(codec_id); if (!(*codec)) { qDebug("Could not find encoder for '%s'\n",avcodec_get_name(codec_id)); exit(1); } ost->st = avformat_new_stream(oc, nullptr); if (!ost->st) { qDebug("Could not allocate stream\n"); exit(1); } ost->st->id = oc->nb_streams-1; c = avcodec_alloc_context3(*codec); if (!c) { qDebug("Could not alloc an encoding context\n"); exit(1); } ost->enc = c; switch ((*codec)->type) { case AVMEDIA_TYPE_AUDIO: //设置数据格式 //c->sample_fmt = (*codec)->sample_fmts ? 
(*codec)->sample_fmts[0] : AV_SAMPLE_FMT_FLTP; c->sample_fmt = AV_SAMPLE_FMT_FLTP; c->bit_rate = AUDIO_BIT_RATE_SET; //设置码率 c->sample_rate = AUDIO_RATE_SET; //音频采样率 //编码器支持的采样率 if ((*codec)->supported_samplerates) { c->sample_rate = (*codec)->supported_samplerates[0]; for (i = 0; (*codec)->supported_samplerates[i]; i++) { //判断编码器是否支持 if ((*codec)->supported_samplerates[i] == AUDIO_RATE_SET) { c->sample_rate = AUDIO_RATE_SET; } } } //设置采样通道 c->channels= av_get_channel_layout_nb_channels(c->channel_layout); c->channel_layout = AUDIO_CHANNEL_SET; //AV_CH_LAYOUT_MONO 单声道 AV_CH_LAYOUT_STEREO 立体声 if ((*codec)->channel_layouts) { c->channel_layout = (*codec)->channel_layouts[0]; for (i = 0; (*codec)->channel_layouts[i]; i++) { if ((*codec)->channel_layouts[i] == AUDIO_CHANNEL_SET) { c->channel_layout = AUDIO_CHANNEL_SET; } } } c->channels = av_get_channel_layout_nb_channels(c->channel_layout); ost->st->time_base = (AVRational){ 1, c->sample_rate }; break; case AVMEDIA_TYPE_VIDEO: c->codec_id = codec_id; //码率:影响体积,与体积成正比:码率越大,体积越大;码率越小,体积越小。 c->bit_rate = 400000; //设置码率 400kps /*分辨率必须是2的倍数。 */ c->width = 640; c->height = 480; /*时基:这是基本的时间单位(以秒为单位) *表示其中的帧时间戳。 对于固定fps内容, *时基应为1 / framerate,时间戳增量应为 *等于1。*/ ost->st->time_base = (AVRational){1,STREAM_FRAME_RATE}; c->time_base = ost->st->time_base; c->gop_size = 12; /* 最多每十二帧发射一帧内帧 */ c->pix_fmt = STREAM_PIX_FMT; if(c->codec_id == AV_CODEC_ID_MPEG2VIDEO) { /* 只是为了测试,添加了B帧 */ c->max_b_frames = 2; } if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) { /*需要避免使用其中一些系数溢出的宏块。 *普通视频不会发生这种情况,因为 *色度平面的运动与亮度平面不匹配。 */ c->mb_decision = 2; } break; default: break; } /* 某些格式希望流头分开。 */ if (oc->oformat->flags & AVFMT_GLOBALHEADER) c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER; } /**************************************************************/ /* audio output */ static AVFrame *alloc_audio_frame(enum AVSampleFormat sample_fmt, uint64_t channel_layout, int sample_rate, int nb_samples) { AVFrame *frame = av_frame_alloc(); int ret; if (!frame) { qDebug("Error allocating an audio frame\n"); exit(1); } frame->format = sample_fmt; frame->channel_layout = channel_layout; frame->sample_rate = sample_rate; frame->nb_samples = nb_samples; if (nb_samples) { ret = av_frame_get_buffer(frame, 0); if (ret < 0) { qDebug("Error allocating an audio buffer\n"); exit(1); } } return frame; } static void open_audio(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg) { AVCodecContext *c; int nb_samples; int ret; AVDictionary *opt = nullptr; c = ost->enc; /* open it */ av_dict_copy(&opt, opt_arg, 0); ret = avcodec_open2(c, codec, &opt); av_dict_free(&opt); if (ret < 0) { qDebug("无法打开音频编解码器\n"); exit(1); } /* 初始化信号发生器 */ ost->t = 0; ost->tincr = 2 * M_PI * 110.0 / c->sample_rate; /* 每秒增加110 Hz的频率 */ ost->tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate; if (c->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE) nb_samples = 10000; else nb_samples = c->frame_size; ost->frame = alloc_audio_frame(c->sample_fmt, c->channel_layout, c->sample_rate, nb_samples); ost->tmp_frame = alloc_audio_frame(AV_SAMPLE_FMT_S16, c->channel_layout, c->sample_rate, nb_samples); /*将流参数复制到多路复用器 */ ret = avcodec_parameters_from_context(ost->st->codecpar, c); if (ret < 0) { qDebug("无法复制流参数\n"); exit(1); } /* 创建重采样器上下文 */ ost->swr_ctx = swr_alloc(); if(!ost->swr_ctx) { qDebug("无法分配重采样器上下文\n"); exit(1); } /* 设定选项 */ av_opt_set_int (ost->swr_ctx, "in_channel_count", c->channels, 0); av_opt_set_int (ost->swr_ctx, "in_sample_rate", c->sample_rate, 0); av_opt_set_sample_fmt(ost->swr_ctx, 
"in_sample_fmt", AV_SAMPLE_FMT_S16, 0);//带符号16bit av_opt_set_int (ost->swr_ctx, "out_channel_count", c->channels, 0); av_opt_set_int (ost->swr_ctx, "out_sample_rate", c->sample_rate, 0); av_opt_set_sample_fmt(ost->swr_ctx, "out_sample_fmt", c->sample_fmt, 0); qDebug("音频通道数=%d\n",c->channels); qDebug("音频采样率=%d\n",c->sample_rate); /* 初始化重采样上下文 */ if ((ret = swr_init(ost->swr_ctx)) < 0) { qDebug("无法初始化重采样上下文\n"); exit(1); } } /*准备一个'frame_size'样本的16位虚拟音频帧,然后'nb_channels'频道。 */ static AVFrame *get_audio_frame(OutputStream *ost) { AVFrame *frame = ost->tmp_frame; int j, i, v; int16_t *q = (int16_t*)frame->data[0]; /* 检查我们是否要生成更多帧----用于判断是否结束*/ if(av_compare_ts(ost->next_pts, ost->enc->time_base, STREAM_DURATION, (AVRational){ 1, 1 }) >= 0) return nullptr; // qDebug("frame->nb_samples=%d\n",frame->nb_samples); //1024 // qDebug("ost->enc->channels=%d\n",ost->enc->channels); //消费者 // videoaudioencode.audio_encode_mutex.lock(); // videoaudioencode.audio_encode_Condition.wait(&videoaudioencode.audio_encode_mutex); // memcpy(audio_buffer_temp,audio_buffer,sizeof(audio_buffer)); // videoaudioencode.audio_encode_mutex.unlock(); #if 1 if(audio_buffer_r_count>=AUDIO_BUFFER_MAX_SIZE)audio_buffer_r_count=0; //音频数据赋值 for(j = 0; j<frame->nb_samples; j++) //nb_samples: 此帧描述的音频样本数(每个通道) { for(i=0;i<ost->enc->channels;i++) //channels:音频通道数 { *q++ = audio_buffer[j+audio_buffer_r_count]; //音频数据 } ost->t += ost->tincr; ost->tincr += ost->tincr2; } frame->pts = ost->next_pts; ost->next_pts += frame->nb_samples; //qDebug()<<"audio_buffer_r_count="<<audio_buffer_r_count; audio_buffer_r_count+=1024; #else for(j = 0; j<frame->nb_samples; j++) //nb_samples: 此帧描述的音频样本数(每个通道) { v=(int)(sin(ost->t) * 1000); for(i=0;i<ost->enc->channels;i++) //channels:音频通道数 { *q++ = v; //音频数据 } ost->t += ost->tincr; ost->tincr += ost->tincr2; } frame->pts = ost->next_pts; ost->next_pts += frame->nb_samples; #endif return frame; } /* *编码一个音频帧并将其发送到多路复用器 *编码完成后返回1,否则返回0 */ static int write_audio_frame(AVFormatContext *oc, OutputStream *ost) { AVCodecContext *c; AVPacket pkt = { 0 }; // data and size must be 0; AVFrame *frame; int ret; int got_packet; int dst_nb_samples; av_init_packet(&pkt); c = ost->enc; frame = get_audio_frame(ost); if(frame) { /*使用重采样器将样本从本机格式转换为目标编解码器格式*/ /*计算样本的目标数量*/ dst_nb_samples = av_rescale_rnd(swr_get_delay(ost->swr_ctx, c->sample_rate) + frame->nb_samples, c->sample_rate, c->sample_rate, AV_ROUND_UP); av_assert0(dst_nb_samples == frame->nb_samples); /*当我们将帧传递给编码器时,它可能会保留对它的引用 *内部; *确保我们不会在这里覆盖它 */ ret = av_frame_make_writable(ost->frame); if (ret < 0) exit(1); /*转换为目标格式 */ ret = swr_convert(ost->swr_ctx, ost->frame->data, dst_nb_samples, (const uint8_t **)frame->data, frame->nb_samples); if (ret < 0) { qDebug("Error while converting\n"); exit(1); } frame = ost->frame; frame->pts = av_rescale_q(ost->samples_count, (AVRational){1, c->sample_rate}, c->time_base); ost->samples_count += dst_nb_samples; } ret = avcodec_encode_audio2(c, &pkt, frame, &got_packet); if (ret < 0) { qDebug("Error encoding audio frame\n"); exit(1); } if (got_packet) { ret = write_frame(oc, &c->time_base, ost->st, &pkt); if (ret < 0) { qDebug("Error while writing audio frame\n"); exit(1); } } return (frame || got_packet) ? 
0 : 1; } /**************************************************************/ /* video output */ static AVFrame *alloc_picture(enum AVPixelFormat pix_fmt, int width, int height) { AVFrame *picture; int ret; picture = av_frame_alloc(); if (!picture) return nullptr; picture->format = pix_fmt; picture->width = width; picture->height = height; /* allocate the buffers for the frame data */ ret = av_frame_get_buffer(picture, 32); if(ret < 0) { qDebug("Could not allocate frame data.\n"); exit(1); } return picture; } static void open_video(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg) { int ret; AVCodecContext *c = ost->enc; AVDictionary *opt = nullptr; av_dict_copy(&opt, opt_arg, 0); /* open the codec */ ret = avcodec_open2(c, codec, &opt); av_dict_free(&opt); if (ret < 0) { qDebug("Could not open video codec\n"); exit(1); } /* allocate and init a re-usable frame */ ost->frame = alloc_picture(c->pix_fmt, c->width, c->height); if (!ost->frame) { qDebug("Could not allocate video frame\n"); exit(1); } ost->tmp_frame = nullptr; /* 将流参数复制到多路复用器 */ ret = avcodec_parameters_from_context(ost->st->codecpar, c); if (ret < 0) { qDebug("Could not copy the stream parameters\n"); exit(1); } } /* 准备图像数据 YUV422占用内存空间 = w * h * 2 YUV420占用内存空间 = width*height*3/2 */ static int fill_yuv_image(AVFrame *pict, int frame_index,int width, int height) { unsigned int y_size=width*height; //消费者 while(videoaudioencode.void_data_queue.isEmpty()) { QThread::msleep(10); if(videoaudioencode.run_flag==0) //停止编码 { return -1; } } videoaudioencode.video_encode_mutex.lock(); QByteArray byte=videoaudioencode.void_data_queue.dequeue(); //qDebug()<<"out="<<videoaudioencode.void_data_queue.size(); videoaudioencode.video_encode_mutex.unlock(); //将YUV数据拷贝到缓冲区 y_size=wXh memcpy(pict->data[0],byte.data(),y_size); memcpy(pict->data[1],byte.data()+y_size,y_size/4); memcpy(pict->data[2],byte.data()+y_size+y_size/4,y_size/4); return 0; } static AVFrame *get_video_frame(OutputStream *ost) { AVCodecContext *c = ost->enc; /* 检查我们是否要生成更多帧---判断是否结束录制 */ if(av_compare_ts(ost->next_pts, c->time_base,STREAM_DURATION, (AVRational){ 1, 1 }) >= 0) return nullptr; /*当我们将帧传递给编码器时,它可能会保留对它的引用 *内部; 确保我们在这里不覆盖它*/ if (av_frame_make_writable(ost->frame) < 0) exit(1); //制作虚拟图像 //DTS(解码时间戳)和PTS(显示时间戳) int err=fill_yuv_image(ost->frame, ost->next_pts, c->width, c->height); if(err)return nullptr; ost->frame->pts = ost->next_pts++; return ost->frame; } /* *编码一个视频帧并将其发送到多路复用器 *编码完成后返回1,否则返回0 */ static int write_video_frame(AVFormatContext *oc, OutputStream *ost) { int ret; AVCodecContext *c; AVFrame *frame; int got_packet = 0; AVPacket pkt = {0}; c=ost->enc; //获取一帧数据 frame = get_video_frame(ost); if(frame==nullptr)return 1; av_init_packet(&pkt); /* 编码图像 */ ret=avcodec_encode_video2(c, &pkt, frame, &got_packet); if(ret < 0) { qDebug("Error encoding video frame\n"); exit(1); } if(got_packet) { ret=write_frame(oc, &c->time_base, ost->st, &pkt); } else { ret = 0; } if(ret < 0) { qDebug("Error while writing video frame\n"); exit(1); } return (frame || got_packet) ? 
0 : 1; } static void close_stream(AVFormatContext *oc, OutputStream *ost) { avcodec_free_context(&ost->enc); av_frame_free(&ost->frame); av_frame_free(&ost->tmp_frame); sws_freeContext(ost->sws_ctx); swr_free(&ost->swr_ctx); } int Thread_VideoAudioEncode::StartUp_VideoAudioEncode() { OutputStream video_st = {0}, audio_st = { 0 }; AVOutputFormat *fmt; AVFormatContext *oc; AVCodec *audio_codec, *video_codec; int ret; int have_video = 0, have_audio = 0; int encode_video = 0, encode_audio = 0; AVDictionary *opt = nullptr; QDateTime dateTime(QDateTime::currentDateTime()); //时间效果: 2020-03-05 16:25::04 周四 QString qStr=""; qStr+=SAVE_FILE_PATH; //Android 手机的照相机文件夹 qStr+=dateTime.toString("yyyy-MM-dd-hh-mm-ss"); qStr+=".mp4"; qStr="rtmp://js.live-send.acg.tv/live-js/?streamname=live_68130189_71037877&key=b95d4cfda0c196518f104839fe5e7573"; char filename[500]; strcpy(filename,qStr.toLatin1().data()); emit LogSend(tr("当前的文件名称:%1\n").arg(filename)); /* 分配输出环境 */ //avformat_alloc_output_context2(&oc,nullptr,nullptr,filename); //存放到文件 avformat_alloc_output_context2(&oc,nullptr,"flv",filename); //发布到网络 if(!oc) { emit LogSend("code error.\n"); return -1; } fmt=oc->oformat; fmt->video_codec=AV_CODEC_ID_H264; fmt->audio_codec=AV_CODEC_ID_AAC; /*使用默认格式的编解码器添加音频和视频流,初始化编解码器。 */ if(fmt->video_codec != AV_CODEC_ID_NONE) { add_stream(&video_st,oc,&video_codec,fmt->video_codec); have_video = 1; encode_video = 1; } if(fmt->audio_codec != AV_CODEC_ID_NONE) { add_stream(&audio_st, oc, &audio_codec, fmt->audio_codec); have_audio = 1; encode_audio = 1; } /*现在已经设置了所有参数,可以打开音频视频编解码器,并分配必要的编码缓冲区。 */ if (have_video) open_video(oc, video_codec, &video_st, opt); if (have_audio) open_audio(oc, audio_codec, &audio_st, opt); av_dump_format(oc, 0, filename, 1); /* 打开输出文件(如果需要) */ if(!(fmt->flags & AVFMT_NOFILE)) { qDebug()<<"存放视频到文件.\n"; ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE); if (ret < 0) { qDebug("Could not open '%s'\n",filename); return -1; } } /* 编写流头(如果有)*/ ret=avformat_write_header(oc,&opt); if(ret<0) { qDebug("Error occurred when opening output file\n"); return -1; } while(encode_video || encode_audio) { /* 选择要编码的流*/ if(encode_video &&(!encode_audio || av_compare_ts(video_st.next_pts, video_st.enc->time_base,audio_st.next_pts, audio_st.enc->time_base) <= 0)) { QElapsedTimer mstimer; mstimer.start(); encode_video = !write_video_frame(oc,&video_st); double time = (double)mstimer.nsecsElapsed()/(double)1000000; qDebug() <<"video_encode:"<< time<<"ms"; } else { QElapsedTimer mstimer; mstimer.start(); encode_audio = !write_audio_frame(oc,&audio_st); double time = (double)mstimer.nsecsElapsed()/(double)1000000; qDebug() <<"audio_encode:"<< time<<"ms"; } } /*编写预告片(如果有)。 预告片必须在之前写好 *关闭在编写标头时打开的CodecContext; 除此以外 * av_write_trailer()可能会尝试使用已释放的内存 * av_codec_close()。 */ av_write_trailer(oc); /* Close each codec. */ if (have_video) close_stream(oc, &video_st); if (have_audio) close_stream(oc, &audio_st); if (!(fmt->flags & AVFMT_NOFILE)) /* Close the output file. */ avio_closep(&oc->pb); /* free the stream */ avformat_free_context(oc); qDebug("编码完成.线程退出.\n"); return 0; } //编码 void Thread_VideoAudioEncode ::run() { while(1) { qDebug()<<"编码线程开始运行."; audio_buffer_r_count=0; audio_buffer_w_count=0; StartUp_VideoAudioEncode(); //启动视频音频编码 if(videoaudioencode.run_flag==0) //判断是否停止编码 { break; } } }
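For completeness, here is a hedged sketch of how this thread is presumably driven from the UI side, using only the globals declared in this file; the actual call site lives in the project's window code, which is not shown in this article:

// Assumed usage, based on the globals defined in video_audio_encode.cpp:
videoaudioencode.run_flag = 1;   // nonzero keeps the encode loop running
thread_VideoenCode.start();      // QThread::start() invokes run() above

// ... streaming is now in progress; to stop:
videoaudioencode.run_flag = 0;   // polled in run() and fill_yuv_image()
thread_VideoenCode.wait();       // join the thread before tearing down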