1. Include the headers we need
extern "C" {
// SDL
// FFmpeg
}
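The include list itself is elided above. As a reference only, here is a minimal sketch of what this block usually contains for an SDL2/FFmpeg player, plus the Qt headers the DataInfo class below relies on; the exact paths depend on how SDL2 and FFmpeg are installed on your system.

// Sketch only; adjust to your SDL2/FFmpeg include paths.
extern "C" {
// SDL
#include <SDL.h>
// FFmpeg
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libswresample/swresample.h>
#include <libavutil/frame.h>
}

// Qt types used by the DataInfo class below
#include <QObject>
#include <QMutex>
#include <QList>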
2. Define the maximum audio buffer size
#define AUDIO_BUFF_MAX_SIZE (uint64_t)(1024 * 10)
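How this cap is used is not shown here. One plausible use (an assumption, not code from the post) is turning the byte limit into the per-channel sample count that swr_convert() accepts when resampling a decoded audio frame:

// Hypothetical helper: derive the swr_convert() sample cap from the byte cap.
// outChannels/outFmt describe the resampler's output, e.g. 2 and AV_SAMPLE_FMT_S16.
static int maxResampledSamples(int outChannels, AVSampleFormat outFmt)
{
    int bytesPerSampleFrame = outChannels * av_get_bytes_per_sample(outFmt);
    return (int)(AUDIO_BUFF_MAX_SIZE / bytesPerSampleFrame);   // 10240 / (2 * 2) = 2560 for stereo S16
}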
3. Related state definitions
// Playback state
enum _PLAY_STATE {
    PLAY_INIT  = -1,   // initialized
    PLAY_OPEN  = 0,    // opened
    PLAY_PLAY  = 1,    // playing
    PLAY_PAUSE = 2,    // paused
    PLAY_STOP  = 3,    // stopped
};

// Playback speed
enum _PLAY_SPEED {
    PLAY_SLOW   = 0,   // 0.5x speed
    PLAY_NORMAL = 1,   // normal speed
    PLAY_FAST2  = 2,   // 2x speed
    PLAY_FAST4  = 4,   // 4x speed
};

// Seek direction
enum _SEEK_FORWARD {
    SEEK_BACKWARD = -1,   // seek backward
    SEEK_NORMAL   = 0,
    SEEK_FORWARD  = 1,    // seek forward
};
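The post does not show how the speed enum is consumed. A plausible sketch (an assumption, not from the original code) is to scale the per-frame delay in the render loop, treating PLAY_SLOW as 0.5x and the other values as direct divisors:

// Hypothetical helper: scale the inter-frame delay by the current play speed.
static int64_t scaledDelayMs(int64_t frameDelayMs, int playSpeed)
{
    if (playSpeed == PLAY_SLOW)
        return frameDelayMs * 2;        // 0.5x: wait twice as long per frame
    return frameDelayMs / playSpeed;    // PLAY_NORMAL, PLAY_FAST2, PLAY_FAST4
}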
// Decoded video data
struct DecodeVideoData {
    int64_t ptsTime;   // in ms
    AVFrame *frame;
};

// Decoded audio data
struct DecodeAudioData {
    int64_t ptsTime;   // in ms
    int buffSize;
    Uint8 *buff;
};
4. Main header code
class DataInfo : public QObject
{
    Q_OBJECT
public:
    explicit DataInfo(WId id, QObject *parent = nullptr);
    ~DataInfo();

    SDL_Window *getSDLWind();

    void updateInfo(AVFormatContext *fCtx, AVCodecContext *vCtx, AVCodecContext *aCtx,
                    SwrContext *sCtx, AVSampleFormat sFmt, int vIndex, int aIndex);

    AVFormatContext *getFormatCtx();
    AVCodecContext *getVideoCtx();
    AVCodecContext *getAudioCtx();
    SwrContext *getSwrCtx();
    AVSampleFormat getSampleFmt();
    int getSDLFmt();
    int getVideoIndex();
    int getAudioIndex();
    int64_t getLength();

    void setPlayTime(int64_t ptsTime);
    int64_t getPlayTime();
    void setDecodeState(bool isStart);
    bool getDecodeState();
    void setPlayState(int state);
    int getPlayState();
    void setPlaySpeed(int speed);
    int getPlaySpeed();
    void setPlayVolume(int volume);
    int getPlayVolume();

    void setSeekTime(int64_t ptsTime);
    int64_t getSeekTime();
    void setSeekForward(int forward);
    int getSeekForward();
    void setSeekFlag(bool flag);
    bool getSeekFlag();

    void setDecodeCache(int size);
    void videoPush(AVFrame *frame, int64_t ptsTime);
    DecodeVideoData videoPop();
    bool videoIsEmpty();
    bool videoIsFull();
    void audioPush(int buffSize, Uint8 *buff, int64_t ptsTime);
    DecodeAudioData audioPop();
    DecodeAudioData audioFirst();
    bool audioIsEmpty();
    bool audioIsFull();
    void dataListRelease();

    void setAudioClock(int64_t time);
    int64_t getAudioClock();

private:
    // SDL_Window describes a window object: the window presented on the device,
    // the surface that all rendered images are drawn onto.
    SDL_Window *mSDLWind = nullptr;

    // AVFormatContext stores the information carried by the audio/video container format.
    // It is the top-level structure in FFmpeg; demuxing and decoding of a file start here.
    AVFormatContext *mFormatCtx = nullptr;

    // AVCodecContext is FFmpeg's codec context structure (AVCodec holds the codec parameters).
    // AVCodecContext contains structures such as AVCodec, AVCodecInternal and AVRational,
    // enums such as AVCodecID, AVMediaType, AVPixelFormat and AVSampleFormat,
    // key video parameters such as width, height, framerate and bitrate,
    // and audio parameters such as samplerate and channels.
    AVCodecContext *mVideoCtx = nullptr;
    AVCodecContext *mAudioCtx = nullptr;

    // Resampling context
    SwrContext *mSwrCtx = nullptr;

    /* AVSampleFormat is an enum defined by FFmpeg that describes the data format of audio
       samples: integer and floating-point formats, their bit depth and their storage layout.
       Common values:
       AV_SAMPLE_FMT_NONE: invalid sample format
       AV_SAMPLE_FMT_U8:   unsigned 8-bit integer
       AV_SAMPLE_FMT_S16:  signed 16-bit integer
       AV_SAMPLE_FMT_S32:  signed 32-bit integer
       AV_SAMPLE_FMT_FLT:  32-bit float
       AV_SAMPLE_FMT_DBL:  64-bit double
       AV_SAMPLE_FMT_U8P / S16P / S32P / FLTP / DBLP: the planar variants of the above */
    AVSampleFormat mSampleFormat = AV_SAMPLE_FMT_NONE;
    int mSDLFormat = 0;

    int mVideoIndex = -1;
    int mAudioIndex = -1;

    bool mDecodeState = false;              // decoding state

    QMutex mPlayMutex;
    int64_t mPlayTime = 0;                  // playback position
    int mPlayState = PLAY_INIT;             // playback state
    int mPlaySpeed = PLAY_NORMAL;           // playback speed
    int mPlayVolume = SDL_MIX_MAXVOLUME;    // playback volume: 0-128

    QMutex mSeekMutex;
    int64_t mSeekTime = -1;                 // seek target
    int mSeekForward = SEEK_NORMAL;         // seek direction (fast-forward / rewind)
    bool mSeekFlag = false;

    QMutex mVideoListMutex;
    QMutex mAudioListMutex;
    int mDateListSize = 30;                 // cache depth for the decode queues
    QList<DecodeVideoData> mVideoList;
    QList<DecodeAudioData> mAudioList;

    QMutex mAudioClockMutex;
    int64_t mAudioClock = 0;

signals:
};
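For context, the following is a hypothetical helper (not part of the original code) showing where updateInfo()'s arguments typically come from. It is written against the FFmpeg 4.x-style API with only minimal error handling, and the name openMedia is an assumption:

// Hypothetical wiring: open a file, create the codec contexts, set up the resampler
// and hand everything to DataInfo::updateInfo().
static bool openMedia(DataInfo *info, const char *path)
{
    AVFormatContext *fmtCtx = nullptr;
    if (avformat_open_input(&fmtCtx, path, nullptr, nullptr) < 0 ||
        avformat_find_stream_info(fmtCtx, nullptr) < 0)
        return false;

    int vIndex = av_find_best_stream(fmtCtx, AVMEDIA_TYPE_VIDEO, -1, -1, nullptr, 0);
    int aIndex = av_find_best_stream(fmtCtx, AVMEDIA_TYPE_AUDIO, -1, -1, nullptr, 0);

    auto openCodec = [&](int index) -> AVCodecContext * {
        const AVCodec *codec = avcodec_find_decoder(fmtCtx->streams[index]->codecpar->codec_id);
        AVCodecContext *ctx = avcodec_alloc_context3(codec);
        avcodec_parameters_to_context(ctx, fmtCtx->streams[index]->codecpar);
        avcodec_open2(ctx, codec, nullptr);
        return ctx;
    };
    AVCodecContext *vCtx = openCodec(vIndex);
    AVCodecContext *aCtx = openCodec(aIndex);

    // Resampler: convert whatever the decoder outputs into packed S16 samples for SDL.
    SwrContext *sCtx = swr_alloc_set_opts(nullptr,
                                          aCtx->channel_layout, AV_SAMPLE_FMT_S16, aCtx->sample_rate,
                                          aCtx->channel_layout, aCtx->sample_fmt, aCtx->sample_rate,
                                          0, nullptr);
    swr_init(sCtx);

    info->updateInfo(fmtCtx, vCtx, aCtx, sCtx, AV_SAMPLE_FMT_S16, vIndex, aIndex);
    return true;
}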
5. Constructor code
DataInfo::DataInfo(WId id, QObject *parent)
    : QObject{parent}
{
    // Initialize the SDL library
    SDL_Init(SDL_INIT_AUDIO | SDL_INIT_VIDEO);
    // Create an SDL window from the native window handle
    mSDLWind = SDL_CreateWindowFrom((void *)id);
    // Show the window if it was created successfully
    if (mSDLWind) {
        SDL_ShowWindow(mSDLWind);
    }
}
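Usage sketch (an assumption, with a hypothetical widget name): the WId is typically the winId() of the Qt widget that should display the video, so the SDL window is embedded directly inside the player window.

// Hypothetical caller inside a QWidget-based player window.
DataInfo *dataInfo = new DataInfo(ui->videoWidget->winId(), this);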
6. Resource handling
DataInfo::~DataInfo()
{
    // Destroy the SDL window
    SDL_DestroyWindow(mSDLWind);
    // Release all resources held by SDL and shut it down
    SDL_Quit();
}

// Get the window
SDL_Window *DataInfo::getSDLWind()
{
    return mSDLWind;
}
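getSDLWind() is what the video render thread would draw into. A minimal sketch (an assumption, not from the post) of presenting one decoded frame with SDL2, assuming the decoder outputs AV_PIX_FMT_YUV420P:

// Hypothetical render helper: upload one YUV420P frame into a streaming texture and present it.
// The renderer and texture are created lazily from the first frame's size to keep the sketch short.
static void renderFrame(SDL_Window *wind, AVFrame *frame)
{
    static SDL_Renderer *renderer = SDL_CreateRenderer(wind, -1, SDL_RENDERER_ACCELERATED);
    static SDL_Texture *texture = SDL_CreateTexture(renderer, SDL_PIXELFORMAT_IYUV,
                                                    SDL_TEXTUREACCESS_STREAMING,
                                                    frame->width, frame->height);
    SDL_UpdateYUVTexture(texture, nullptr,
                         frame->data[0], frame->linesize[0],
                         frame->data[1], frame->linesize[1],
                         frame->data[2], frame->linesize[2]);
    SDL_RenderClear(renderer);
    SDL_RenderCopy(renderer, texture, nullptr, nullptr);
    SDL_RenderPresent(renderer);
}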
7. Updating the parameters
void DataInfo::updateInfo(AVFormatContext *fCtx, AVCodecContext *vCtx, AVCodecContext *aCtx,
                          SwrContext *sCtx, AVSampleFormat sFmt, int vIndex, int aIndex)
{
    mFormatCtx = fCtx;
    mVideoCtx = vCtx;
    mAudioCtx = aCtx;
    mSwrCtx = sCtx;
    mSampleFormat = sFmt;

    // Map the FFmpeg sample format to the matching SDL audio format
    switch (mSampleFormat) {
    case AV_SAMPLE_FMT_U8:
        mSDLFormat = AUDIO_U8;
        break;
    case AV_SAMPLE_FMT_S16:
        mSDLFormat = AUDIO_S16SYS;
        break;
    case AV_SAMPLE_FMT_S32:
        mSDLFormat = AUDIO_S32SYS;
        break;
    case AV_SAMPLE_FMT_FLT:
        mSDLFormat = AUDIO_F32SYS;
        break;
    default:
        mSDLFormat = AUDIO_U8;
        break;
    }

    mVideoIndex = vIndex;
    mAudioIndex = aIndex;
}
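mSDLFormat exists so the SDL audio device can be opened with a format matching the resampler output. A sketch of that step follows; it is an assumption, and names such as dataInfo and audioCallback (a callback like the one sketched at the end of section 8) are hypothetical.

// Hypothetical device setup: open the SDL audio device with the mapped format.
SDL_AudioSpec wanted;
SDL_zero(wanted);
wanted.freq = dataInfo->getAudioCtx()->sample_rate;         // sample rate from the decoder
wanted.format = (SDL_AudioFormat)dataInfo->getSDLFmt();     // mapped in updateInfo()
wanted.channels = 2;                                        // assumed resampler output layout
wanted.silence = 0;
wanted.samples = 1024;                                      // callback chunk size in sample frames
wanted.callback = audioCallback;                            // hypothetical callback
wanted.userdata = dataInfo;
if (SDL_OpenAudio(&wanted, nullptr) == 0)
    SDL_PauseAudio(0);                                      // start pulling audio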
8. Setters and getters
AVFormatContext *DataInfo::getFormatCtx()
{
    return mFormatCtx;
}

AVCodecContext *DataInfo::getVideoCtx()
{
    return mVideoCtx;
}

AVCodecContext *DataInfo::getAudioCtx()
{
    return mAudioCtx;
}

SwrContext *DataInfo::getSwrCtx()
{
    return mSwrCtx;
}

AVSampleFormat DataInfo::getSampleFmt()
{
    return mSampleFormat;
}

int DataInfo::getSDLFmt()
{
    return mSDLFormat;
}

int DataInfo::getVideoIndex()
{
    return mVideoIndex;
}

int DataInfo::getAudioIndex()
{
    return mAudioIndex;
}

int64_t DataInfo::getLength()
{
    // Get the time base of the video stream
    AVRational timeBase = mFormatCtx->streams[mVideoIndex]->time_base;
    // Compute the total duration of the video in milliseconds
    return mFormatCtx->streams[mVideoIndex]->duration * 1000 * av_q2d(timeBase);
}

void DataInfo::setPlayTime(int64_t ptsTime)
{
    mPlayMutex.lock();
    mPlayTime = ptsTime;
    mPlayMutex.unlock();
}

int64_t DataInfo::getPlayTime()
{
    mPlayMutex.lock();
    int64_t time = mPlayTime;
    mPlayMutex.unlock();
    return time;
}

void DataInfo::setDecodeState(bool isStart)
{
    mDecodeState = isStart;
}

bool DataInfo::getDecodeState()
{
    return mDecodeState;
}

void DataInfo::setPlayState(int state)
{
    mPlayState = state;
}

int DataInfo::getPlayState()
{
    return mPlayState;
}

void DataInfo::setPlaySpeed(int speed)
{
    if (speed == PLAY_SLOW || speed == PLAY_NORMAL || speed == PLAY_FAST2 || speed == PLAY_FAST4)
        mPlaySpeed = speed;
}

int DataInfo::getPlaySpeed()
{
    return mPlaySpeed;
}

void DataInfo::setPlayVolume(int volume)
{
    if (volume >= 0 && volume <= SDL_MIX_MAXVOLUME)
        mPlayVolume = volume;
}

int DataInfo::getPlayVolume()
{
    return mPlayVolume;
}

void DataInfo::setSeekTime(int64_t ptsTime)
{
    mSeekTime = ptsTime;
}

int64_t DataInfo::getSeekTime()
{
    return mSeekTime;
}

void DataInfo::setSeekForward(int forward)
{
    mSeekMutex.lock();
    mSeekForward = forward;
    mSeekMutex.unlock();
}

int DataInfo::getSeekForward()
{
    mSeekMutex.lock();
    int flag = mSeekForward;
    mSeekMutex.unlock();
    return flag;
}

void DataInfo::setSeekFlag(bool flag)
{
    mSeekMutex.lock();
    mSeekFlag = flag;
    mSeekMutex.unlock();
}

bool DataInfo::getSeekFlag()
{
    mSeekMutex.lock();
    bool flag = mSeekFlag;
    mSeekMutex.unlock();
    return flag;
}

void DataInfo::setDecodeCache(int size)
{
    if (size >= 10 && size <= 100)
        mDateListSize = size;
}

void DataInfo::videoPush(AVFrame *frame, int64_t ptsTime)
{
    mVideoListMutex.lock();
    DecodeVideoData data;
    data.ptsTime = ptsTime;
    // av_frame_clone creates a new AVFrame that references the data of the source frame,
    // so the queued frame stays valid after the decoder reuses its own frame.
    data.frame = av_frame_clone(frame);
    mVideoList.append(data);
    mVideoListMutex.unlock();
}

DecodeVideoData DataInfo::videoPop()
{
    mVideoListMutex.lock();
    DecodeVideoData data;
    data.ptsTime = -1;
    if (!mVideoList.isEmpty())
        data = mVideoList.takeFirst();
    mVideoListMutex.unlock();
    return data;
}

bool DataInfo::videoIsEmpty()
{
    mVideoListMutex.lock();
    bool flag = mVideoList.isEmpty();
    mVideoListMutex.unlock();
    return flag;
}

bool DataInfo::videoIsFull()
{
    mVideoListMutex.lock();
    bool flag = (mVideoList.size() >= mDateListSize);
    mVideoListMutex.unlock();
    return flag;
}

void DataInfo::audioPush(int buffSize, Uint8 *buff, int64_t ptsTime)
{
    mAudioListMutex.lock();
    DecodeAudioData data;
    data.ptsTime = ptsTime;
    data.buffSize = buffSize;
    data.buff = buff;
    mAudioList.append(data);
    mAudioListMutex.unlock();
}

DecodeAudioData DataInfo::audioPop()
{
    mAudioListMutex.lock();
    DecodeAudioData data;
    data.ptsTime = -1;
    // Remove and return the first element
    if (!mAudioList.isEmpty())
        data = mAudioList.takeFirst();
    mAudioListMutex.unlock();
    return data;
}

DecodeAudioData DataInfo::audioFirst()
{
    mAudioListMutex.lock();
    DecodeAudioData data;
    data.ptsTime = -1;
    // Return the first element without removing it
    if (!mAudioList.isEmpty())
        data = mAudioList.first();
    mAudioListMutex.unlock();
    return data;
}

bool DataInfo::audioIsEmpty()
{
    mAudioListMutex.lock();
    bool flag = mAudioList.isEmpty();
    mAudioListMutex.unlock();
    return flag;
}

bool DataInfo::audioIsFull()
{
    mAudioListMutex.lock();
    bool flag = (mAudioList.size() >= mDateListSize * 2);
    mAudioListMutex.unlock();
    return flag;
}

void DataInfo::dataListRelease()
{
    mVideoListMutex.lock();
    for (auto &data : mVideoList) {
        av_frame_free(&data.frame);
        data.frame = nullptr;
    }
    mVideoList.clear();
    mVideoListMutex.unlock();

    mAudioListMutex.lock();
    for (auto &data : mAudioList) {
        // av_free releases memory that was allocated by FFmpeg's allocators (av_malloc and
        // friends). Freeing the buffers here avoids memory leaks; the pointer must be valid
        // and no longer in use when it is released.
        av_free(data.buff);
        data.buff = nullptr;
    }
    mAudioList.clear();
    mAudioListMutex.unlock();
}

void DataInfo::setAudioClock(int64_t time)
{
    mAudioClockMutex.lock();
    mAudioClock = time;
    mAudioClockMutex.unlock();
}

int64_t DataInfo::getAudioClock()
{
    mAudioClockMutex.lock();
    int64_t time = mAudioClock;
    mAudioClockMutex.unlock();
    return time;
}
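To show how the audio queue, the volume and the audio clock are meant to work together, here is a hedged sketch of the SDL audio callback that would consume them. It is hypothetical (not from the post) and assumes each queued chunk fits into one callback buffer.

// Hypothetical SDL audio callback: pop one decoded chunk, mix it at the current
// volume, free its buffer, and publish its PTS as the audio clock for A/V sync.
static void audioCallback(void *userdata, Uint8 *stream, int len)
{
    DataInfo *info = static_cast<DataInfo *>(userdata);
    SDL_memset(stream, 0, len);                       // silence by default

    if (info->getPlayState() != PLAY_PLAY || info->audioIsEmpty())
        return;

    DecodeAudioData data = info->audioPop();
    if (data.ptsTime < 0)
        return;

    int mixLen = data.buffSize < len ? data.buffSize : len;
    SDL_MixAudioFormat(stream, data.buff, (SDL_AudioFormat)info->getSDLFmt(),
                       mixLen, info->getPlayVolume());

    info->setAudioClock(data.ptsTime);                // video rendering syncs against this
    info->setPlayTime(data.ptsTime);                  // current playback position
    av_free(data.buff);                               // buffer was allocated by the decoder side
}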