FFmpeg in Practice: Converting Video to Images

Summary: a hands-on walkthrough of decoding a video with FFmpeg and saving each frame as an image file.

The video source can be a local file or a stream pulled from a server. The pipeline is: open the container (demux) -> decode -> YUV -> RGB -> write to file.
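For a quick experiment without writing any code, the stock ffmpeg command-line tool can do the same job (the input name and output pattern here are just examples):

ffmpeg -i input.mp4 -vf fps=1 frame-%04d.bmp

This saves one frame per second as frame-0001.bmp, frame-0002.bmp, and so on; drop the fps filter to dump every frame. The program below implements the same pipeline with the FFmpeg libraries.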

The YUV -> RGB step uses libswscale's pixel-format conversion function:

sws_scale(img_convert_ctx, frame->data, frame->linesize,
          0, h, pFrameRGB->data, pFrameRGB->linesize);
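The conversion context passed as the first argument is created once with sws_getContext. A minimal sketch, assuming YUV420P input of size w x h (the full program below takes these values from the codec context instead):

struct SwsContext *img_convert_ctx =
    sws_getContext(w, h, AV_PIX_FMT_YUV420P,  /* source size and pixel format */
                   w, h, AV_PIX_FMT_BGR24,    /* destination: BMP stores pixels as BGR */
                   SWS_BICUBIC, NULL, NULL, NULL);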


The full implementation follows:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libswscale/swscale.h>
#define INBUF_SIZE 4096
#define WORD uint16_t
#define DWORD uint32_t
#define LONG int32_t
#pragma pack(2) /* 2-byte packing so the structs below match the on-disk BMP header sizes */
typedef struct tagBITMAPFILEHEADER {
  WORD  bfType;
  DWORD bfSize;
  WORD  bfReserved1;
  WORD  bfReserved2;
  DWORD bfOffBits;
} BITMAPFILEHEADER, *PBITMAPFILEHEADER;
typedef struct tagBITMAPINFOHEADER {
  DWORD biSize;
  LONG  biWidth;
  LONG  biHeight;
  WORD  biPlanes;
  WORD  biBitCount;
  DWORD biCompression;
  DWORD biSizeImage;
  LONG  biXPelsPerMeter;
  LONG  biYPelsPerMeter;
  DWORD biClrUsed;
  DWORD biClrImportant;
} BITMAPINFOHEADER, *PBITMAPINFOHEADER;
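/* On-disk BMP layout: a 14-byte BITMAPFILEHEADER, a 40-byte BITMAPINFOHEADER,
   then the raw pixel data. With pack(2) the structs above match those sizes
   exactly, so they can be fwritten to the file as-is. */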
void saveBMP(struct SwsContext *img_convert_ctx, AVFrame *frame, char *filename)
{
    //1 Convert the pixel format: YUV420 => packed BGR24
    int w = frame->width;
    int h = frame->height;
    int numBytes=avpicture_get_size(AV_PIX_FMT_BGR24, w, h); // deprecated; av_image_get_buffer_size() is the modern equivalent
    uint8_t *buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));
    AVFrame *pFrameRGB = av_frame_alloc();
    /* the buffer is tightly packed (alignment 1), one BGR24 row after another */
    /*
    if (av_image_alloc(pFrameRGB->data, pFrameRGB->linesize,
                              w, h, AV_PIX_FMT_BGR24, pix_fmt, 1) < 0) {
        fprintf(stderr, "Could not allocate destination image\n");
        exit(1);
    }
    */
    avpicture_fill((AVPicture *)pFrameRGB, buffer, AV_PIX_FMT_BGR24, w, h); // deprecated; av_image_fill_arrays() is the modern equivalent
    sws_scale(img_convert_ctx, frame->data, frame->linesize,
              0, h, pFrameRGB->data, pFrameRGB->linesize);
    //2 Fill in the BITMAPINFOHEADER
    BITMAPINFOHEADER header;
    header.biSize = sizeof(BITMAPINFOHEADER);
    header.biWidth = w;
    header.biHeight = h*(-1); // negative height => top-down bitmap, matching the row order we write
    header.biBitCount = 24;
    header.biCompression = 0;
    header.biSizeImage = 0;
    header.biClrImportant = 0;
    header.biClrUsed = 0;
    header.biXPelsPerMeter = 0;
    header.biYPelsPerMeter = 0;
    header.biPlanes = 1;
    //3 Fill in the BITMAPFILEHEADER
    BITMAPFILEHEADER bmpFileHeader = {0,};
    //HANDLE hFile = NULL;
    DWORD dwTotalWriten = 0;
    DWORD dwWriten;
    bmpFileHeader.bfType = 0x4d42; //'BM';
    bmpFileHeader.bfSize = sizeof(BITMAPFILEHEADER) + sizeof(BITMAPINFOHEADER)+ numBytes;
    bmpFileHeader.bfOffBits=sizeof(BITMAPFILEHEADER)+sizeof(BITMAPINFOHEADER);
    FILE* pf = fopen(filename, "wb");
    if (!pf) {
        fprintf(stderr, "Could not open %s\n", filename);
    } else {
        fwrite(&bmpFileHeader, sizeof(BITMAPFILEHEADER), 1, pf);
        fwrite(&header, sizeof(BITMAPINFOHEADER), 1, pf);
        // NB: BMP rows must be padded to 4-byte boundaries; this simple
        // writer assumes w*3 is already a multiple of 4
        fwrite(pFrameRGB->data[0], 1, numBytes, pf);
        fclose(pf);
    }
    // Release resources
    av_freep(&buffer);
    av_frame_free(&pFrameRGB);
}
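/* pgm_save: an alternative dump format; it writes a single plane (e.g. the
   luma plane frame->data[0]) as a binary PGM (P5) grayscale image. */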
static void pgm_save(unsigned char *buf, int wrap, int xsize, int ysize,
                     char *filename)
{
    FILE *f;
    int i;
    f = fopen(filename,"wb"); /* binary mode; the P5 payload is binary */
    fprintf(f, "P5\n%d %d\n%d\n", xsize, ysize, 255);
    for (i = 0; i < ysize; i++)
        fwrite(buf + i * wrap, 1, xsize, f);
    fclose(f);
}
static int decode_write_frame(const char *outfilename, AVCodecContext *avctx,
                              struct SwsContext *img_convert_ctx, AVFrame *frame, int *frame_count, AVPacket *pkt, int last)
{
    int len, got_frame;
    char buf[1024];
    len = avcodec_decode_video2(avctx, frame, &got_frame, pkt); // deprecated since FFmpeg 3.1; see the send/receive sketch after the listing
    if (len < 0) {
        fprintf(stderr, "Error while decoding frame %d\n", *frame_count);
        return len;
    }
    if (got_frame) {
        printf("Saving %sframe %3d\n", last ? "last " : "", *frame_count);
        fflush(stdout);
        /* the picture is allocated by the decoder, no need to free it */
        snprintf(buf, sizeof(buf), "%s-%d.bmp", outfilename, *frame_count);
        /*
        pgm_save(frame->data[0], frame->linesize[0],
                 frame->width, frame->height, buf);
        */
        saveBMP(img_convert_ctx, frame, buf);
        (*frame_count)++;
    }
    if (pkt->data) {
        pkt->size -= len;
        pkt->data += len;
    }
    return 0;
}
int main(int argc, char **argv)
{
    int ret;
    const char *filename, *outfilename;
    AVFormatContext *fmt_ctx = NULL;
    const AVCodec *codec;
    AVCodecContext *c= NULL;
    AVStream *st = NULL;
    int stream_index;
    int frame_count;
    AVFrame *frame;
    struct SwsContext *img_convert_ctx;
    //uint8_t inbuf[INBUF_SIZE + AV_INPUT_BUFFER_PADDING_SIZE];
    AVPacket avpkt;
    if (argc <= 2) {
        fprintf(stderr, "Usage: %s <input file> <output file>\n", argv[0]);
        exit(0);
    }
    filename    = argv[1];
    outfilename = argv[2];
    /* register all formats and codecs */
    av_register_all(); // a deprecated no-op since FFmpeg 4.0, kept for older versions
    /* open input file, and allocate format context */
    if (avformat_open_input(&fmt_ctx, filename, NULL, NULL) < 0) {
        fprintf(stderr, "Could not open source file %s\n", filename);
        exit(1);
    }
    /* retrieve stream information */
    if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
        fprintf(stderr, "Could not find stream information\n");
        exit(1);
    }
    /* dump input information to stderr */
    av_dump_format(fmt_ctx, 0, filename, 0);
    av_init_packet(&avpkt);
    /* set end of buffer to 0 (this ensures that no overreading happens for damaged MPEG streams) */
    //memset(inbuf + INBUF_SIZE, 0, AV_INPUT_BUFFER_PADDING_SIZE);
    //
    ret = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
    if (ret < 0) {
        fprintf(stderr, "Could not find %s stream in input file '%s'\n",
                av_get_media_type_string(AVMEDIA_TYPE_VIDEO), filename);
        return ret;
    }
    stream_index = ret;
    st = fmt_ctx->streams[stream_index];
    /* find decoder for the stream */
    codec = avcodec_find_decoder(st->codecpar->codec_id);
    if (!codec) {
        fprintf(stderr, "Failed to find %s codec\n",
                av_get_media_type_string(AVMEDIA_TYPE_VIDEO));
        return AVERROR(EINVAL);
    }
    c = avcodec_alloc_context3(codec); // pass the codec so its defaults are installed
    if (!c) {
        fprintf(stderr, "Could not allocate video codec context\n");
        exit(1);
    }
    /* Copy codec parameters from input stream to output codec context */
    if ((ret = avcodec_parameters_to_context(c, st->codecpar)) < 0) {
        fprintf(stderr, "Failed to copy %s codec parameters to decoder context\n",
                av_get_media_type_string(AVMEDIA_TYPE_VIDEO));
        return ret;
    }
    /*
    if (codec->capabilities & AV_CODEC_CAP_TRUNCATED)
        c->flags |= AV_CODEC_FLAG_TRUNCATED; // we do not send complete frames
    */
    /* For some codecs, such as msmpeg4 and mpeg4, width and height
       MUST be initialized there because this information is not
       available in the bitstream. */
    /* open it */
    if (avcodec_open2(c, codec, NULL) < 0) {
        fprintf(stderr, "Could not open codec\n");
        exit(1);
    }
    img_convert_ctx = sws_getContext(c->width, c->height,
                                     c->pix_fmt,
                                     c->width, c->height,
                                     AV_PIX_FMT_BGR24, /* must match the BGR24 buffer saveBMP fills; RGB24 would swap red and blue */
                                     SWS_BICUBIC, NULL, NULL, NULL);
    if (img_convert_ctx == NULL)
    {
        fprintf(stderr, "Cannot initialize the conversion context\n");
        exit(1);
    }
    frame = av_frame_alloc();
    if (!frame) {
        fprintf(stderr, "Could not allocate video frame\n");
        exit(1);
    }
    frame_count = 0;
    while (av_read_frame(fmt_ctx, &avpkt) >= 0) {
        /* feed the decoder one packet at a time; only packets that belong
           to the selected video stream are decoded */
        if(avpkt.stream_index == stream_index){
            if (decode_write_frame(outfilename, c, img_convert_ctx, frame, &frame_count, &avpkt, 0) < 0)
                exit(1);
        }
        av_packet_unref(&avpkt);
    }
    /* Some codecs, such as MPEG, transmit the I- and P-frame with a
       latency of one frame. You must do the following to have a
       chance to get the last frame of the video. */
    avpkt.data = NULL;
    avpkt.size = 0;
    decode_write_frame(outfilename, c, img_convert_ctx, frame, &frame_count, &avpkt, 1);
    avformat_close_input(&fmt_ctx);
    sws_freeContext(img_convert_ctx);
    avcodec_free_context(&c);
    av_frame_free(&frame);
    return 0;
}
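To build (library names as used by pkg-config on a typical Linux install; the source file name is just an example):

gcc video2bmp.c -o video2bmp $(pkg-config --cflags --libs libavformat libavcodec libswscale libavutil)

Running ./video2bmp input.mp4 out then produces out-0.bmp, out-1.bmp, and so on.

Note that avcodec_decode_video2, av_register_all, and the avpicture_* helpers are all deprecated in current FFmpeg. As a reference, here is a minimal sketch of the same decode step on the send/receive API introduced in FFmpeg 3.1 (names mirror the program above; error handling trimmed):

static int decode_packet(AVCodecContext *c, AVPacket *pkt, AVFrame *frame,
                         struct SwsContext *sws, const char *outfilename,
                         int *frame_count)
{
    char buf[1024];
    int ret = avcodec_send_packet(c, pkt);   /* pkt == NULL flushes the decoder */
    if (ret < 0)
        return ret;
    while (ret >= 0) {
        ret = avcodec_receive_frame(c, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0;                        /* needs more input, or fully drained */
        if (ret < 0)
            return ret;
        snprintf(buf, sizeof(buf), "%s-%d.bmp", outfilename, *frame_count);
        saveBMP(sws, frame, buf);
        (*frame_count)++;
    }
    return 0;
}

The main loop would call decode_packet(c, &avpkt, ...) for each demuxed packet, and decode_packet(c, NULL, ...) once at end of file to drain the decoder's remaining frames.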

