
Implementing Image Serialization with C++, OpenCV and FFmpeg

Updated: 2023-10-28

Introduction

C++ and OpenCV are widely used in computer vision and provide strong support for image and video processing. To implement serialization we will combine OpenCV's serialization facilities, which let us write individual images to disk, with FFmpeg, which lets us merge a sequence of images into a single video.

Serializing an Image with OpenCV

OpenCV's serialization facilities (the FileStorage class) let us serialize an image to disk. The following example serializes a single image:

#include <opencv2/opencv.hpp>

using namespace cv;

int main()
{
  // Load the source image from disk
  Mat img = imread("image.jpg");
  // Open a YAML file for writing and store the Mat under the key "img"
  FileStorage fs("image.yml", FileStorage::WRITE);
  fs << "img" << img;
  fs.release();
  
  return 0;
}

The code above serializes an image named image.jpg into a file named image.yml. It uses OpenCV's FileStorage class, which writes XML or YAML files. To store the image we write the Mat data into the file under a key, here named "img".
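
For completeness, here is a minimal sketch of the reverse step, reading the serialized image back; it assumes the same file name image.yml and key "img" used above:

#include <opencv2/opencv.hpp>

using namespace cv;

int main()
{
  // Open the YAML file written by the previous example for reading
  FileStorage fs("image.yml", FileStorage::READ);
  Mat img;
  // Decode the node stored under the key "img" back into a Mat
  fs["img"] >> img;
  fs.release();

  if (img.empty())
    return -1;

  // The restored image can be used like any other Mat, e.g. written out again
  imwrite("restored.jpg", img);
  return 0;
}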

Combining Multiple Images into a Video with FFmpeg

FFmpeg can be used to combine multiple images into a single video file. The following is a C++ example:

#include <iostream>
#include <string>
#include <cstring>

#include <opencv2/opencv.hpp>

extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/imgutils.h>
#include <libswscale/swscale.h>
}

using namespace cv;

int main(int argc, char* argv[])
{
  AVFormatContext* formatCtx = nullptr;
  AVCodecContext* codecCtx = nullptr;
  AVCodec* codec = nullptr;
  AVStream* stream = nullptr;
  AVPacket packet{};  // zero-initialize so avcodec_receive_packet() can safely unref it
  int ret = 0, frameCount = 0;

  if (argc < 2)
  {
    std::cout << "Usage: " << argv[0] << " <output file>" << std::endl;
    return -1;
  }
  const char* outFilename = argv[1];
  const int width = 640, height = 480;
  const int fps = 25, timebase = 1;

  ret = avformat_alloc_output_context2(&formatCtx, nullptr, nullptr, outFilename);
  if (ret < 0)
  {
    std::cout << "Could not allocate output format context." << std::endl;
    return -1;
  }

  codec = avcodec_find_encoder(formatCtx->oformat->video_codec);
  if (!codec)
  {
    std::cout << "Could not find encoder." << std::endl;
    return -1;
  }

  stream = avformat_new_stream(formatCtx, codec);
  if (!stream)
  {
    std::cout << "Could not create new stream." << std::endl;
    return -1;
  }

  codecCtx = avcodec_alloc_context3(codec);
  codecCtx->width = width;
  codecCtx->height = height;
  codecCtx->bit_rate = 1000000;
  codecCtx->pix_fmt = AV_PIX_FMT_YUV420P;
  codecCtx->time_base = AVRational{timebase, fps};
  codecCtx->framerate = AVRational{fps, timebase};
  // Containers such as MP4 expect the codec extradata in the stream header
  if (formatCtx->oformat->flags & AVFMT_GLOBALHEADER)
    codecCtx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

  ret = avcodec_open2(codecCtx, codec, nullptr);
  if (ret < 0)
  {
    std::cout << "Could not open codec." << std::endl;
    return -1;
  }

  // Copy the encoder parameters into the stream before writing the container header
  ret = avcodec_parameters_from_context(stream->codecpar, codecCtx);
  if (ret < 0)
  {
    std::cout << "Could not copy codec parameters to the stream." << std::endl;
    return -1;
  }

  // Open the output file for writing unless the muxer needs no file
  if (!(formatCtx->oformat->flags & AVFMT_NOFILE))
  {
    ret = avio_open(&formatCtx->pb, outFilename, AVIO_FLAG_WRITE);
    if (ret < 0)
    {
      std::cout << "Could not open output file." << std::endl;
      return -1;
    }
  }

  ret = avformat_write_header(formatCtx, nullptr);
  if (ret < 0)
  {
    std::cout << "Could not write header." << std::endl;
    return -1;
  }

  while (true)
  {
    std::string filename = std::string("frame_") + std::to_string(frameCount++) + ".jpg";
    AVFrame* frame = av_frame_alloc();
    frame->format = codecCtx->pix_fmt;
    frame->width = codecCtx->width;
    frame->height = codecCtx->height;
    ret = av_frame_get_buffer(frame, 0);
    if (ret < 0)
    {
      std::cout << "Could not allocate video frame." << std::endl;
      break;
    }

    std::cout << "Reading frame #" << frameCount << ": " << filename << std::endl;

    Mat img = imread(filename, IMREAD_COLOR);
    if (img.empty())
    {
      // No more numbered images on disk: stop feeding the encoder
      av_frame_free(&frame);
      break;
    }
    // Scale to the encoder resolution and convert BGR to planar YUV420 (I420)
    Mat imgResized;
    resize(img, imgResized, Size(codecCtx->width, codecCtx->height));
    Mat imgYuv;
    cvtColor(imgResized, imgYuv, COLOR_BGR2YUV_I420);

    // Point the frame at one tightly packed I420 buffer and copy the converted pixels in one go
    int nbytes = av_image_get_buffer_size(codecCtx->pix_fmt, codecCtx->width, codecCtx->height, 1);
    uint8_t* buffer = (uint8_t*)av_malloc(nbytes);
    av_image_fill_arrays(frame->data, frame->linesize, buffer, codecCtx->pix_fmt, codecCtx->width, codecCtx->height, 1);
    memcpy(frame->data[0], imgYuv.data, nbytes);

    // Presentation timestamp in units of the encoder time base (1/fps)
    frame->pts = frameCount;
    ret = avcodec_send_frame(codecCtx, frame);
    av_frame_unref(frame);
    if (ret < 0)
    {
      std::cout << "Error sending a frame to the codec." << std::endl;
      break;
    }

    while (ret >= 0)
    {
      ret = avcodec_receive_packet(codecCtx, &packet);
      if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
      {
        break;
      }
      else if (ret < 0)
      {
        std::cout << "Error receiving packet from codec." << std::endl;
        break;
      }

      av_packet_rescale_ts(&packet, codecCtx->time_base, stream->time_base);
      packet.stream_index = stream->index;

      ret = av_interleaved_write_frame(formatCtx, &packet);
      av_packet_unref(&packet);
      if (ret < 0)
      {
        std::cout << "Error during writing to output file." << std::endl;
        return -1;
      }
    }

  }

  // Note: frames still buffered in the encoder are not drained here; see the flush sketch below
  av_write_trailer(formatCtx);
  if (!(formatCtx->oformat->flags & AVFMT_NOFILE))
    avio_closep(&formatCtx->pb);
  avcodec_free_context(&codecCtx);
  avformat_free_context(formatCtx);

  return 0;
}

The code above uses FFmpeg's libavcodec, libavformat, libavutil and libswscale libraries for video encoding and muxing. The encoder is chosen from the output format's default video codec (H.264 for an MP4 container in a typical build), and pixel data is handed to it in YUV420P format. After the video stream has been created, added to the output context and configured, each loop iteration converts one image to YUV, fills a frame with the pixel data and writes it to the stream: avcodec_send_frame() hands the frame to the encoder, avcodec_receive_packet() collects the encoded packets, and the packets are written to the output file.
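
One detail the example above leaves out is draining the encoder: encoders such as H.264 buffer frames internally, so the packets for the last few frames only appear after an end-of-stream flush. Below is a minimal sketch of that step, assuming the same codecCtx, formatCtx and stream variables as in the example (the helper name flush_encoder is just for illustration); it would be called after the read loop ends and before av_write_trailer():

// Drain the packets still buffered in the encoder and write them to the muxer.
static void flush_encoder(AVCodecContext* codecCtx, AVFormatContext* formatCtx, AVStream* stream)
{
  // A null frame tells the encoder that no more input will arrive
  avcodec_send_frame(codecCtx, nullptr);

  AVPacket packet{};
  while (true)
  {
    int ret = avcodec_receive_packet(codecCtx, &packet);
    if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN))
      break;  // nothing left to drain

    // Convert timestamps from the encoder time base to the stream time base
    av_packet_rescale_ts(&packet, codecCtx->time_base, stream->time_base);
    packet.stream_index = stream->index;
    av_interleaved_write_frame(formatCtx, &packet);
    av_packet_unref(&packet);
  }
}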

Serializing an Image Sequence

We can combine the two pieces above to serialize a series of images over a limited time span. The following example uses OpenCV and FFmpeg to serialize the images for a short clip:

#include <iostream>
#include <string>
#include <cstring>

#include <opencv2/opencv.hpp>

extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/imgutils.h>
#include <libswscale/swscale.h>
}

using namespace cv;

int main()
{
  AVFormatContext* formatCtx = nullptr;
  AVCodecContext* codecCtx = nullptr;
  AVCodec* codec = nullptr;
  AVStream* stream = nullptr;
  AVPacket packet{};  // zero-initialize so avcodec_receive_packet() can safely unref it
  int ret = 0, frameCount = 0;

  const char* outFilename = "out.mp4";
  const int width = 640, height = 480;
  const int fps = 25, timebase = 1;

  ret = avformat_alloc_output_context2(&formatCtx, nullptr, nullptr, outFilename);
  if (ret < 0)
  {
    std::cout << "Could not allocate output format context." << std::endl;
    return -1;
  }

  codec = avcodec_find_encoder(formatCtx->oformat->video_codec);
  if (!codec)
  {
    std::cout << "Could not find encoder." << std::endl;
    return -1;
  }

  stream = avformat_new_stream(formatCtx, codec);
  if (!stream)
  {
    std::cout << "Could not create new stream." << std::endl;
    return -1;
  }

  codecCtx = avcodec_alloc_context3(codec);
  codecCtx->width = width;
  codecCtx->height = height;
  codecCtx->bit_rate = 1000000;
  codecCtx->pix_fmt = AV_PIX_FMT_YUV420P;
  codecCtx->time_base = AVRational{timebase, fps};
  codecCtx->framerate = AVRational{fps, timebase};
  // Containers such as MP4 expect the codec extradata in the stream header
  if (formatCtx->oformat->flags & AVFMT_GLOBALHEADER)
    codecCtx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

  ret = avcodec_open2(codecCtx, codec, nullptr);
  if (ret < 0)
  {
    std::cout << "Could not open codec." << std::endl;
    return -1;
  }

  // Copy the encoder parameters into the stream before writing the container header
  ret = avcodec_parameters_from_context(stream->codecpar, codecCtx);
  if (ret < 0)
  {
    std::cout << "Could not copy codec parameters to the stream." << std::endl;
    return -1;
  }

  // Open the output file for writing unless the muxer needs no file
  if (!(formatCtx->oformat->flags & AVFMT_NOFILE))
  {
    ret = avio_open(&formatCtx->pb, outFilename, AVIO_FLAG_WRITE);
    if (ret < 0)
    {
      std::cout << "Could not open output file." << std::endl;
      return -1;
    }
  }

  ret = avformat_write_header(formatCtx, nullptr);
  if (ret < 0)
  {
    std::cout << "Could not write header." << std::endl;
    return -1;
  }

  double duration = 5.0;
  double fpsInterval = 1.0 / fps;
  double currTime = 0;
  int currFrame = 0;

  while (currTime < duration)
  {
    std::string filename = std::string("frame_") + std::to_string(currFrame++) + ".jpg";
    AVFrame* frame = av_frame_alloc();
    frame->format = codecCtx->pix_fmt;
    frame->width = codecCtx->width;
    frame->height = codecCtx->height;
    ret = av_frame_get_buffer(frame, 0);
    if (ret < 0)
    {
      std::cout << "Could not allocate video frame." << std::endl;
      break;
    }

    std::cout << "Reading frame #" << currFrame << ": " << filename << std::endl;

    Mat img = imread(filename, IMREAD_COLOR);
    if (img.empty())
    {
      // No more numbered images on disk: stop feeding the encoder
      av_frame_free(&frame);
      break;
    }
    // Scale to the encoder resolution and convert BGR to planar YUV420 (I420)
    Mat imgResized;
    resize(img, imgResized, Size(codecCtx->width, codecCtx->height));
    Mat imgYuv;
    cvtColor(imgResized, imgYuv, COLOR_BGR2YUV_I420);

    // Point the frame at one tightly packed I420 buffer and copy the converted pixels in one go
    int nbytes = av_image_get_buffer_size(codecCtx->pix_fmt, codecCtx->width, codecCtx->height, 1);
    uint8_t* buffer = (uint8_t*)av_malloc(nbytes);
    av_image_fill_arrays(frame->data, frame->linesize, buffer, codecCtx->pix_fmt, codecCtx->width, codecCtx->height, 1);
    memcpy(frame->data[0], imgYuv.data, nbytes);

    // pts counts frames in the encoder time base (1/fps); currTime tracks elapsed seconds
    frame->pts = (int64_t)(currTime / fpsInterval);
    currTime = currFrame * fpsInterval;
    ret = avcodec_send_frame(codecCtx, frame);
    av_frame_unref(frame);
    if (ret < 0)
    {
      std::cout << "Error sending a frame to the codec." << std::endl;
      break;
    }

    while (ret >= 0)
    {
      ret = avcodec_receive_packet(codecCtx, &packet);
      if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
      {
        break;
      }
      else if (ret < 0)
      {
        std::cout << "Error receiving packet from codec." << std::endl;
        break;
      }

      av_packet_rescale_ts(&packet, codecCtx->time_base, stream->time_base);
      packet.stream_index = stream->index;

      ret = av_interleaved_write_frame(formatCtx, &packet);
      av_packet_unref(&packet);
      if (ret < 0)
      {
        std::cout << "Error during writing to output file." << std::endl;
        return -1;
      }
    }

  }

  // Note: frames still buffered in the encoder are not drained here; see the flush sketch above
  av_write_trailer(formatCtx);
  if (!(formatCtx->oformat->flags & AVFMT_NOFILE))
    avio_closep(&formatCtx->pb);
  avcodec_free_context(&codecCtx);
  avformat_free_context(formatCtx);

  return 0;
}

The code above serializes a 5-second clip of images, writing 25 frames per second into the video (125 frames in total). The loop keeps reading numbered images until the time limit is reached, converts each one to a YUV frame, encodes it and muxes the resulting packets into the output container, relying on the AVFrame/AVPacket APIs and FFmpeg's encoding and muxing functions.
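
To sanity-check the result, the generated file can be opened again with OpenCV. The sketch below, assuming the output name out.mp4 used above, reads back the frame rate and frame count, which for a 5-second clip at 25 fps should come out to roughly 125 frames:

#include <iostream>
#include <opencv2/opencv.hpp>

using namespace cv;

int main()
{
  // Open the video produced by the example above
  VideoCapture cap("out.mp4");
  if (!cap.isOpened())
  {
    std::cout << "Could not open out.mp4" << std::endl;
    return -1;
  }

  double fps = cap.get(CAP_PROP_FPS);
  double frames = cap.get(CAP_PROP_FRAME_COUNT);
  std::cout << "fps: " << fps << ", frames: " << frames << std::endl;

  return 0;
}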

Summary

By combining OpenCV's serialization module with FFmpeg's video encoding and muxing, we can conveniently serialize multiple images into a real media file, back up a single image to disk, or produce a short, time-limited clip, covering a range of use cases. The examples above are simple C++ sketches; readers can adapt and extend them to fit the needs of their own projects.