
Linux C++ FFmpeg Push Streaming

Source: https://blog.csdn.net/jbjhzstsl/article/details/144714397

I. The FFmpeg push-streaming workflow

1. Initialization, parameter configuration, and the global header

1.1 Initialize FFmpeg's network module.
1.2 Create the output context and select the H.264 encoder.
avformat_alloc_output_context2(&fmt_ctx, nullptr, "rtsp", rtsp_url.c_str())
codec = avcodec_find_encoder(AV_CODEC_ID_H264);
video_stream = avformat_new_stream(fmt_ctx, codec);
codec_ctx = avcodec_alloc_context3(codec);
1.3 Configure the encoder parameters and the format context.
codec_ctx->codec_id = codec->id;
codec_ctx->codec_type = AVMEDIA_TYPE_VIDEO;
codec_ctx->width = width;
codec_ctx->height = height;
codec_ctx->pix_fmt = AV_PIX_FMT_YUV420P;
// codec_ctx->pix_fmt = AV_PIX_FMT_YUYV422;
codec_ctx->time_base = {1, fps};
codec_ctx->gop_size = 12;
avcodec_open2(codec_ctx, codec, nullptr);
// Copy the encoder context parameters into the stream's codec parameters
avcodec_parameters_from_context(video_stream->codecpar, codec_ctx);
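
Beyond the required fields, rate control and latency can also be tuned before avcodec_open2. A short sketch of optional settings, assuming the H.264 encoder found is libx264 ("preset" and "tune" are libx264 private options, not generic FFmpeg ones):

// Optional tuning, set before avcodec_open2 (assumes libx264 is the encoder)
codec_ctx->bit_rate = 2 * 1000 * 1000;  // target roughly 2 Mbit/s
codec_ctx->max_b_frames = 0;            // no B-frames: lower latency
av_opt_set(codec_ctx->priv_data, "preset", "ultrafast", 0);  // x264 private option
av_opt_set(codec_ctx->priv_data, "tune", "zerolatency", 0);  // x264 private option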
1.4 Place global headers at the start of the stream, set the transport options, and write the format context's header to the output.
if (fmt_ctx->oformat->flags & AVFMT_GLOBALHEADER) {
    codec_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}
av_opt_set(fmt_ctx->priv_data, "rtsp_transport", "tcp", 0);  // use TCP transport
av_opt_set(fmt_ctx->priv_data, "max_delay", "500000", 0);    // max delay: 500 ms
avformat_write_header(fmt_ctx, nullptr)
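
avformat_write_header is where a bad URL or an unreachable server usually surfaces, so it is worth decoding the return code. A small sketch using av_strerror, the same pattern as the commented-out avio_open block in the full source below:

int ret = avformat_write_header(fmt_ctx, nullptr);
if (ret < 0) {
    char errbuf[AV_ERROR_MAX_STRING_SIZE];
    av_strerror(ret, errbuf, sizeof(errbuf));  // decode the FFmpeg error code
    std::cerr << "avformat_write_header failed: " << errbuf << std::endl;
}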
1.5 Allocate the frame used for pushing and initialize the SWS context for pixel-format conversion.
av_frame = av_frame_alloc();
av_frame->format = codec_ctx->pix_fmt;  // must be set before av_frame_get_buffer
av_frame->width = width;
av_frame->height = height;
av_frame_get_buffer(av_frame, 32);
sws_ctx_422_to_420 = sws_getContext(
    width, height, AV_PIX_FMT_YUYV422,  // source format: packed YUYV 4:2:2
    width, height, AV_PIX_FMT_YUV420P,  // destination format: planar YUV 4:2:0
    SWS_BICUBIC,  // scaling algorithm; SWS_FAST_BILINEAR etc. also work
    nullptr, nullptr, nullptr);
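
With this context, a packed YUYV422 capture buffer can be converted straight into the encoder's AVFrame, avoiding an intermediate buffer. A minimal sketch, assuming `yuyv` is a hypothetical width*height*2-byte capture buffer:

// Convert one packed YUYV422 frame directly into av_frame's planes
const uint8_t* src_data[4] = {yuyv, nullptr, nullptr, nullptr};
int src_linesize[4] = {width * 2, 0, 0, 0};  // 2 bytes per pixel, single plane
sws_scale(sws_ctx_422_to_420, src_data, src_linesize, 0, height,
          av_frame->data, av_frame->linesize);  // fills Y/U/V with correct strides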

2. Encode and push one frame, computing the PTS

// Set the PTS so it matches the stream time base
av_frame->pts = av_rescale_q(frame_count++, (AVRational){1, fps},
                             video_stream->time_base);
avcodec_send_frame(codec_ctx, av_frame);
av_init_packet(&pkt);
avcodec_receive_packet(codec_ctx, &pkt);
av_interleaved_write_frame(fmt_ctx, &pkt);
av_packet_unref(&pkt);
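
Note that avcodec_receive_packet can yield zero or several packets for one sent frame, so the receive side belongs in a loop rather than a single call. A sketch of the drain loop, using the same variables as above:

if (avcodec_send_frame(codec_ctx, av_frame) < 0) {
    std::cerr << "Failed to send frame to encoder." << std::endl;
    return false;
}
AVPacket pkt;
av_init_packet(&pkt);
pkt.data = nullptr;
pkt.size = 0;
// Drain every packet the encoder has ready for this frame
while (avcodec_receive_packet(codec_ctx, &pkt) == 0) {
    pkt.stream_index = video_stream->index;
    av_interleaved_write_frame(fmt_ctx, &pkt);
    av_packet_unref(&pkt);  // release the packet's buffer each iteration
}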

II. Points to note

  1. avio_open does not need to be called. RTMP carries FLV, but RTSP does not prescribe a specific container protocol, so FFmpeg creates the I/O context automatically when the output context is allocated.
  2. Set the PTS so that it matches the stream's time base (see the sketch after this list).
  3. Release each AVPacket with av_packet_unref.
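
For note 2, the mapping is a single av_rescale_q call from the frame index in the encoder's time base to the stream's time base:

// Frame n at `fps` frames per second, expressed in the stream's time base.
// Example: with fps = 25 and a 1/90000 stream time base, frame 1 maps to PTS 3600.
av_frame->pts = av_rescale_q(frame_count++,             // frame index
                             (AVRational){1, fps},      // encoder time base
                             video_stream->time_base);  // stream time base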

III. Future improvements

IV. Source code

#ifndef FFMPEGRTSPSTREAMER_H
#define FFMPEGRTSPSTREAMER_H
#pragma once

extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/avutil.h>
#include <libavutil/opt.h>
#include <libswscale/swscale.h>
}

#include <cstring>
#include <iostream>
#include <string>

// One frame of raw YUV data handed to push_frame().
struct YUVFrame {
    unsigned char* data;
    int width;
    int height;
};

class FFmpegRTSPStreamer {
public:
    FFmpegRTSPStreamer(const std::string& rtsp_url, int width, int height,
                       int fps);
    ~FFmpegRTSPStreamer();

    bool init();
    bool push_frame(const YUVFrame& frame);
    void cleanup();

    void YUV422ToYUV420p(const unsigned char* yuv422, unsigned char* yuv420,
                         int width, int height);
    void YUV422ToYUV420pBySWS(const uint8_t* yuv422, uint8_t* yuv420,
                              int width, int height);
    void SaveYUV420pToFile(const uint8_t* yuv420p, int width, int height,
                           const std::string& filename);
    void SaveYUV422pToFile(const uint8_t* yuv422p, int width, int height,
                           const std::string& filename);
    void ConvertYUYVToYUV420P(const unsigned char* yuyv,
                              unsigned char* yuv420p, int width, int height);

private:
    std::string rtsp_url;
    int width;
    int height;
    int fps;
    unsigned long long frame_count = 0;

    AVFormatContext* fmt_ctx = nullptr;
    AVStream* video_stream = nullptr;
    AVCodecContext* codec_ctx = nullptr;
    const AVCodec* codec = nullptr;  // const: required by FFmpeg >= 5.0, accepted by older versions
    AVFrame* av_frame = nullptr;
    SwsContext* sws_ctx_422_to_420 = nullptr;
};

#endif  // FFMPEGRTSPSTREAMER_H
#include "FFmpegRTSPStreamer.h"#include <fstream>
#include <vector>FFmpegRTSPStreamer::FFmpegRTSPStreamer(const std::string& rtsp_url, int width,int height, int fps): rtsp_url(rtsp_url),width(width),height(height),fps(fps),fmt_ctx(nullptr),video_stream(nullptr),codec_ctx(nullptr),codec(nullptr),av_frame(nullptr) {}FFmpegRTSPStreamer::~FFmpegRTSPStreamer() { cleanup(); }bool FFmpegRTSPStreamer::init() {// av_register_all();avformat_network_init();if (avformat_alloc_output_context2(&fmt_ctx, nullptr, "rtsp",rtsp_url.c_str()) < 0) {std::cerr << "Could not allocate output context." << std::endl;return false;}codec = avcodec_find_encoder(AV_CODEC_ID_H264);if (!codec) {std::cerr << "H264 codec not found." << std::endl;return false;}video_stream = avformat_new_stream(fmt_ctx, codec);if (!video_stream) {std::cerr << "Failed to create video stream." << std::endl;return false;}codec_ctx = avcodec_alloc_context3(codec);if (!codec_ctx) {std::cerr << "Failed to allocate codec context." << std::endl;return false;}codec_ctx->codec_id = codec->id;codec_ctx->codec_type = AVMEDIA_TYPE_VIDEO;codec_ctx->width = width;codec_ctx->height = height;codec_ctx->pix_fmt = AV_PIX_FMT_YUV420P;// codec_ctx->pix_fmt = AV_PIX_FMT_YUYV422;codec_ctx->time_base = {1, fps};codec_ctx->gop_size = 12;if (fmt_ctx->oformat->flags & AVFMT_GLOBALHEADER) {codec_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;}if (avcodec_open2(codec_ctx, codec, nullptr) < 0) {std::cerr << "Failed to open codec." << std::endl;return false;}// 将编码器上下文的参数复制到流的编码参数中avcodec_parameters_from_context(video_stream->codecpar, codec_ctx);/*int ret = avio_open(&fmt_ctx->pb, rtsp_url.c_str(), AVIO_FLAG_WRITE);if (ret < 0) {char errbuf[AV_ERROR_MAX_STRING_SIZE];av_strerror(ret, errbuf, sizeof(errbuf));std::cerr << "Could not open RTSP stream: " << errbuf << std::endl;return false;}*/av_opt_set(fmt_ctx->priv_data, "rtsp_transport", "tcp", 0);  // 使用TCP传输av_opt_set(fmt_ctx->priv_data, "max_delay", "500000",0);  // 设置最大延迟为 500ms// 格式上下文的头信息写入输出流if (avformat_write_header(fmt_ctx, nullptr) < 0) {std::cerr << "Error occurred when writing header." << std::endl;return false;}av_frame = av_frame_alloc();av_frame->format = codec_ctx->pix_fmt;av_frame->width = width;av_frame->height = height;if (av_frame_get_buffer(av_frame, 32) < 0) {std::cerr << "Could not allocate frame buffer." << std::endl;return false;}sws_ctx_422_to_420 =sws_getContext(width, height, AV_PIX_FMT_YUYV422,  // 源格式:YUV422width, height, AV_PIX_FMT_YUV420P,  // 目标格式:YUV420pSWS_BICUBIC,  // 缩放算法:可以选择其他算法,比如// SWS_FAST_BILINEAR、SWS_BICUBIC 等nullptr, nullptr, nullptr);if (!sws_ctx_422_to_420) {std::cerr << "Error initializing the conversion context sws_ctx_422_to_420."<< std::endl;return false;}return true;
bool FFmpegRTSPStreamer::push_frame(const YUVFrame& frame) {
    if (!av_frame || !frame.data) {
        std::cerr << "Invalid frame or uninitialized streamer." << std::endl;
        return false;
    }

    // Note: these copies assume the frame's linesizes equal the plane widths;
    // with padded linesizes the planes must be copied row by row.
    memcpy(av_frame->data[0], frame.data, width * height);  // Y plane
    memcpy(av_frame->data[1], frame.data + width * height,
           width * height / 4);  // U plane
    memcpy(av_frame->data[2], frame.data + width * height * 5 / 4,
           width * height / 4);  // V plane

    // Set the PTS so it matches the stream time base
    av_frame->pts = av_rescale_q(frame_count++, (AVRational){1, fps},
                                 video_stream->time_base);

    if (avcodec_send_frame(codec_ctx, av_frame) < 0) {
        std::cerr << "Failed to send frame to encoder." << std::endl;
        return false;
    }

    AVPacket pkt;
    av_init_packet(&pkt);
    pkt.data = nullptr;
    pkt.size = 0;

    if (avcodec_receive_packet(codec_ctx, &pkt) == 0) {
        pkt.stream_index = video_stream->index;
        // Debug output: check PTS and DTS
        std::cout << "frame_count:" << frame_count << " PTS: " << pkt.pts
                  << " DTS: " << pkt.dts << " size: " << pkt.size << std::endl;
        if (av_interleaved_write_frame(fmt_ctx, &pkt) < 0) {
            std::cerr << "Failed to write frame. PTS: " << pkt.pts
                      << " DTS: " << pkt.dts << " size: " << pkt.size
                      << std::endl;
            av_packet_unref(&pkt);
            return false;
        }
        av_packet_unref(&pkt);
    }
    return true;
}
void FFmpegRTSPStreamer::cleanup() {
    if (fmt_ctx) {
        av_write_trailer(fmt_ctx);
        if (fmt_ctx->pb) {
            avio_close(fmt_ctx->pb);
        }
        avformat_free_context(fmt_ctx);
        fmt_ctx = nullptr;
    }
    if (codec_ctx) {
        avcodec_free_context(&codec_ctx);
        codec_ctx = nullptr;
    }
    if (av_frame) {
        av_frame_free(&av_frame);
        av_frame = nullptr;
    }
    if (sws_ctx_422_to_420) {
        sws_freeContext(sws_ctx_422_to_420);
        sws_ctx_422_to_420 = nullptr;
    }
}
void FFmpegRTSPStreamer::YUV422ToYUV420p(const unsigned char* yuv422,
                                         unsigned char* yuv420, int width,
                                         int height) {
    int y_size = width * height;
    int uv_size = width * height / 4;  // size of each of U and V in 4:2:0

    // Offsets inside the planar YUV422p input
    const unsigned char* y_data = yuv422;           // start of Y data
    const unsigned char* u_data = yuv422 + y_size;  // start of U data
    const unsigned char* v_data =
        u_data + (width * height) / 2;  // start of V data

    unsigned char* y_out = yuv420;           // Y output position
    unsigned char* u_out = yuv420 + y_size;  // U output position
    unsigned char* v_out = u_out + uv_size;  // V output position

    // Copy the Y plane unchanged
    memcpy(y_out, y_data, y_size);

    // The 4:2:2 chroma planes are already horizontally subsampled
    // (width/2 samples per row), so only vertical downsampling is needed:
    // average each pair of vertically adjacent chroma samples.
    for (int i = 0; i < height / 2; i++) {
        for (int j = 0; j < width / 2; j++) {
            int top = (2 * i) * (width / 2) + j;         // chroma row 2i
            int bottom = (2 * i + 1) * (width / 2) + j;  // chroma row 2i+1
            int out = i * (width / 2) + j;
            u_out[out] = (u_data[top] + u_data[bottom]) / 2;
            v_out[out] = (v_data[top] + v_data[bottom]) / 2;
        }
    }
}
void FFmpegRTSPStreamer::YUV422ToYUV420pBySWS(const uint8_t* yuv422,
                                              uint8_t* yuv420, int width,
                                              int height) {
    // Source: packed YUYV 4:2:2 (matching the sws context) — one plane,
    // 2 bytes per pixel
    const uint8_t* src_data[4] = {yuv422, nullptr, nullptr, nullptr};
    int src_linesize[4] = {width * 2, 0, 0, 0};

    // Destination: planar YUV 4:2:0 — three planes
    uint8_t* dst_data[4] = {yuv420,                           // Y plane
                            yuv420 + width * height,          // U plane
                            yuv420 + width * height * 5 / 4,  // V plane
                            nullptr};
    int dst_linesize[4] = {width, width / 2, width / 2, 0};

    // Convert with the context created in init()
    int ret = sws_scale(sws_ctx_422_to_420,       // context
                        src_data, src_linesize,   // source data and strides
                        0,                        // first source row
                        height,                   // number of source rows
                        dst_data, dst_linesize);  // destination data and strides
    if (ret < 0) {
        std::cerr << "Error during conversion." << std::endl;
    }
}
void FFmpegRTSPStreamer::SaveYUV420pToFile(const uint8_t* yuv420p, int width,
                                           int height,
                                           const std::string& filename) {
    // Create and open a binary file
    std::ofstream file(filename, std::ios::binary);
    if (!file) {
        std::cerr << "Failed to open file: " << filename << std::endl;
        return;
    }

    // YUV420p layout: Y plane + U plane + V plane
    int y_size = width * height;       // Y plane size
    int uv_size = width * height / 4;  // U and V plane sizes (each 1/4 of Y)

    // Write the Y, U, and V planes
    file.write(reinterpret_cast<const char*>(yuv420p), y_size);  // Y plane
    file.write(reinterpret_cast<const char*>(yuv420p + y_size),
               uv_size);  // U plane
    file.write(reinterpret_cast<const char*>(yuv420p + y_size + uv_size),
               uv_size);  // V plane

    file.close();
    std::cout << "YUV data saved to " << filename << std::endl;
}
void FFmpegRTSPStreamer::SaveYUV422pToFile(const uint8_t* yuv422p, int width,
                                           int height,
                                           const std::string& filename) {
    // Create and open a binary file
    std::ofstream file(filename, std::ios::binary);
    if (!file) {
        std::cerr << "Failed to open file: " << filename << std::endl;
        return;
    }

    // YUV422p layout: Y plane + U plane + V plane
    int y_size = width * height;       // Y plane size
    int uv_size = width * height / 2;  // U and V plane sizes (each 1/2 of Y)

    // Write the Y, U, and V planes
    file.write(reinterpret_cast<const char*>(yuv422p), y_size);  // Y plane
    file.write(reinterpret_cast<const char*>(yuv422p + y_size),
               uv_size);  // U plane
    file.write(reinterpret_cast<const char*>(yuv422p + y_size + uv_size),
               uv_size);  // V plane

    file.close();
    std::cout << "YUV data saved to " << filename << std::endl;
}
void FFmpegRTSPStreamer::ConvertYUYVToYUV420P(const unsigned char* yuyv,
                                              unsigned char* yuv420p,
                                              int width, int height) {
    int frameSize = width * height;

    // Pointers for Y, U, and V planes in YUV420P format
    unsigned char* yPlane = yuv420p;                      // start of Y plane
    unsigned char* uPlane = yuv420p + frameSize;          // start of U plane
    unsigned char* vPlane = yuv420p + frameSize * 5 / 4;  // start of V plane

    memset(yPlane, 0, frameSize);
    memset(uPlane, 0, frameSize / 4);
    memset(vPlane, 0, frameSize / 4);

    // Iterate through each pixel pair in the packed YUYV buffer
    for (int j = 0; j < height; j++) {
        for (int i = 0; i < width; i += 2) {
            // Index of this pixel pair in the YUYV buffer
            int yuyvIndex = (j * width + i) * 2;

            // Extract Y, U, and V values
            unsigned char y1 = yuyv[yuyvIndex];      // Y1
            unsigned char u = yuyv[yuyvIndex + 1];   // U
            unsigned char y2 = yuyv[yuyvIndex + 2];  // Y2
            unsigned char v = yuyv[yuyvIndex + 3];   // V

            // Write Y values directly to the Y plane
            yPlane[j * width + i] = y1;
            yPlane[j * width + i + 1] = y2;

            // Write U and V to their planes, downsampled 2x2
            // (i is always even here, so only even rows need the check)
            if (j % 2 == 0) {
                int uvIndex = (j / 2) * (width / 2) + (i / 2);
                uPlane[uvIndex] = u;
                vPlane[uvIndex] = v;
            }
        }
    }
}
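
The original post stops at the class itself. The sketch below shows how it is meant to be driven, assuming a hypothetical capture_yuv420p() that fills a buffer with one YUV420p frame; the URL is also an assumption:

#include "FFmpegRTSPStreamer.h"
#include <vector>

// Hypothetical capture function: fills `buf` with one YUV420p frame,
// returns false when the source is exhausted.
extern bool capture_yuv420p(unsigned char* buf, int width, int height);

int main() {
    const int width = 1280, height = 720, fps = 25;
    FFmpegRTSPStreamer streamer("rtsp://127.0.0.1:8554/live", width, height, fps);
    if (!streamer.init()) return 1;

    std::vector<unsigned char> buf(width * height * 3 / 2);  // YUV420p frame size
    YUVFrame frame{buf.data(), width, height};
    while (capture_yuv420p(buf.data(), width, height)) {
        if (!streamer.push_frame(frame)) break;
    }
    return 0;  // ~FFmpegRTSPStreamer() calls cleanup()
}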
