FFmpeg解码提速之libyuv
1. FFmpeg解码瓶颈
经测试发现,FFmpeg解码瓶颈在YUV转RGB上,在12MP视频的环境下,单帧转换时间超过40ms,效率无法满足要求
FFmpeg的YUV转RGB代码:
sws_scale(img_convert_ctx, static_cast<uint8_t const * const *> (pFrame->data),pFrame->linesize, 0, pCodecCtx->height, dst_data, dst_linesize);
2. 使用libyuv提升解码效率
调用libyuv库函数,将yuv转换成RGB,可以明显改善解码效率
代码:
libyuv::I420ToABGR(pFrame->data[0], pFrame->linesize[0],pFrame->data[2], pFrame->linesize[2],pFrame->data[1], pFrame->linesize[1],pFrameRGB->data[0], pFrameRGB->linesize[0],pCodecCtx->width, pCodecCtx->height);
3. 完整代码
#include "FFmpegRtsp.h"

extern "C"
{
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libavutil/pixfmt.h"
#include "libswscale/swscale.h"
#include "libavutil/time.h"
#include "libavutil/mathematics.h"
#include "libavutil/imgutils.h"
}
#include "libyuv.h"

// Set to 1 to use the (slower) sws_scale conversion path instead of libyuv.
#define FFMPEG 0

// Pulls an RTSP stream, decodes video packets, converts each decoded frame
// from YUV (I420) to 32-bit BGRA and pushes the result as a cv::Mat into
// _frameQueue for OpenCV consumers. Runs until m_bStop is set or the
// stream ends. Audio streams are ignored.
void FFmpegRtsp::readVideoFrame()
{
    AVFormatContext* pFormatCtx = nullptr;
    AVCodecContext*  pCodecCtx  = nullptr;
    AVCodec*         pCodec     = nullptr;
    AVFrame*         pFrame     = nullptr;
    AVFrame*         pFrameRGB  = nullptr;
    AVPacket*        pPacket    = nullptr;
    int videoStream = -1;
    int ret;

    avformat_network_init();                 // initialise FFmpeg's network support
    pFormatCtx = avformat_alloc_context();

    // RTSP options: force TCP transport and cap the demuxer delay (ms).
    AVDictionary* avdic = nullptr;
    av_dict_set(&avdic, "rtsp_transport", "tcp", 0);
    av_dict_set(&avdic, "max_delay", "2000", 0);

    if (avformat_open_input(&pFormatCtx, _url.c_str(), nullptr, &avdic) != 0) {
        printf("can't open input url = %s\n", _url.c_str());
        av_dict_free(&avdic);                // BUGFIX: dictionary was leaked
        return;
    }
    av_dict_free(&avdic);                    // entries not consumed by the demuxer

    if (avformat_find_stream_info(pFormatCtx, nullptr) < 0) {
        printf("Could't find stream infomation.\n");
        avformat_close_input(&pFormatCtx);   // BUGFIX: context leaked on early return
        return;
    }

    // Locate the first video stream; audio is intentionally ignored.
    for (unsigned int i = 0; i < pFormatCtx->nb_streams; i++) {
        if (pFormatCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
            videoStream = static_cast<int>(i);
            break;
        }
    }
    if (videoStream == -1) {
        printf("Didn't find a video stream.\n");
        avformat_close_input(&pFormatCtx);
        return;
    }

    pCodec = avcodec_find_decoder(pFormatCtx->streams[videoStream]->codecpar->codec_id);
    // BUGFIX: the original checked pCodec AFTER passing it to
    // avcodec_alloc_context3 — check before use.
    if (pCodec == nullptr) {
        printf("Codec not found.\n");
        avformat_close_input(&pFormatCtx);
        return;
    }
    pCodecCtx = avcodec_alloc_context3(pCodec);
    avcodec_parameters_to_context(pCodecCtx, pFormatCtx->streams[videoStream]->codecpar);
    // NOTE(review): the original also wrote bit_rate/time_base/frame_number
    // here. For a decoder those fields are informational (frame_number is
    // maintained internally by libavcodec), so the writes were dropped.

    if (avcodec_open2(pCodecCtx, pCodec, nullptr) < 0) {
        printf("Could not open codec.\n");
        avcodec_free_context(&pCodecCtx);
        avformat_close_input(&pFormatCtx);
        return;
    }

    pFrame    = av_frame_alloc();
    pFrameRGB = av_frame_alloc();

    // One reusable BGRA output buffer, sized for the stream resolution.
    const int numBytes = av_image_get_buffer_size(AV_PIX_FMT_BGRA,
                                                  pCodecCtx->width, pCodecCtx->height, 1);
    uint8_t* out_buffer = static_cast<uint8_t*>(av_malloc(static_cast<size_t>(numBytes)));

    uint8_t* dst_data[4];
    int      dst_linesize[4];
    av_image_fill_arrays(dst_data, dst_linesize, out_buffer,
                         AV_PIX_FMT_BGRA, pCodecCtx->width, pCodecCtx->height, 1);

    // BUGFIX: point pFrameRGB at out_buffer ONCE, outside the decode loop,
    // using av_image_fill_arrays — avpicture_fill() is deprecated and was
    // removed in FFmpeg 5.x. The original re-filled it every iteration.
    av_image_fill_arrays(pFrameRGB->data, pFrameRGB->linesize, out_buffer,
                         AV_PIX_FMT_BGRA, pCodecCtx->width, pCodecCtx->height, 1);

    // BUGFIX: the original malloc'ed an AVPacket (uninitialized fields,
    // undefined behaviour with av_packet_unref/av_packet_free) and called
    // av_new_packet with a payload that av_read_frame replaces anyway.
    pPacket = av_packet_alloc();

#if FFMPEG
    printf("ffmpeg convert ing...\n");
    // sws conversion context (slow path, kept for comparison).
    struct SwsContext* img_convert_ctx =
        sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
                       pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_BGRA,
                       SWS_BICUBIC, NULL, NULL, NULL);
#endif

    while (m_bStop == false) {
        if (av_read_frame(pFormatCtx, pPacket) < 0) {
            break;                           // end of stream / read error
        }
        if (pPacket->stream_index == videoStream) {
            avcodec_send_packet(pCodecCtx, pPacket);
            ret = avcodec_receive_frame(pCodecCtx, pFrame);
            if (ret < 0) {
                printf("receive frame from packet error.\n");
                av_packet_unref(pPacket);    // BUGFIX: packet leaked on `continue`
                continue;
            }
#if FFMPEG
            // YUV -> BGRA via sws_scale; >40 ms/frame at 12 MP — too slow.
            sws_scale(img_convert_ctx,
                      static_cast<uint8_t const* const*>(pFrame->data),
                      pFrame->linesize, 0, pCodecCtx->height,
                      dst_data, dst_linesize);
            cv::Mat frame(pCodecCtx->height, pCodecCtx->width, CV_8UC4,
                          dst_data[0], static_cast<size_t>(dst_linesize[0]));
#else
            // YUV -> BGRA via libyuv — several times faster than sws_scale.
            // NOTE(review): the U (data[1]) and V (data[2]) planes are
            // deliberately swapped relative to the I420ToABGR signature;
            // combined with the ABGR writer this yields the byte order the
            // original code fed to CV_8UC4 — verify against your libyuv build.
            libyuv::I420ToABGR(pFrame->data[0],    pFrame->linesize[0],
                               pFrame->data[2],    pFrame->linesize[2],
                               pFrame->data[1],    pFrame->linesize[1],
                               pFrameRGB->data[0], pFrameRGB->linesize[0],
                               pCodecCtx->width,   pCodecCtx->height);
            cv::Mat frame(pCodecCtx->height, pCodecCtx->width, CV_8UC4,
                          pFrameRGB->data[0],
                          static_cast<size_t>(pFrameRGB->linesize[0]));
#endif
            {
                std::unique_lock<std::mutex> lk(_lock);
                if (_frameQueue.size() > _frameLen) {
                    _frameQueue.pop();       // drop the oldest frame
                }
                // BUGFIX: clone() — every queued Mat previously aliased the
                // single out_buffer, so older queued frames were silently
                // overwritten by the next conversion.
                _frameQueue.push(frame.clone());
                _cond.notify_one();
            }                                // lock released by RAII
        }
        av_packet_unref(pPacket);
    }

#if FFMPEG
    sws_freeContext(img_convert_ctx);        // BUGFIX: context was leaked
#endif
    av_free(out_buffer);
    av_frame_free(&pFrameRGB);               // BUGFIX: av_free() leaked frame internals
    av_frame_free(&pFrame);
    av_packet_free(&pPacket);
    avcodec_free_context(&pCodecCtx);        // replaces deprecated avcodec_close (+ leak)
    avformat_close_input(&pFormatCtx);
}
提示:上述代码用于 RTSP 拉流,将解码后的视频帧转换为 cv::Mat,供 OpenCV 使用。