I recently finished a project that runs image-analysis algorithms on real-time video streams; this post records the overall pipeline and the key steps.
1. The project transports video as an H264 payload over RTP. The streaming server receives the stream, depacketizes it to extract the raw H264 data, and decodes it with the following function:
int CITStreamTrans::StreamDec(unsigned char* buffer_in, int32_t bufferLen_in, unsigned char** yuv_out, int32_t &bufferLen_out)
{
    int ret;
    AVPacket packet = { 0 };
    packet.data = (uint8_t*)buffer_in;
    packet.size = bufferLen_in;
    packet.pts  = FRAME_TIMESTAMP * m_nFrameCounter++;

    ret = avcodec_send_packet(_pDecCodecContext, &packet);
    if (ret < 0)
        return ret;

    if (avcodec_receive_frame(_pDecCodecContext, _pFrameYuv) == 0)
    {
        // Cache the stream parameters the first time a frame comes out.
        m_nCodecID = m_nCodecID == 0 ? _pDecCodecContext->codec_id : m_nCodecID;
        m_nWidth   = m_nWidth   == 0 ? _pDecCodecContext->width    : m_nWidth;
        m_nHeight  = m_nHeight  == 0 ? _pDecCodecContext->height   : m_nHeight;
        m_nFps     = m_nFps     == 0 ? ENCODE_DEFAULT_FRAMERATE    : m_nFps;
        m_nMbps    = m_nMbps    == 0 ? ENCODE_DEAULT_BITRATES      : m_nMbps;
        _pFrameYuv->pts = _pFrameYuv->best_effort_timestamp;
        av_packet_unref(&packet);

        if (false) { // hardware-decode path, currently disabled
            if (_pFrameYuv->format == hw_pix_fmt)
            {
                /* retrieve data from GPU to CPU */
                if ((ret = av_hwframe_transfer_data(m_pSWFrame, _pFrameYuv, 0)) < 0)
                {
                    printf("Error transferring the data to system memory\n");
                    return -1;
                }
            }
            if (alloc_rgb_buf_size == 0)
                alloc_rgb_buf_size = av_image_alloc(dst_data, dst_linesize, m_nWidth, m_nHeight, AV_PIX_FMT_BGR24, 1);
            if (yuvtorgb_ctx == NULL) {
                yuvtorgb_ctx = sws_getContext(m_nWidth, m_nHeight, (AVPixelFormat)m_pSWFrame->format,
                                              m_nWidth, m_nHeight, AV_PIX_FMT_BGR24,
                                              SWS_BICUBIC, NULL, NULL, NULL);
            }
            sws_scale(yuvtorgb_ctx, (const uint8_t* const*)m_pSWFrame->data, m_pSWFrame->linesize,
                      0, m_nHeight, dst_data, dst_linesize);
            *yuv_out = dst_data[0];
            bufferLen_out = m_nWidth * m_nHeight * 3;
            return 0;
        }
        else {
            if (alloc_rgb_buf_size == 0)
                alloc_rgb_buf_size = av_image_alloc(dst_data, dst_linesize, m_nWidth, m_nHeight, AV_PIX_FMT_BGR24, 1);
            if (yuvtorgb_ctx == NULL) {
                yuvtorgb_ctx = sws_getContext(m_nWidth, m_nHeight, AV_PIX_FMT_YUV420P,
                                              m_nWidth, m_nHeight, AV_PIX_FMT_BGR24,
                                              SWS_BICUBIC, NULL, NULL, NULL);
            }
            sws_scale(yuvtorgb_ctx, (const uint8_t* const*)_pFrameYuv->data, _pFrameYuv->linesize,
                      0, m_nHeight, dst_data, dst_linesize);
            *yuv_out = dst_data[0];
            bufferLen_out = m_nWidth * m_nHeight * 3;
            return 0;
        }
    }
    else {
        return -1;
    }
}
Since the analysis algorithms consume RGB data, the function above converts the decoded YUV directly to RGB on output.
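For context on the depacketization step mentioned in point 1, here is a minimal sketch of RFC 6184 H264 depacketization (single-NAL packets and FU-A reassembly) that emits Annex-B NAL units a function like StreamDec can consume. The function name and buffer handling are illustrative assumptions, not the project's actual code:

// Minimal RTP H264 depacketizer sketch (RFC 6184): handles single-NAL packets and
// FU-A fragments, emitting Annex-B NAL units (start code + NAL). Illustrative only.
#include <cstddef>
#include <cstdint>
#include <vector>

static const uint8_t kStartCode[4] = { 0, 0, 0, 1 };

// payload points past the RTP header (and any CSRC list / extension).
// Returns true when a complete NAL unit is available in nal_out.
bool DepacketizeH264(const uint8_t* payload, size_t len,
                     std::vector<uint8_t>& fua_buf, std::vector<uint8_t>& nal_out)
{
    if (len < 1) return false;
    uint8_t nal_type = payload[0] & 0x1F;

    if (nal_type >= 1 && nal_type <= 23) {          // single NAL unit packet
        nal_out.assign(kStartCode, kStartCode + 4);
        nal_out.insert(nal_out.end(), payload, payload + len);
        return true;
    }
    if (nal_type == 28 && len >= 2) {               // FU-A fragment
        uint8_t fu_header = payload[1];
        if (fu_header & 0x80) {                     // start bit: rebuild the NAL header
            fua_buf.assign(kStartCode, kStartCode + 4);
            fua_buf.push_back((payload[0] & 0xE0) | (fu_header & 0x1F));
        }
        fua_buf.insert(fua_buf.end(), payload + 2, payload + len);
        if (fu_header & 0x40) {                     // end bit: NAL complete
            nal_out.swap(fua_buf);
            fua_buf.clear();
            return true;
        }
    }
    return false;                                   // STAP-A etc. omitted for brevity
}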
2. Image-algorithm analysis of the real-time video. The analysis itself takes time, so to avoid blocking threads and accumulating video latency, submitting frames for analysis and collecting the results must be handled separately. This asynchronous mode requires careful data synchronization, as sketched below.
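The original post only describes this design; as a concrete illustration, here is a minimal sketch of the decoupling it relies on: a bounded, mutex-protected queue between the decode (producer) thread and the analysis (consumer) thread. The RgbFrame/FrameQueue names and the drop-oldest policy are my assumptions, not the project's code:

// Bounded thread-safe queue decoupling the decode thread (producer) from the
// analysis thread (consumer). Dropping the oldest frame when full keeps the
// pipeline real-time instead of letting latency accumulate. Illustrative sketch.
#include <condition_variable>
#include <cstdint>
#include <deque>
#include <mutex>
#include <vector>

struct RgbFrame { std::vector<uint8_t> data; int64_t pts; };

class FrameQueue {
public:
    explicit FrameQueue(size_t cap) : cap_(cap) {}

    void Push(RgbFrame f) {                     // called from the decode thread
        std::lock_guard<std::mutex> lk(mtx_);
        if (q_.size() >= cap_) q_.pop_front(); // drop oldest to bound the delay
        q_.push_back(std::move(f));
        cv_.notify_one();
    }

    RgbFrame Pop() {                            // called from the analysis thread
        std::unique_lock<std::mutex> lk(mtx_);
        cv_.wait(lk, [this] { return !q_.empty(); });
        RgbFrame f = std::move(q_.front());
        q_.pop_front();
        return f;
    }

private:
    size_t cap_;
    std::deque<RgbFrame> q_;
    std::mutex mtx_;
    std::condition_variable cv_;
};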
3. After the analyzed data comes back, apply the filter processing and finally encode and send:
int CITStreamTrans::StreamEnc(unsigned char* buffer_in, int32_t bufferLen_in, unsigned char** pkt_out, int32_t &bufferLen_out, bool bfilter)
{
    // Wrap the incoming BGR24 buffer in an AVFrame and convert it back to YUV420P for the encoder.
    av_image_fill_arrays(av_frame_enc_src->data, av_frame_enc_src->linesize, buffer_in,
                         AV_PIX_FMT_BGR24, m_nWidth, m_nHeight, 1);
    if (rgbtoyuv_ctx == NULL) {
        rgbtoyuv_ctx = sws_getContext(m_nWidth, m_nHeight, AV_PIX_FMT_BGR24,
                                      m_nWidth, m_nHeight, AV_PIX_FMT_YUV420P,
                                      SWS_FAST_BILINEAR, NULL, NULL, NULL);
    }
    sws_scale(rgbtoyuv_ctx, (const uint8_t* const*)av_frame_enc_src->data, av_frame_enc_src->linesize,
              0, m_nHeight, av_frame_enc->data, av_frame_enc->linesize);

    int ret = -1;
    if (bfilter)
    {
        Filterinit(av_frame_enc->width, av_frame_enc->height); // builds the filter graph once
        av_frame_enc->pts = av_frame_enc->best_effort_timestamp;
        if (av_buffersrc_add_frame_flags(buffersrc_ctx, av_frame_enc, AV_BUFFERSRC_FLAG_KEEP_REF) < 0) {
            av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
            return -1;
        }
        while (1) {
            ret = av_buffersink_get_frame(buffersink_ctx, filter_frame_out);
            // EAGAIN/EOF means the graph is drained; pkt_out/bufferLen_out already
            // hold the most recent encoded packet, so 1 signals "done for this frame".
            if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                return 1;
            if (ret < 0)
                return -1;
            // Encode the filtered frame.
            av_init_packet(&_pEncpkt);
            ret = avcodec_send_frame(_pEncCodecContext, filter_frame_out);
            while (ret >= 0) {
                ret = avcodec_receive_packet(_pEncCodecContext, &_pEncpkt);
                if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                    return 1;
                else if (ret < 0)
                    return -1;
                // Source and destination time bases are identical here, so these rescales are
                // no-ops; they are kept as placeholders for remuxing into a real output stream.
                _pEncpkt.pts = av_rescale_q_rnd(_pEncpkt.pts, _pEncCodecContext->time_base, _pEncCodecContext->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
                _pEncpkt.dts = av_rescale_q_rnd(_pEncpkt.dts, _pEncCodecContext->time_base, _pEncCodecContext->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
                _pEncpkt.duration = av_rescale_q(_pEncpkt.duration, _pEncCodecContext->time_base, _pEncCodecContext->time_base);
                _pEncpkt.pos = -1;
                _pEncpkt.stream_index = 0;
                // Copy the packet out; if the encoder emits several packets per frame,
                // only the last one survives in pEncBuff.
                memcpy(pEncBuff, _pEncpkt.data, _pEncpkt.size);
                *pkt_out = pEncBuff;
                bufferLen_out = _pEncpkt.size;
                av_packet_unref(&_pEncpkt);
            }
            av_frame_unref(filter_frame_out);
        }
    }
    else {
        av_init_packet(&_pEncpkt);
        ret = avcodec_send_frame(_pEncCodecContext, av_frame_enc);
        while (ret >= 0) {
            ret = avcodec_receive_packet(_pEncCodecContext, &_pEncpkt);
            if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                return 1;
            else if (ret < 0)
                return -1;
            _pEncpkt.pts = av_rescale_q_rnd(_pEncpkt.pts, _pEncCodecContext->time_base, _pEncCodecContext->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
            _pEncpkt.dts = av_rescale_q_rnd(_pEncpkt.dts, _pEncCodecContext->time_base, _pEncCodecContext->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
            _pEncpkt.duration = av_rescale_q(_pEncpkt.duration, _pEncCodecContext->time_base, _pEncCodecContext->time_base);
            _pEncpkt.pos = -1;
            _pEncpkt.stream_index = 0;
            memcpy(pEncBuff, _pEncpkt.data, _pEncpkt.size);
            *pkt_out = pEncBuff;
            bufferLen_out = _pEncpkt.size;
            av_packet_unref(&_pEncpkt);
        }
    }
    return ret; // avcodec_send_frame failed if execution falls through to here
}
Likewise, for convenience the function above converts the algorithm's RGB output straight back to YUV, then applies the image or text overlay depending on the bfilter parameter.
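The post never shows Filterinit itself. Below is a hedged sketch of how such a graph is commonly built with libavfilter for a drawtext overlay, wired to the buffersrc_ctx/buffersink_ctx members that StreamEnc uses; the filter_graph member, the one-time guard, and the drawtext parameters are assumptions rather than the project's real implementation:

// Plausible one-time filter-graph setup for a text overlay. Only buffersrc_ctx and
// buffersink_ctx are taken from StreamEnc above; everything else is assumed.
extern "C" {
#include <libavfilter/avfilter.h>
#include <libavfilter/buffersrc.h>
#include <libavfilter/buffersink.h>
}

int CITStreamTrans::Filterinit(int width, int height)
{
    if (filter_graph != NULL)  // already built
        return 0;

    char args[512];
    filter_graph = avfilter_graph_alloc();
    const AVFilter* buffersrc  = avfilter_get_by_name("buffer");
    const AVFilter* buffersink = avfilter_get_by_name("buffersink");

    // The source pad must describe the frames StreamEnc feeds in (YUV420P).
    snprintf(args, sizeof(args),
             "video_size=%dx%d:pix_fmt=%d:time_base=1/%d:pixel_aspect=1/1",
             width, height, (int)AV_PIX_FMT_YUV420P, m_nFps);
    int ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
                                           args, NULL, filter_graph);
    if (ret < 0) return ret;
    ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
                                       NULL, NULL, filter_graph);
    if (ret < 0) return ret;

    AVFilterInOut* outputs = avfilter_inout_alloc();
    AVFilterInOut* inputs  = avfilter_inout_alloc();
    outputs->name = av_strdup("in");   outputs->filter_ctx = buffersrc_ctx;
    outputs->pad_idx = 0;              outputs->next = NULL;
    inputs->name  = av_strdup("out");  inputs->filter_ctx  = buffersink_ctx;
    inputs->pad_idx = 0;               inputs->next = NULL;

    // drawtext overlay; for an image watermark a chain such as
    // "movie=logo.png[wm];[in][wm]overlay=10:10" would be used instead.
    const char* desc = "drawtext=fontfile=/usr/share/fonts/arial.ttf:"
                       "text='demo':x=10:y=10:fontsize=24:fontcolor=white";
    ret = avfilter_graph_parse_ptr(filter_graph, desc, &inputs, &outputs, NULL);
    if (ret >= 0)
        ret = avfilter_graph_config(filter_graph, NULL);

    avfilter_inout_free(&inputs);
    avfilter_inout_free(&outputs);
    return ret;
}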
Original CSDN post: https://blog.csdn.net/rose5996/article/details/115213349