[TOC]
A bit of rambling before we start
I'm about to start on the player and still don't know how to explain it to everyone. Headache.
After going back and forth, I figure the best move is to first build a simple video player in which video and audio each pace themselves independently, so everyone can get a feel for it and see the overall shape of things.
Me, I just go all in and bang the code out.
We'll crudely split the work into two threads, one in charge of audio playback and one in charge of video playback, reworking what we wrote in earlier chapters. In chapter_09/ we add two new classes, VideoThread and AudioThread, one for decoding video and one for decoding audio. For rendering we add a new class, AVRender, which is solely responsible for rendering and for handling window events.
Everything else I'd want to say is in the code comments.
AVRender: rendering and event handling
AVRender.h

```cpp
//
// Created by MirsFang on 2019-03-25.
//
#ifndef LEARNFFMPEG_AVRENDER_H
#define LEARNFFMPEG_AVRENDER_H
#define WINDOW_WIDTH 1080
#define WINDOW_HEIGHT 720
#include <iostream>
extern "C" {
#include <SDL2/SDL.h>
#include <libavcodec/avcodec.h>
}
/** Audio/video renderer **/
class AVRender {
public:
AVRender();
~AVRender();
/**
* Open the audio device
*
* @param sample_rate sample rate
* @param channel number of channels
* @param samples samples per callback (the size of one frame of audio data)
* @param userdata user data handed to the callback
* @param fill_audio callback used to fill the audio buffer
*/
void openAudio(int sample_rate, Uint8 channel, Uint16 samples, void *userdata,
void (*fill_audio)(void *codecContext, Uint8 *stream, int len));
/** Event loop **/
void loopEvent();
/** Render a video frame
*
* @param frame the video frame
* @param duration how long the frame should stay on screen, in ms
*/
void renderVideo(AVFrame *frame, Uint32 duration);
private:
/** SDL window **/
SDL_Window *window;
/** SDL renderer **/
SDL_Renderer *render;
/** SDL texture **/
SDL_Texture *texture;
/** Display area **/
SDL_Rect rect;
/** The audio output format we would like **/
SDL_AudioSpec wantSpec;
};
#endif //LEARNFFMPEG_AVRENDER_H
```
AVRender.cpp

```cpp
//
// Created by MirsFang on 2019-03-25.
//
#include "AVRender.h"
AVRender::AVRender() {
//Initialize SDL2
if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_EVENTS)) {
std::cout << "[error] SDL Init error !" << std::endl;
return;
}
//Create the window
window = SDL_CreateWindow("LearnFFmpeg", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, WINDOW_WIDTH,
WINDOW_HEIGHT, SDL_WINDOW_OPENGL);
if (!window) {
std::cout << "[error] SDL Create window error!" << std::endl;
return;
}
//Create the renderer
render = SDL_CreateRenderer(window, -1, 0);
//Create the texture (IYUV matches the decoder's YUV420P planes)
texture = SDL_CreateTexture(render, SDL_PIXELFORMAT_IYUV, SDL_TEXTUREACCESS_STREAMING, WINDOW_WIDTH, WINDOW_HEIGHT);
//Initialize the display rect
rect.x = 0;
rect.y = 0;
rect.w = WINDOW_WIDTH;
rect.h = WINDOW_HEIGHT;
}
AVRender::~AVRender() {
SDL_CloseAudio();
//Destroy the SDL resources before shutting SDL itself down
if (texture)SDL_DestroyTexture(texture);
if (render)SDL_DestroyRenderer(render);
if (window)SDL_DestroyWindow(window);
SDL_Quit();
}
void AVRender::loopEvent() {
SDL_Event event;
for (;;) {
//Block until an event arrives; polling in a tight loop would spin the CPU
//and read a stale event whenever the queue is empty
if (!SDL_WaitEvent(&event)) continue;
switch (event.type) {
case SDL_KEYDOWN:
switch (event.key.keysym.sym) {
}
break;
case SDL_QUIT:
return;
default:
break;
}
}
}
void AVRender::renderVideo(AVFrame *frame, Uint32 duration) {
if (frame == nullptr)return;
//Upload the YUV planes to the texture
SDL_UpdateYUVTexture(texture, &rect,
frame->data[0], frame->linesize[0],
frame->data[1], frame->linesize[1],
frame->data[2], frame->linesize[2]
);
SDL_RenderClear(render);
SDL_RenderCopy(render, texture, NULL, &rect);
SDL_RenderPresent(render);
//Crude pacing: block the calling (video) thread for the frame's display time in ms
SDL_Delay(duration);
}
void AVRender::openAudio(int sample_rate, Uint8 channel, Uint16 samples, void *userdata,
void (*fill_audio)(void *, Uint8 *, int)) {
//Fill in the audio spec we want SDL to use
wantSpec.freq = sample_rate;
wantSpec.format = AUDIO_S16SYS;
wantSpec.channels = channel;
wantSpec.silence = 0;
wantSpec.samples = samples;
wantSpec.callback = fill_audio;
wantSpec.userdata = userdata;
//Passing NULL as the obtained spec asks SDL to convert to our requested format if the hardware differs
if (SDL_OpenAudio(&wantSpec, NULL) < 0) {
std::cout << "[error] open audio error" << std::endl;
return;
}
SDL_PauseAudio(0);
}
```
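A small aside on SDL_OpenAudio: because we pass NULL as the second argument, SDL silently converts our requested format to whatever the hardware supports, so the decoder can keep feeding AUDIO_S16SYS at the requested rate. If you would rather see what the device actually runs at, you can pass a second spec instead; a minimal sketch of that variant (the obtainedSpec variable is my own addition, not part of AVRender above):

```cpp
// Sketch only: open the device and inspect its real parameters.
// With a non-NULL obtained spec SDL does NOT convert for us, so the audio
// pipeline would then have to resample to obtainedSpec.freq / .format itself.
SDL_AudioSpec obtainedSpec;
if (SDL_OpenAudio(&wantSpec, &obtainedSpec) < 0) {
    std::cout << "[error] open audio error" << std::endl;
    return;
}
std::cout << "[debug] device freq=" << obtainedSpec.freq
          << " channels=" << (int) obtainedSpec.channels
          << " samples=" << obtainedSpec.samples << std::endl;
SDL_PauseAudio(0);
```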
VideoThread: video decoding
The video decoding class, VideoThread.h

```cpp
//
// Created by MirsFang on 2019-03-25.
//
#ifndef LEARNFFMPEG_VIDEOTHREAD_H
#define LEARNFFMPEG_VIDEOTHREAD_H
#include <pthread.h>
#include <iostream>
#include "AVRender.h"
extern "C" {
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
};
/** Video thread **/
class VideoThread {
public:
VideoThread();
~VideoThread();
/** Set the media path **/
void setUrl(const char *url);
/** Set the renderer **/
void setRender(AVRender *render);
/** Start the thread **/
void start();
private:
AVFormatContext *format_context;
AVCodecContext *codec_context;
AVCodec *codec;
AVPacket *packet;
AVFrame *frame;
const char *url;
int video_index;
pthread_t pid;
pthread_mutex_t mutex;
AVRender *avRender;
double last_pts = 0;
/** Frame-interval sync switch **/
bool is_interval_sync = true;
static void *start_thread(void *arg);
void run();
/** Initialize the decoder **/
void prepare_codec();
/** Decode frames **/
void decodec_frame();
/**
* Get the display duration from the frame rate
* @param frame_rate frame rate
* @return display duration in ms
*/
Uint32 sync_frame_rate(double frame_rate);
/**
* Get the display duration of one frame from the pts interval
* @param timebase the stream's time base
* @param pts presentation timestamp, in time_base units
* @return display duration in seconds
*/
double sync_frame_interval(AVRational timebase, int64_t pts);
};
#endif //LEARNFFMPEG_VIDEOTHREAD_H
```
VideoThread.cpp

```cpp
//
// Created by MirsFang on 2019-03-25.
//
#include "VideoThread.h"
VideoThread::VideoThread() {
}
VideoThread::~VideoThread() {
if (format_context != nullptr) avformat_close_input(&format_context);
if (codec_context != nullptr) avcodec_free_context(&codec_context);
if (packet != nullptr) av_packet_free(&packet);
if (frame != nullptr) av_frame_free(&frame);
}
void VideoThread::start() {
prepare_codec();
if (pthread_create(&pid, NULL, start_thread, (void *) this) != 0) {
std::cout << "初始化视频线程失败!" << std::endl;
return;
}
}
void *VideoThread::start_thread(void *arg) {
VideoThread *videoThread = (VideoThread *) arg;
videoThread->run();
return nullptr;
}
void VideoThread::run() {
std::cout << "视频线程运行中..." << std::endl;
decodec_frame();
}
void VideoThread::setRender(AVRender *render) {
this->avRender = render;
}
void VideoThread::setUrl(const char *url) {
this->url = url;
}
void VideoThread::prepare_codec() {
int retcode;
//Allocate the AVFormatContext
format_context = avformat_alloc_context();
if (!format_context) {
std::cout << "[error] alloc format context error!" << std::endl;
return;
}
//Open the input stream
retcode = avformat_open_input(&format_context, url, nullptr, nullptr);
if (retcode != 0) {
std::cout << "[error] open input error!" << std::endl;
return;
}
//Read the stream information of the media file
retcode = avformat_find_stream_info(format_context, NULL);
if (retcode < 0) {
std::cout << "[error] find stream error!" << std::endl;
return;
}
//Allocate the codec context
codec_context = avcodec_alloc_context3(NULL);
if (!codec_context) {
std::cout << "[error] alloc codec context error!" << std::endl;
return;
}
//Find the index of the video stream
video_index = av_find_best_stream(format_context, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
if (video_index < 0) {
std::cout << "[error] no video stream found!" << std::endl;
return;
}
//Copy the video stream's codec parameters into the codec context
retcode = avcodec_parameters_to_context(codec_context, format_context->streams[video_index]->codecpar);
if (retcode < 0) {
std::cout << "[error] parameters to context error!" << std::endl;
return;
}
//Find the decoder
codec = avcodec_find_decoder(codec_context->codec_id);
if (codec == nullptr) {
std::cout << "[error] find decoder error!" << std::endl;
return;
}
//Open the decoder
retcode = avcodec_open2(codec_context, codec, nullptr);
if (retcode != 0) {
std::cout << "[error] open decoder error!" << std::endl;
return;
}
//Allocate a packet
packet = av_packet_alloc();
//Allocate a frame
frame = av_frame_alloc();
}
void VideoThread::decodec_frame() {
int sendcode = 0;
//Compute the frame rate
double frameRate = av_q2d(format_context->streams[video_index]->avg_frame_rate);
//Display duration of one frame
Uint32 display_time_ms = 0;
if (!is_interval_sync) {
display_time_ms = sync_frame_rate(frameRate);
}
//Used to measure how long decoding a frame took
clock_t start = 0, finish = 0;
//Read packets
while (av_read_frame(format_context, packet) == 0) {
if (packet->stream_index != video_index) {
av_packet_unref(packet);
continue;
}
//Receive decoded frames
while (avcodec_receive_frame(codec_context, frame) == 0) {
/**
* In frame-interval sync mode the idea is:
*
* display time = current frame pts - previous frame pts - time spent decoding this frame
*
* which gives the time the current frame should actually stay on screen.
*
* **/
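/**
* A concrete, made-up example of the formula above: for a 25 fps stream with
* time_base 1/90000, consecutive pts values differ by 3600 ticks, so
* sync_frame_interval() returns 3600 * (1/90000) = 0.04 s, i.e. 40 ms. If
* decoding the frame took 5 ms, the frame is shown after a ~35 ms delay.
**/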
if (is_interval_sync) {
//Delay between the previous frame and the current one
display_time_ms = (Uint32) (
sync_frame_interval(format_context->streams[video_index]->time_base, frame->pts) * 1000);
//Time at which decoding of this frame finished
finish = clock();
//clock() counts in CLOCKS_PER_SEC units, so convert the decode time to milliseconds
double diff_time = (double) (finish - start) * 1000 / CLOCKS_PER_SEC;
//Subtract the decode time (finish - start) from the display time
if (display_time_ms > diff_time)display_time_ms = display_time_ms - (Uint32) diff_time;
}
//Render the picture
if (avRender)avRender->renderVideo(frame, display_time_ms);
av_frame_unref(frame);
//Time at which decoding of the next frame starts
start = clock();
}
//Send the compressed packet to the decoder
sendcode = avcodec_send_packet(codec_context, packet);
//Check the return value
if (sendcode == 0) {
// std::cout << "[debug] " << "SUCCESS" << std::endl;
} else if (sendcode == AVERROR_EOF) {
std::cout << "[debug] " << "EOF" << std::endl;
} else if (sendcode == AVERROR(EAGAIN)) {
std::cout << "[debug] " << "EAGAIN" << std::endl;
} else {
std::cout << "[debug] " << av_err2str(sendcode) << std::endl;
}
av_packet_unref(packet);
}
}
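/** Example: for a 25 fps stream, sync_frame_rate(25) returns 1000 / 25 = 40 ms per frame. **/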
Uint32 VideoThread::sync_frame_rate(double frame_rate) {
return 1 * 1000 / frame_rate;
}
double VideoThread::sync_frame_interval(AVRational timebase, int64_t pts) {
double display = (pts - last_pts) * av_q2d(timebase);
last_pts = pts;
std::cout << "pts : " << pts * av_q2d(timebase) << " -- display :" << display << std::endl;
return display;
}
```
AudioThread: audio decoding
AudioThread.h

```cpp
//
// Created by MirsFang on 2019-03-25.
//
#ifndef LEARNFFMPEG_AUDIOTHREAD_H
#define LEARNFFMPEG_AUDIOTHREAD_H
#include <pthread.h>
#include <iostream>
extern "C" {
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libswresample/swresample.h>
};
#include "AVRender.h"
/**
* Audio thread
*/
class AudioThread {
public:
AudioThread();
~AudioThread();
void setUrl(const char *url);
/** Start the thread **/
void start();
/** Set the renderer **/
void setRender(AVRender *render);
private:
/** Resampling context **/
SwrContext *convert_context;
AVFormatContext *format_context;
AVCodecContext *codec_context;
AVCodec *codec;
AVPacket *packet;
AVFrame *frame;
int audioIndex = -1;
uint64_t out_chn_layout = AV_CH_LAYOUT_STEREO; //output channel layout: stereo
enum AVSampleFormat out_sample_fmt = AV_SAMPLE_FMT_S16; //output sample format
int out_sample_rate = 44100; //output sample rate
int out_nb_samples = -1; //output samples per frame
int out_channels = -1; //output channel count
int out_buffer_size = -1; //output buffer size in bytes
unsigned char *outBuff = NULL; //output buffer
uint64_t in_chn_layout = -1; //input channel layout
pthread_t pid;
pthread_mutex_t mutex;
AVRender *av_render;
const char *url;
static void *start_thread(void *arg);
void run();
/** Initialize the decoder **/
void prepare_codec();
};
#endif //LEARNFFMPEG_AUDIOTHREAD_H
```
AudioThread.cpp

```cpp
//
// Created by MirsFang on 2019-03-25.
//
#include "AudioThread.h"
#define MAX_AUDIO_FRAME_SIZE 192000 // 1 second of 48khz 32bit audio 48000 * (32/8)
//Length in bytes of the PCM chunk currently being played
unsigned int audioLen = 0;
unsigned char *audioChunk = nullptr;
//Current read position inside the chunk
unsigned char *audioPos = nullptr;
/** Callback invoked by SDL2 whenever the audio device needs more data to play **/
void fill_audio(void *codecContext, Uint8 *stream, int len) {
//In SDL2 the stream must first be zeroed with SDL_memset()
SDL_memset(stream, 0, len);
if (audioLen == 0)
return;
len = (len > audioLen ? audioLen : len);
//Mix our data into the stream
SDL_MixAudio(stream, audioPos, len, SDL_MIX_MAXVOLUME);
//Advance within the current chunk
audioPos += len;
audioLen -= len;
}
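/**
* Data flow between the two sides of this file:
* - the decode loop in AudioThread::run() converts one AVFrame to PCM, points
*   audioChunk/audioPos at it and sets audioLen to its size in bytes;
* - SDL pulls from fill_audio() on its own audio thread until audioLen hits 0;
* - the decode loop waits on audioLen before overwriting the buffer with the
*   next frame, so audio paces itself off the sound card's consumption rate.
**/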
AudioThread::AudioThread() {
}
AudioThread::~AudioThread() {
if (format_context != nullptr) avformat_close_input(&format_context);
if (codec_context != nullptr) avcodec_free_context(&codec_context);
if (packet != nullptr) av_packet_free(&packet);
if (frame != nullptr) av_frame_free(&frame);
if (convert_context != nullptr) swr_free(&convert_context);
}
void AudioThread::start() {
prepare_codec();
if (pthread_create(&pid, NULL, start_thread, (void *) this) != 0) {
std::cout << "初始化音频线程失败!" << std::endl;
return;
}
}
void *AudioThread::start_thread(void *arg) {
AudioThread *audioThread = (AudioThread *) arg;
audioThread->run();
return nullptr;
}
void AudioThread::run() {
std::cout << "音频线程已启动" << std::endl;
//循环读取packet并且解码
int sendcode = 0;
while (av_read_frame(format_context, packet) >= 0) {
if (packet->stream_index != audioIndex)continue;
//接受解码后的音频数据
while (avcodec_receive_frame(codec_context, frame) == 0) {
swr_convert(convert_context, &outBuff, MAX_AUDIO_FRAME_SIZE, (const uint8_t **) frame->data,
frame->nb_samples);
//如果没有播放完就等待1ms
while (audioLen > 0)
SDL_Delay(1);
//同步数据
audioChunk = (unsigned char *) outBuff;
audioPos = audioChunk;
audioLen = out_buffer_size;
av_frame_unref(frame);
}
//发送解码前的包数据
sendcode = avcodec_send_packet(codec_context, packet);
//根据发送的返回值判断状态
if (sendcode == 0) {
// std::cout << "[debug] " << "SUCCESS" << std::endl;
} else if (sendcode == AVERROR_EOF) {
std::cout << "[debug] " << "EOF" << std::endl;
} else if (sendcode == AVERROR(EAGAIN)) {
std::cout << "[debug] " << "EAGAIN" << std::endl;
} else {
std::cout << "[debug] " << av_err2str(AVERROR(sendcode)) << std::endl;
}
av_packet_unref(packet);
}
}
void AudioThread::setRender(AVRender *render) {
this->av_render = render;
}
void AudioThread::prepare_codec() {
int retcode;
//Allocate the AVFormatContext
format_context = avformat_alloc_context();
if (!format_context) {
std::cout << "[error] alloc format context error!" << std::endl;
return;
}
//Open the input stream
retcode = avformat_open_input(&format_context, url, nullptr, nullptr);
if (retcode != 0) {
std::cout << "[error] open input error!" << std::endl;
return;
}
//Read the stream information of the media file
retcode = avformat_find_stream_info(format_context, NULL);
if (retcode < 0) {
std::cout << "[error] find stream error!" << std::endl;
return;
}
//Allocate the codec context
codec_context = avcodec_alloc_context3(NULL);
if (!codec_context) {
std::cout << "[error] alloc codec context error!" << std::endl;
return;
}
//Find the index of the audio stream
audioIndex = av_find_best_stream(format_context, AVMEDIA_TYPE_AUDIO, -1, -1, NULL, 0);
if (audioIndex < 0) {
std::cout << "[error] no audio stream found!" << std::endl;
return;
}
//Copy the audio stream's codec parameters into the codec context
retcode = avcodec_parameters_to_context(codec_context, format_context->streams[audioIndex]->codecpar);
if (retcode < 0) {
std::cout << "[error] parameters to context error!" << std::endl;
return;
}
//Find the decoder
codec = avcodec_find_decoder(codec_context->codec_id);
if (codec == nullptr) {
std::cout << "[error] find decoder error!" << std::endl;
return;
}
//Open the decoder
retcode = avcodec_open2(codec_context, codec, nullptr);
if (retcode != 0) {
std::cout << "[error] open decoder error!" << std::endl;
return;
}
//Allocate a packet
packet = av_packet_alloc();
//Allocate a frame
frame = av_frame_alloc();
/** ########## Work out the audio parameters ##########**/
//Number of samples per channel in one frame
out_nb_samples = codec_context->frame_size;
//Number of output channels
out_channels = av_get_channel_layout_nb_channels(out_chn_layout);
//Channel layout of the input (source) audio
in_chn_layout = av_get_default_channel_layout(codec_context->channels);
/** Compute the size of the resampled data and allocate the buffer **/
//Size in bytes of one resampled frame
out_buffer_size = av_samples_get_buffer_size(NULL, out_channels, out_nb_samples, out_sample_fmt, 1);
//Allocate the output buffer
outBuff = (unsigned char *) av_malloc(MAX_AUDIO_FRAME_SIZE * 2); //stereo
//Open the SDL audio device with the parameters we want
if (av_render)av_render->openAudio(out_sample_rate, out_channels, out_nb_samples, codec_context, fill_audio);
//Set up the resampler: input layout/format/rate -> output layout/format/rate
convert_context = swr_alloc_set_opts(NULL, out_chn_layout, out_sample_fmt, out_sample_rate,
in_chn_layout, codec_context->sample_fmt, codec_context->sample_rate, 0,
NULL);
//Initialize the SwrContext
swr_init(convert_context);
}
void AudioThread::setUrl(const char *url) {
this->url = url;
}
```
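One detail to be aware of in run(): the third argument to swr_convert is the output buffer capacity in samples per channel, not in bytes, so passing MAX_AUDIO_FRAME_SIZE only stays safe because a single decoded frame is far smaller than the buffer we allocated. If you want to size it exactly, here is a hedged sketch (the helper below is my own, not part of the class above):

```cpp
// Sketch only: size the swr_convert output precisely instead of passing
// MAX_AUDIO_FRAME_SIZE. max_output_samples() is a hypothetical helper.
extern "C" {
#include <libswresample/swresample.h>
#include <libavutil/mathematics.h>
}

static int max_output_samples(SwrContext *swr, int in_samples,
                              int in_sample_rate, int out_sample_rate) {
    //swr_get_delay() reports samples still buffered inside the resampler,
    //expressed here in the input sample rate
    int64_t delayed = swr_get_delay(swr, in_sample_rate);
    return (int) av_rescale_rnd(delayed + in_samples,
                                out_sample_rate, in_sample_rate, AV_ROUND_UP);
}

// Usage inside AudioThread::run() would then look like:
//   int out_samples = max_output_samples(convert_context, frame->nb_samples,
//                                        codec_context->sample_rate, out_sample_rate);
//   swr_convert(convert_context, &outBuff, out_samples,
//               (const uint8_t **) frame->data, frame->nb_samples);
```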
Finally, in main() we wire everything together:

```cpp
#ifdef chapter_09
//Create the renderer
AVRender* render = new AVRender();
//Set up the video thread
VideoThread *videoThread = new VideoThread();
videoThread->setRender(render);
videoThread->setUrl(url);
//Set up the audio thread
AudioThread *audioThread = new AudioThread();
audioThread->setRender(render);
audioThread->setUrl(url);
//Start the audio and video threads
videoThread->start();
audioThread->start();
//Event loop (blocks the main thread until SDL_QUIT)
render->loopEvent();
#endif
```
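One caveat about the flow above: when you close the window, loopEvent() returns and the process exits while both decode threads are still inside their read loops, so there is no clean shutdown yet. A common way to handle that (just a sketch, not implemented in the chapter_09 classes) is a shared quit flag that the event loop sets and the decode loops check:

```cpp
// Sketch of a shared quit flag; g_quit is hypothetical and not declared anywhere above.
#include <atomic>

std::atomic<bool> g_quit{false};

// In AVRender::loopEvent(), when SDL_QUIT arrives:
//     g_quit = true;
//     return;

// In the packet-reading loops of VideoThread / AudioThread:
//     while (!g_quit && av_read_frame(format_context, packet) == 0) { ... }
//
// Once both loops exit, the worker threads can be pthread_join()ed and the
// objects deleted before main() returns.
```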
If nothing went wrong, the video should now play normally...
Good luck, everyone.
To be continued ...