Let's first look at the flow diagram of Android live streaming, and implement it step by step along that flow:
Video Capture
- First, add the required permissions:
<uses-permission android:name="android.permission.CAMERA" />
<uses-feature android:name="android.hardware.camera" />
- The camera data can be captured with either the Camera class or the Camera2 API. Camera2 is not backward compatible (it requires API 21 and above), so I use Camera here to capture the video data. For displaying the preview there are two options:
- draw it on a SurfaceView
- render it to an OpenGL texture ID and draw it with a GLSurfaceView
I use the first approach here; if you want the details of the second one, see 《基于GLSurfaceView实现自定义Camera》 (implementing a custom camera on top of GLSurfaceView).
Each video frame from the camera is delivered in the following callback:
Camera.PreviewCallback previewCallback = new Camera.PreviewCallback() {
@Override
public void onPreviewFrame(byte[] data, Camera camera) {
// handle the raw video frame data here
}
};
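This callback only fires once it has been registered on an opened camera. Since that wiring is not shown here, below is a minimal sketch of opening the camera, binding it to the SurfaceView's holder and registering the callback; the member names mCamera and previewCallback come from the snippets in this article, while the facing choice and the single callback buffer are assumptions:
private void startPreview(SurfaceHolder holder) {
    try {
        mCamera = Camera.open();
        Camera.Parameters params = mCamera.getParameters();
        // NV21 is the default preview format and what the encoding code below expects
        params.setPreviewFormat(ImageFormat.NV21);
        mCamera.setParameters(params);
        Camera.Size size = params.getPreviewSize();
        // one NV21 frame occupies width * height * 3 / 2 bytes
        mCamera.addCallbackBuffer(new byte[size.width * size.height * 3 / 2]);
        // draw the preview on the SurfaceView
        mCamera.setPreviewDisplay(holder);
        // reuse callback buffers; the encoding code below hands them back via addCallbackBuffer()
        mCamera.setPreviewCallbackWithBuffer(previewCallback);
        mCamera.startPreview();
    } catch (IOException e) {
        e.printStackTrace();
    }
}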
Processing the Video Data
The data captured above is raw YUV video. Its main drawback is that it is very large, which makes it a poor fit for direct transmission during a live stream, so we encode the YUV frames into H.264 to shrink them. On Android there are several ways to encode raw video, for example software encoders such as x264 or FFmpeg; for simplicity and efficiency we use Android's built-in hardware encoder, MediaCodec.
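The core encoding method shown next assumes a MediaCodec member mMediaCodec that has already been configured and started, plus an mConvertor helper that copies the rotated NV21 frame into the input buffer in the color format the encoder expects; neither is shown in the original snippet. Here is a minimal configuration sketch; the bit rate, frame rate and COLOR_FormatYUV420SemiPlanar input format are assumptions and should be checked against the device's codec capabilities:
private MediaCodec mMediaCodec;

private void prepareVideoEncoder(int width, int height) throws IOException {
    // width/height must describe the frame as it is fed to the encoder (the preview is rotated 90° below)
    MediaFormat format = MediaFormat.createVideoFormat(MediaFormat.MIMETYPE_VIDEO_AVC, width, height);
    // input color format; many devices accept YUV420SemiPlanar, but this should be verified
    // via MediaCodecInfo.CodecCapabilities on the target device
    format.setInteger(MediaFormat.KEY_COLOR_FORMAT,
            MediaCodecInfo.CodecCapabilities.COLOR_FormatYUV420SemiPlanar);
    format.setInteger(MediaFormat.KEY_BIT_RATE, 1200 * 1000); // ~1.2 Mbps
    format.setInteger(MediaFormat.KEY_FRAME_RATE, 24);        // matches the decoder's frame rate below
    format.setInteger(MediaFormat.KEY_I_FRAME_INTERVAL, 1);   // request a key frame every second
    mMediaCodec = MediaCodec.createEncoderByType(MediaFormat.MIMETYPE_VIDEO_AVC);
    mMediaCodec.configure(format, null, null, MediaCodec.CONFIGURE_FLAG_ENCODE);
    mMediaCodec.start();
}
With the encoder configured and started, the core per-frame encoding method looks like this: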
private void codeCYuv(byte[] data){
if (data == null) {
return;
}
ByteBuffer[] inputBuffers = mMediaCodec.getInputBuffers();
ByteBuffer[] outputBuffers = mMediaCodec.getOutputBuffers();
byte[] dst = new byte[data.length];
Camera.Size previewSize = mCamera.getParameters().getPreviewSize();
if (getDgree() == 0) {
dst = Util.rotateNV21Degree90(data, previewSize.width, previewSize.height);
} else {
dst = data;
}
try {
int bufferIndex = mMediaCodec.dequeueInputBuffer(5000000);
if (bufferIndex >= 0) {
inputBuffers[bufferIndex].clear();
mConvertor.convert(dst, inputBuffers[bufferIndex]);
mMediaCodec.queueInputBuffer(bufferIndex, 0,
inputBuffers[bufferIndex].position(),
System.nanoTime() / 1000, 0);
MediaCodec.BufferInfo bufferInfo = new MediaCodec.BufferInfo();
int outputBufferIndex = mMediaCodec.dequeueOutputBuffer(bufferInfo, 0);
while (outputBufferIndex >= 0) {
ByteBuffer outputBuffer = outputBuffers[outputBufferIndex];
byte[] outData = new byte[bufferInfo.size];
outputBuffer.get(outData);
// cache the SPS/PPS configuration data
if (outData[0] == 0 && outData[1] == 0 && outData[2] == 0 && outData[3] == 1 && outData[4] == 103) {
mPpsSps = outData;
} else if (outData[0] == 0 && outData[1] == 0 && outData[2] == 0 && outData[3] == 1 && outData[4] == 101) {
// prepend the cached SPS/PPS to every key frame
byte[] iframeData = new byte[mPpsSps.length + outData.length];
System.arraycopy(mPpsSps, 0, iframeData, 0, mPpsSps.length);
System.arraycopy(outData, 0, iframeData, mPpsSps.length, outData.length);
outData = iframeData;
}
// send the frame over the socket
writeData(outData,1);
// mPlayer.decodeH264(outData);
mMediaCodec.releaseOutputBuffer(outputBufferIndex, false);
outputBufferIndex = mMediaCodec.dequeueOutputBuffer(bufferInfo, 0);
}
} else {
Log.e("easypusher", "No buffer available !");
}
} catch (Exception e) {
StringWriter sw = new StringWriter();
PrintWriter pw = new PrintWriter(sw);
e.printStackTrace(pw);
String stack = sw.toString();
Log.e("save_log", stack);
e.printStackTrace();
} finally {
mCamera.addCallbackBuffer(dst);
}
}
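Two helpers used above are not shown in this article: Util.rotateNV21Degree90, which rotates the NV21 preview frame, and mConvertor, which converts the frame into the encoder's input format. As a reference only, here is a minimal sketch of an NV21-to-NV12 conversion, under the assumption that the encoder was configured with COLOR_FormatYUV420SemiPlanar as in the sketch above:
// NV21 (camera) and NV12 (YUV420SemiPlanar) differ only in the order of the interleaved chroma bytes:
// NV21 stores V,U pairs after the Y plane, NV12 stores U,V pairs.
private void convertNV21ToNV12(byte[] nv21, ByteBuffer dst, int width, int height) {
    int ySize = width * height;
    // the Y plane is identical in both formats
    dst.put(nv21, 0, ySize);
    // swap every V,U pair into U,V order
    for (int i = ySize; i < ySize + ySize / 2; i += 2) {
        dst.put(nv21[i + 1]); // U
        dst.put(nv21[i]);     // V
    }
}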
Audio Capture
Android also provides two native ways to capture audio:
- capture with AudioRecord
- capture with MediaRecorder
Since we want the raw audio data here, we use AudioRecord. Let's go straight to the core capture code:
- First, add the required permissions:
<uses-permission android:name="android.permission.RECORD_AUDIO" />
<uses-permission android:name="android.permission.WRITE_EXTERNAL_STORAGE" />
<uses-permission android:name="android.permission.READ_EXTERNAL_STORAGE" />
- Then start recording and obtain the raw audio data:
// whether recording is in progress
private boolean isRecording = false;
// recording parameters
private int frequence = 44100; // sample rate in Hz; be careful with this value: some rates (I first tried 11025) fail to instantiate AudioRecord on certain devices, it depends on the hardware
private int channelConfig = AudioFormat.CHANNEL_IN_MONO;
private int audioEncoding = AudioFormat.ENCODING_PCM_16BIT;
/**
* Start recording
*/
private void startRecord(){
isRecording = true;
new Thread() {
@Override
public void run() {
super.run();
try {
// use the parameters above to compute a suitable buffer size
int bufferSize = AudioRecord.getMinBufferSize(frequence, channelConfig, audioEncoding);
// instantiate the AudioRecord
AudioRecord record = new AudioRecord(MediaRecorder.AudioSource.MIC, frequence, channelConfig, audioEncoding, bufferSize);
// start recording
record.startRecording();
AacEncode aacMediaEncode = new AacEncode();
// read buffer
byte[] buffer = new byte[bufferSize];
// keep reading as long as isRecording is true
while (isRecording) {
// read up to bufferSize bytes; buffer now holds the raw PCM data
int bufferReadResult = record.read(buffer, 0, bufferSize);
if (bufferReadResult > 0) {
// encode the PCM chunk to AAC and send it; creatHead() below treats any type other than 1 as audio
byte[] aacData = aacMediaEncode.offerEncoder(buffer);
if (aacData.length != 0) {
writeData(aacData, 2);
}
}
}
// recording finished
record.stop();
record.release();
// release the encoder
aacMediaEncode.close();
// dos.close();
} catch (Exception e) {
e.printStackTrace();
}
}
}.start();
}
Processing the Audio Data
Once we have the PCM data from the microphone we could process it here, for example voice effects or noise suppression; we skip that in this article. The raw PCM data is still quite large, so we want to shrink it before sending it over the network. We therefore encode the PCM data into AAC, which is much friendlier to transmit. Just like the video path, this uses Android's hardware encoder, MediaCodec:
/**
* @CreadBy :DramaScript
* @date 2017/8/22
*/
public class AacEncode {
private MediaCodec mediaCodec;
private String mediaType = "OMX.google.aac.encoder";
ByteBuffer[] inputBuffers = null;
ByteBuffer[] outputBuffers = null;
MediaCodec.BufferInfo bufferInfo;
// running frame index used as the PTS base
long presentationTimeUs = 0;
// output stream that collects the encoded AAC data
ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
public AacEncode() {
try {
mediaCodec = MediaCodec.createEncoderByType(MediaFormat.MIMETYPE_AUDIO_AAC);
//mediaCodec = MediaCodec.createByCodecName(mediaType);
} catch (IOException e) {
e.printStackTrace();
}
// candidate sample rates; 44100 Hz is the common standard, some devices also support 22050, 16000 or 11025
final int kSampleRates[] = {8000, 11025, 22050, 44100, 48000};
// candidate bit rates: the amount of encoded data per unit of time, an indirect measure of audio quality
final int kBitRates[] = {64000, 96000, 128000};
// describe the output format: AAC encoding, 44100 Hz sample rate, channel count (1 = mono, 2 = stereo)
MediaFormat mediaFormat = MediaFormat.createAudioFormat(
MediaFormat.MIMETYPE_AUDIO_AAC, kSampleRates[3], 1);
mediaFormat.setString(MediaFormat.KEY_MIME, MediaFormat.MIMETYPE_AUDIO_AAC);
mediaFormat.setInteger(MediaFormat.KEY_AAC_PROFILE,
MediaCodecInfo.CodecProfileLevel.AACObjectLC);
// bit rate: 96 kbps
mediaFormat.setInteger(MediaFormat.KEY_BIT_RATE, kBitRates[1]);
// maximum size of one input buffer
mediaFormat.setInteger(MediaFormat.KEY_MAX_INPUT_SIZE, 1024 * 1024);
// configure the codec as an encoder
mediaCodec.configure(mediaFormat, null, null,
MediaCodec.CONFIGURE_FLAG_ENCODE);
// start it
mediaCodec.start();
inputBuffers = mediaCodec.getInputBuffers();
outputBuffers = mediaCodec.getOutputBuffers();
bufferInfo = new MediaCodec.BufferInfo();
}
/**
* Close the encoder and release resources
*
* @author:gj
* @date: 2017/4/25
* @time: 16:19
**/
public void close() {
try {
mediaCodec.stop();
mediaCodec.release();
outputStream.flush();
outputStream.close();
} catch (Exception e) {
e.printStackTrace();
}
}
/**
* Encode one buffer of PCM data to AAC
*
* @author:gj
* @date: 2017/4/25
* @time: 16:19
**/
public byte[] offerEncoder(byte[] input) throws Exception {
Log.e("offerEncoder", input.length + " is coming");
int inputBufferIndex = mediaCodec.dequeueInputBuffer(-1);// the argument of dequeueInputBuffer is a timeout in microseconds: -1 blocks until an input buffer is available, 0 returns immediately (which may drop frames when no buffer is free)
if (inputBufferIndex >= 0) {
ByteBuffer inputBuffer = inputBuffers[inputBufferIndex];
inputBuffer.clear();
inputBuffer.put(input);
inputBuffer.limit(input.length);
// compute the PTS
long pts = computePresentationTime(presentationTimeUs);
mediaCodec
.queueInputBuffer(inputBufferIndex, 0, input.length, pts, 0);
presentationTimeUs += 1;
}
int outputBufferIndex = mediaCodec.dequeueOutputBuffer(bufferInfo, 0);
while (outputBufferIndex >= 0) {
int outBitsSize = bufferInfo.size;
int outPacketSize = outBitsSize + 7; // 7 is ADTS size
ByteBuffer outputBuffer = outputBuffers[outputBufferIndex];
outputBuffer.position(bufferInfo.offset);
outputBuffer.limit(bufferInfo.offset + outBitsSize);
// add the ADTS header
byte[] outData = new byte[outPacketSize];
addADTStoPacket(outData, outPacketSize);
outputBuffer.get(outData, 7, outBitsSize);
outputBuffer.position(bufferInfo.offset);
// write to the output stream
outputStream.write(outData);
// Log.e("AudioEncoder", outData.length + " bytes written");
mediaCodec.releaseOutputBuffer(outputBufferIndex, false);
outputBufferIndex = mediaCodec.dequeueOutputBuffer(bufferInfo, 0);
}
// convert the collected output into a byte[]
byte[] out = outputStream.toByteArray();
// reset the stream afterwards, otherwise the data would accumulate and repeat
outputStream.flush();
outputStream.reset();
// return the encoded AAC
return out;
}
/**
* Add the 7-byte ADTS header in front of a raw AAC frame
*
* @param packet    buffer whose first 7 bytes are reserved for the header, otherwise the data gets corrupted
* @param packetLen total packet length including the 7 header bytes
*/
private void addADTStoPacket(byte[] packet, int packetLen) {
int profile = 2; // AAC LC
int freqIdx = 4; // 44.1 kHz
int chanCfg = 1; // channel configuration: 1 = mono
packet[0] = (byte) 0xFF;
packet[1] = (byte) 0xF9;
packet[2] = (byte) (((profile - 1) << 6) + (freqIdx << 2) + (chanCfg >> 2));
packet[3] = (byte) (((chanCfg & 3) << 6) + (packetLen >> 11));
packet[4] = (byte) ((packetLen & 0x7FF) >> 3);
packet[5] = (byte) (((packetLen & 7) << 5) + 0x1F);
packet[6] = (byte) 0xFC;
}
// compute the PTS; for this audio path the exact PTS matters little (0 would also work).
// MediaCodec expects timestamps in microseconds, and one AAC frame holds 1024 samples at 44.1 kHz
private long computePresentationTime(long frameIndex) {
return frameIndex * 1000000L * 1024 / 44100;
}
}
Calling offerEncoder, as the recording loop above does, gives you the encoded AAC data.
Sending the Data over the Network
Since this is live streaming, the real-time requirements on the network are strict. At the transport layer we could use TCP or UDP and push the data through a socket; here we use a plain TCP socket. Let's briefly go over how to use Socket on Android:
- First we need a Socket, and we want it to be a singleton. We can initialize it in the Application and fetch it through the Application instance, because the Application is unique within an Android app. One important caveat: every socket operation must run on a worker thread, never on the main thread, otherwise Android throws a NetworkOnMainThreadException:
public class App extends Application {
private static App sInstance;
private Socket socket;
private final String HOST = "192.168.156.72";
private final int PORT = 4321;
public Socket getSocket() {
if (socket == null) {
try {
socket = new Socket(HOST, PORT);
} catch (IOException e) {
e.printStackTrace();
}
}
return socket;
}
public static App getInstance() {
return sInstance;
}
@Override
public void onCreate() {
super.onCreate();
sInstance = this;
}
}
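One usage note: because getSocket() creates the connection lazily, even the first call has to happen on a worker thread. A minimal sketch (the wrapper thread is only for illustration):
new Thread(new Runnable() {
    @Override
    public void run() {
        // the first call opens the TCP connection, so it must not run on the main thread
        Socket socket = App.getInstance().getSocket();
        if (socket != null && socket.isConnected()) {
            // ready to send and receive data
        }
    }
}).start();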
- Writing data with the Socket. We use the socket to push the audio and video frames to the server. Before sending, we add a custom header to every frame. Why? Because we send the data frame by frame, but TCP is a byte stream: when the server forwards it to the other end, a single read rarely lines up with exactly one frame. To let the receiver easily recover a complete frame for playback, we prepend a small custom header to every frame; it is the ASCII string start&video&<timestamp>&<length>&end (or music for audio), zero-padded to a fixed 40 bytes. Below is the simple method that builds this header:
/**
* Prepend a custom header to every frame
*/
private byte[] creatHead(byte[] out,int type) {
String head = "";
if (type==1){
head = "start&video&" + System.currentTimeMillis() + "&" + out.length + "&end";
}else {
head = "start&music&" + System.currentTimeMillis() + "&" + out.length + "&end";
}
byte[] headBytes = new byte[40];
System.arraycopy(head.getBytes(), 0, headBytes, 0, head.getBytes().length);
Log.e("writeSteam", "头部长度:" + headBytes.length);
for (byte b:"start".getBytes()){
Log.e("writeSteam", "头部数据:"+b);
}
if (headBytes[0] == 0x73 && headBytes[1] == 0x74 && headBytes[2] == 0x61 && headBytes[3] == 0x72 && headBytes[4] == 0x74) {
Log.e("writeSteam", "确认是头部");
}
String outHead = new String(headBytes);
Log.e("writeSteam", "头部:" + outHead);
String[] headSplit = outHead.split("&");
for (String s : headSplit) {
Log.e("writeSteam", "截取部分:" + s);
}
Log.e("writeSteam", "加入头部前数据长度:" + out.length);
byte[] headByteOut = new byte[out.length + 40];
//将头部拷入数组
System.arraycopy(headBytes, 0, headByteOut, 0, headBytes.length);
//将帧数据拷入数组
System.arraycopy(out, 0, headByteOut, headBytes.length, out.length);
return headByteOut;
}
Then, how do we send it?
/**
* Send the data to the server
*
* @param outData
*/
private void writeData(final byte[] outData, final int type) {
new Thread() {
@Override
public void run() {
try {
if (!socket.isClosed()) {
if (socket.isConnected()) {
outputStream = socket.getOutputStream();
// prepend the custom header to this frame
if (outData.length!=0){
byte[] headOut = creatHead(outData,type);
outputStream.write(headOut);
outputStream.flush();
Log.e("writeSteam", "加入头部后写入数据长度:" + headOut.length);
}
} else {
Log.e("writeSteam", "send failed: socket is disconnected");
}
} else {
Log.e("writeSteam", "send failed: socket is closed");
}
} catch (IOException e) {
e.printStackTrace();
Log.e("writeSteam", "写入数据失败");
}
}
}.start();
}
Receiving the Data over the Network
The code above completes the broadcaster side. Next comes the viewer side, whose main job is to receive, decode and render. Let's look at how to receive the data with a Socket:
private void startSocketListener() {
threadListener = new Thread() {
@Override
public void run() {
super.run();
while (true) {
if (!socket.isClosed()) {
if (socket.isConnected()) {
try {
// step 1: get the socket's InputStream
InputStream is = socket.getInputStream();
DataInputStream input = new DataInputStream(is);
byte[] bytes = new byte[10000];
int le = input.read(bytes);
if (le > 0) {
byte[] out = new byte[le];
System.arraycopy(bytes, 0, out, 0, out.length);
// reassemble complete frames from the received chunk (see makeFrame below)
makeFrame(out);
} else if (le == -1) {
// end of stream, stop listening
break;
}
} catch (IOException e) {
e.printStackTrace();
}
} else {
// Log.e("readSteam", "receive failed: socket is disconnected");
}
} else {
// Log.e("readSteam", "receive failed: socket is closed");
}
}
}
};
threadListener.start();
}
The chunks returned by each read vary in size, i.e. a read does not correspond to exactly one frame, so we have to stitch them back into complete frames. The following method reassembles the data returned by TCP into whole frames:
private void makeFrame(byte[] out) {
if (out.length != 0) {
byte[] addByte;
if (last != null && last.length != 0) {
// prepend the bytes left over from the previous call to the newly received data
addByte = new byte[out.length + last.length];
System.arraycopy(last, 0, addByte, 0, last.length);
System.arraycopy(out, 0, addByte, last.length, out.length);
} else {
addByte = new byte[out.length];
System.arraycopy(out, 0, addByte, 0, out.length);
}
for (int i = 0; i < addByte.length; i++) {
// Log.e("readSteam", "接收的数据" + addByte[i]);
if (i + 39 < addByte.length) {
//先截取返回字符串的前40位,判断是否是头
byte[] head = new byte[40];
// Log.e("readSteam", "所在位置:" + i);
System.arraycopy(addByte, i, head, 0, head.length);
//判读是否是帧头
if (head[0] == 0x73 && head[1] == 0x74 && head[2] == 0x61 && head[3] == 0x72 && head[4] == 0x74) {
String hd = new String(head);
String[] headSplit = hd.split("&");
String type = headSplit[1];
String time = headSplit[2];
String len = headSplit[3];
int frameLength = Integer.parseInt(len);
if (i + 40 + frameLength <= addByte.length) { // the buffer already contains this whole frame
byte[] frameBy = new byte[frameLength];
System.arraycopy(addByte, i + 40, frameBy, 0, frameBy.length);
if (type.equals("video")) {
mPlayer.decodeH264(frameBy);
} else if (type.equals("music")) {
if (audioUtil==null){
audioUtil = new AACDecoderUtil();
audioUtil.start();
}
audioUtil.decode(frameBy,0,frameLength);
}
// skip the 40-byte header plus the payload; the loop's i++ then lands on the next header
i = i + 39 + frameLength;
// Thread.sleep(20);
} else {
// not enough data for the whole frame yet, keep the rest for the next call
last = new byte[addByte.length - i];
System.arraycopy(addByte, i, last, 0, last.length);
break;
}
}
} else { // fewer than 40 bytes remain, keep them as the leftover for the next call
last = new byte[addByte.length - i];
System.arraycopy(addByte, i, last, 0, last.length);
break;
}
}
}
}
}
Decoding and Playing the Audio and Video
- First, decoding and playing the H.264 data; note that rendering is done by drawing onto a SurfaceView:
/**
* @CreadBy :DramaScript
* @date 2017/8/29
*/
public class AvcDecode {
// MIME type to decode
String MIME_TYPE = "video/avc";
MediaCodec mediaCodec = null; // the decoder instance
ByteBuffer[] inputBuffers = null;
int m_framerate = 24; // frame rate
// running frame index used as the PTS base
long presentationTimeUs = 0;
public AvcDecode(int mWidth, int mHeigh, Surface surface) {
MediaFormat mediaFormat = MediaFormat.createVideoFormat(
MIME_TYPE, mWidth, mHeigh);
try {
mediaCodec = MediaCodec.createDecoderByType(MIME_TYPE);
mediaCodec.configure(mediaFormat, surface, null, 0); // unlike the encoder configuration above, we pass the render Surface and flag 0 (decoder mode)
mediaCodec.start();
} catch (IOException e) {
e.printStackTrace();
}
inputBuffers = mediaCodec.getInputBuffers();
}
public boolean decodeH264(byte[] h264) {
// Get input buffer index
ByteBuffer[] inputBuffers = mediaCodec.getInputBuffers();
int inputBufferIndex = mediaCodec.dequeueInputBuffer(100); // timeout in microseconds; -1 would block until a buffer is free
if (inputBufferIndex >= 0) {
ByteBuffer inputBuffer = inputBuffers[inputBufferIndex];
inputBuffer.clear();
inputBuffer.put(h264);
// compute the PTS
long pts = computePresentationTime(presentationTimeUs);
mediaCodec.queueInputBuffer(inputBufferIndex, 0, h264.length, pts, 0);
presentationTimeUs += 1;
} else {
return false;
}
// Get output buffer index
MediaCodec.BufferInfo bufferInfo = new MediaCodec.BufferInfo();
int outputBufferIndex = mediaCodec.dequeueOutputBuffer(bufferInfo, 100);
while (outputBufferIndex >= 0) {
mediaCodec.releaseOutputBuffer(outputBufferIndex, true); // render == true: the frame is drawn onto the Surface at this point
outputBufferIndex = mediaCodec.dequeueOutputBuffer(bufferInfo, 0);
}
Log.e("Media", "onFrame end");
return true;
}
/**
* Compute the PTS (in microseconds) from the frame index and frame rate
*/
private long computePresentationTime(long frameIndex) {
return 132 + frameIndex * 1000000 / m_framerate;
}
}
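For completeness, here is a sketch of how the viewer side can wire this decoder to a SurfaceView. The mPlayer field name matches the one used in makeFrame above; the layout id and the 640x480 size are placeholders and must match the encoded stream:
SurfaceView surfaceView = (SurfaceView) findViewById(R.id.surface_view); // placeholder layout id
surfaceView.getHolder().addCallback(new SurfaceHolder.Callback() {
    @Override
    public void surfaceCreated(SurfaceHolder holder) {
        // create the decoder once the Surface exists; the size must match the encoded video
        mPlayer = new AvcDecode(640, 480, holder.getSurface());
        // start receiving only after we can render
        startSocketListener();
    }

    @Override
    public void surfaceChanged(SurfaceHolder holder, int format, int width, int height) {
    }

    @Override
    public void surfaceDestroyed(SurfaceHolder holder) {
    }
});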
- Decoding and playing the audio data:
/**
* @CreadBy :DramaScript
* @date 2017/9/20
*/
public class AACDecoderUtil {
private static final String TAG = "AACDecoderUtil";
// channel count (note: the encoder above was configured for 1 channel)
private static final int KEY_CHANNEL_COUNT = 2;
// sample rate
private static final int KEY_SAMPLE_RATE = 44100;
// plays the decoded PCM
private MyAudioTrack mPlayer;
// the decoder
private MediaCodec mDecoder;
// counts frames that failed to decode
private int count = 0;
/**
* Initialize everything
*/
public void start() {
prepare();
}
/**
* Initialize the decoder
*
* @return false if initialization fails, true on success
*/
public boolean prepare() {
// initialize the AudioTrack used for playback
mPlayer = new MyAudioTrack(KEY_SAMPLE_RATE, AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT);
mPlayer.init();
try {
// MIME type of the data we need to decode
String mine = "audio/mp4a-latm";
// create the decoder
mDecoder = MediaCodec.createDecoderByType(mine);
// MediaFormat describes the parameters of the stream
MediaFormat mediaFormat = new MediaFormat();
// MIME type
mediaFormat.setString(MediaFormat.KEY_MIME, mine);
// channel count
mediaFormat.setInteger(MediaFormat.KEY_CHANNEL_COUNT, KEY_CHANNEL_COUNT);
// sample rate
mediaFormat.setInteger(MediaFormat.KEY_SAMPLE_RATE, KEY_SAMPLE_RATE);
// bit rate
mediaFormat.setInteger(MediaFormat.KEY_BIT_RATE, 96000);
// marks whether the AAC frames carry an ADTS header, 1 -> yes
mediaFormat.setInteger(MediaFormat.KEY_IS_ADTS, 1);
// AAC profile
mediaFormat.setInteger(MediaFormat.KEY_AAC_PROFILE, MediaCodecInfo.CodecProfileLevel.AACObjectLC);
// csd-0 carries the AudioSpecificConfig (object type, sample-rate index, channel config) that the decoder expects;
// with KEY_IS_ADTS set, the decoder can also read these parameters from the ADTS header of each frame
byte[] data = new byte[]{(byte) 0x11, (byte) 0x90};
ByteBuffer csd_0 = ByteBuffer.wrap(data);
mediaFormat.setByteBuffer("csd-0", csd_0);
// configure the decoder
mDecoder.configure(mediaFormat, null, null, 0);
} catch (IOException e) {
e.printStackTrace();
return false;
}
if (mDecoder == null) {
return false;
}
mDecoder.start();
return true;
}
/**
* Decode an AAC frame and play it
*/
public void decode(byte[] buf, int offset, int length) {
// input buffers
ByteBuffer[] codecInputBuffers = mDecoder.getInputBuffers();
// output buffers
ByteBuffer[] codecOutputBuffers = mDecoder.getOutputBuffers();
// timeout in microseconds: 0 -> do not wait, -1 -> wait forever
long kTimeOutUs = 0;
try {
// index of a free input buffer, -1 -> none available
int inputBufIndex = mDecoder.dequeueInputBuffer(kTimeOutUs);
if (inputBufIndex >= 0) {
// get that input buffer
ByteBuffer dstBuf = codecInputBuffers[inputBufIndex];
// clear it
dstBuf.clear();
// fill it with the AAC frame
dstBuf.put(buf, offset, length);
// submit the buffer at this index to the decoder
mDecoder.queueInputBuffer(inputBufIndex, 0, length, 0, 0);
}
// metadata of the output buffer
MediaCodec.BufferInfo info = new MediaCodec.BufferInfo();
// index of a filled output buffer, -1 -> none available
int outputBufferIndex = mDecoder.dequeueOutputBuffer(info, kTimeOutUs);
if (outputBufferIndex < 0) {
// count this as a failed decode
count++;
}
ByteBuffer outputBuffer;
while (outputBufferIndex >= 0) {
// get the decoded ByteBuffer
outputBuffer = codecOutputBuffers[outputBufferIndex];
// copy out the decoded PCM data
byte[] outData = new byte[info.size];
outputBuffer.get(outData);
// clear the buffer for reuse
outputBuffer.clear();
// dump the PCM to a file for debugging
Util.save(outData,0,outData.length, Environment.getExternalStorageDirectory() + "/save2.pcm",true);
// play it
mPlayer.playAudioTrack(outData, 0, info.size);
// release the output buffer back to the decoder
mDecoder.releaseOutputBuffer(outputBufferIndex, false);
// check whether more decoded output is pending
outputBufferIndex = mDecoder.dequeueOutputBuffer(info, kTimeOutUs);
}
} catch (Exception e) {
Log.e(TAG, e.toString());
e.printStackTrace();
}
}
// number of frames that failed to decode
public int getCount() {
return count;
}
/**
* Release resources
*/
public void stop() {
try {
if (mPlayer != null) {
mPlayer.release();
mPlayer = null;
}
if (mDecoder != null) {
mDecoder.stop();
mDecoder.release();
}
} catch (Exception e) {
e.printStackTrace();
}
}
}
- Playing the audio: here we use AudioTrack for playback:
/**
* @CreadBy :DramaScript
* @date 2017/9/19
*/
public class MyAudioTrack {
private int mFrequency;// sample rate
private int mChannel;// channel configuration
private int mSampBit;// sample format (bit depth)
private AudioTrack mAudioTrack;
public MyAudioTrack(int frequency, int channel, int sampbit) {
mFrequency = frequency;
mChannel = channel;
mSampBit = sampbit;
}
/**
* Initialize
*/
public void init() {
if (mAudioTrack != null) {
release();
}
// minimum buffer size required to construct the AudioTrack
int minBufSize = getMinBufferSize();
mAudioTrack = new AudioTrack(AudioManager.STREAM_MUSIC,
mFrequency, mChannel, mSampBit, minBufSize, AudioTrack.MODE_STREAM);
mAudioTrack.play();
}
/**
* Release resources
*/
public void release() {
if (mAudioTrack != null) {
mAudioTrack.stop();
mAudioTrack.release();
}
}
/**
* Write decoded PCM data to the AudioTrack for playback
*
* @param data   the PCM data
* @param offset offset into the data
* @param length number of bytes to play
*/
public void playAudioTrack(byte[] data, int offset, int length) {
if (data == null || data.length == 0) {
return;
}
try {
mAudioTrack.write(data, offset, length);
} catch (Exception e) {
Log.e("MyAudioTrack", "AudioTrack Exception : " + e.toString());
e.printStackTrace();
}
}
public int getMinBufferSize() {
return AudioTrack.getMinBufferSize(mFrequency,
mChannel, mSampBit);
}
}
That's it: the Socket-based live stream is done. Audio/video synchronization will be covered in the next article. In my tests the end-to-end latency of the demo is roughly one and a half seconds. I will put the code on my GitHub; I hope it helps beginners digging into this area avoid some detours: DramaScript