1. Requirements
When making a P2P call with a firmware device, we want to record a segment of the audio and video. The official WebRTC build only hands the app raw video frames one by one; raw audio data is not exposed at all. To implement audio/video recording, the WebRTC source therefore has to be modified to call back the raw audio data as well.
2. Adding an audio callback to the WebRTC source
The change goes in sdk/objc/native/src/audio/audio_device_ios.mm: find the OnGetPlayoutData method and add the following code:
  // Inside AudioDeviceIOS::OnGetPlayoutData (the playout render callback).
  AudioBuffer* audio_buffer = &io_data->mBuffers[0];
  // Wrap the raw playout data in an RTCTAudioData object (a custom class added
  // together with this change) and publish it through RTCAudioSession.
  RTC_OBJC_TYPE(RTCTAudioData)* audioData = [[RTC_OBJC_TYPE(RTCTAudioData) alloc] init];
  audioData.audioBufferList = *io_data;
  audioData.timeStamp = *time_stamp;
  audioData.busNumber = bus_number;
  audioData.numFrames = num_frames;
  RTC_OBJC_TYPE(RTCAudioSession)* session = [RTC_OBJC_TYPE(RTCAudioSession) sharedInstance];
  [session lockForConfiguration];
  if (session) {
    // The audioData property is added to RTCAudioSession as part of this change;
    // assigning it fires the KVO notification observed in the app layer.
    session.audioData = audioData;
  }
  [session unlockForConfiguration];
  RTC_DCHECK_EQ(1, audio_buffer->mNumberChannels);
The audio is now delivered to the app layer through RTCAudioSession, where each frame can be encoded and written to a file.
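Neither RTCTAudioData nor the audioData property exists in the stock SDK; both are added as part of this change. Based on the fields assigned above, the declarations might look roughly like the sketch below (an illustration, not the exact upstream diff):

  // RTCTAudioData.h (new file added to the ObjC SDK)
  #import <Foundation/Foundation.h>
  #import <AudioToolbox/AudioToolbox.h>
  #import "RTCMacros.h"

  RTC_OBJC_EXPORT
  @interface RTC_OBJC_TYPE (RTCTAudioData) : NSObject

  // Note: this stores the AudioBufferList struct itself (i.e. the buffer
  // pointers), not a deep copy of the PCM samples.
  @property(nonatomic, assign) AudioBufferList audioBufferList;
  @property(nonatomic, assign) AudioTimeStamp timeStamp;
  @property(nonatomic, assign) UInt32 busNumber;
  @property(nonatomic, assign) UInt32 numFrames;

  @end

  // Added inside the @interface in RTCAudioSession.h so the app layer can
  // observe new audio via KVO:
  @property(nonatomic, strong, nullable) RTC_OBJC_TYPE(RTCTAudioData) *audioData;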
3. Writing audio and video frames to a file
In the app layer, observe the WebRTC audio and video streams via KVO:
[[RTCAudioSession sharedInstance] addObserver:self forKeyPath:@"audioData" options:NSKeyValueObservingOptionNew context:(__bridge void *)@"RTCAudioData"];
[remoteRederer addObserver:self forKeyPath:@"RTCFrame" options:NSKeyValueObservingOptionNew context:(__bridge void *)@"RTCVideoFrame"];
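"RTCFrame" is not a key path exposed by the stock renderer views, so this assumes the remote renderer publishes its latest decoded frame through a KVO-observable property, either by patching the SDK's video view or by using a small custom renderer. A minimal sketch of the latter (the class name FrameCapturingRenderer is an assumption; the property name matches the key path above):

  #import <WebRTC/WebRTC.h>

  @interface FrameCapturingRenderer : NSObject <RTCVideoRenderer>
  // Observed by the app layer via the "RTCFrame" key path.
  @property(nonatomic, strong, nullable) RTCVideoFrame *RTCFrame;
  @end

  @implementation FrameCapturingRenderer

  - (void)setSize:(CGSize)size {
    // No-op: only the frames themselves are needed here.
  }

  - (void)renderFrame:(nullable RTCVideoFrame *)frame {
    if (frame) {
      // Assigning through the setter fires the KVO notification.
      self.RTCFrame = frame;
    }
  }

  @end

Such a renderer can be attached to the remote video track with -[RTCVideoTrack addRenderer:] alongside the on-screen view, and then observed exactly like remoteRederer above.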
When a change fires, hand the data to the delegate:
- (void)observeValueForKeyPath:(NSString *)keyPath ofObject:(id)object change:(NSDictionary *)change context:(void *)context {
    if (context == (__bridge void *)@"RTCAudioData") {
        RTCTAudioData *audioData = change[NSKeyValueChangeNewKey];
        dispatch_async(dispatch_get_main_queue(), ^{
            if (self.delegate && [self.delegate respondsToSelector:@selector(webRTCClientAudioStream:audioData:)]) {
                [self.delegate webRTCClientAudioStream:self audioData:audioData];
            }
        });
    } else if (context == (__bridge void *)@"RTCVideoFrame") {
        RTCVideoFrame *frame = change[NSKeyValueChangeNewKey];
        dispatch_async(dispatch_get_main_queue(), ^{
            if (frame && self.delegate && [self.delegate respondsToSelector:@selector(webRTCClientVideoStream:videoFrame:)]) {
                [self.delegate webRTCClientVideoStream:self videoFrame:frame];
            }
        });
    } else {
        [super observeValueForKeyPath:keyPath ofObject:object change:change context:context];
    }
}
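The delegate methods invoked above belong to the app's own client class and are not declared in the original text; a plausible protocol declaration, inferred from the selectors (the names WebRTCClient and WebRTCClientDelegate are assumptions), would be:

  #import <WebRTC/WebRTC.h>

  @class WebRTCClient;   // assumed name of the class that owns the observers above
  @class RTCTAudioData;  // the custom class added to the SDK in section 2

  @protocol WebRTCClientDelegate <NSObject>
  @optional
  // Raw playout audio forwarded from RTCAudioSession.
  - (void)webRTCClientAudioStream:(WebRTCClient *)client audioData:(RTCTAudioData *)audioData;
  // Decoded remote video frames forwarded from the renderer.
  - (void)webRTCClientVideoStream:(WebRTCClient *)client videoFrame:(RTCVideoFrame *)videoFrame;
  @end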
Writing the video frames:
if (videoFrame) {
    // Remote frames may arrive as an RTCI420Buffer rather than an RTCCVPixelBuffer,
    // so check the buffer type before using it.
    if ([videoFrame.buffer isKindOfClass:[RTCCVPixelBuffer class]]) {
        RTCCVPixelBuffer *buff = (RTCCVPixelBuffer *)videoFrame.buffer;
        CVPixelBufferRef buffRef = buff.pixelBuffer;
        if (buffRef) {
            // Remember the timestamp of the first recorded frame so that
            // presentation timestamps start at zero.
            if (self.startRecordPts == 0) {
                self.startRecordPts = videoFrame.timeStampNs;
            }
            if (self.writeManager) {
                if (!self.writeManager.canWrite) {
                    self.writeManager.outputSize = CGSizeMake(buff.width, buff.height);
                    [self.writeManager startWrite];
                }
                // timeStampNs is in nanoseconds; dividing by 1000 hands the
                // write manager a value in microseconds.
                int64_t pts = videoFrame.timeStampNs - self.startRecordPts;
                [self.writeManager appendCVPixelBuffer:buffRef pts:pts / 1000];
            }
        }
    }
}
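writeManager is the author's own wrapper and its implementation is not shown. As a sketch only, startWrite and appendCVPixelBuffer:pts: could be built on AVAssetWriter roughly as below, assuming the pts argument is in microseconds (per the /1000 above) and assuming ivars _writer, _videoInput, _pixelAdaptor and a property outputURL that the original never names:

  #import <AVFoundation/AVFoundation.h>

  // Assumed ivars: AVAssetWriter *_writer; AVAssetWriterInput *_videoInput;
  // AVAssetWriterInputPixelBufferAdaptor *_pixelAdaptor;
  - (void)startWrite {
      NSError *error = nil;
      _writer = [AVAssetWriter assetWriterWithURL:self.outputURL   // assumed property
                                         fileType:AVFileTypeMPEG4
                                            error:&error];
      NSDictionary *videoSettings = @{
          AVVideoCodecKey : AVVideoCodecTypeH264,
          AVVideoWidthKey : @((int)self.outputSize.width),
          AVVideoHeightKey : @((int)self.outputSize.height)
      };
      _videoInput = [AVAssetWriterInput assetWriterInputWithMediaType:AVMediaTypeVideo
                                                       outputSettings:videoSettings];
      _videoInput.expectsMediaDataInRealTime = YES;
      _pixelAdaptor = [AVAssetWriterInputPixelBufferAdaptor
          assetWriterInputPixelBufferAdaptorWithAssetWriterInput:_videoInput
                                     sourcePixelBufferAttributes:nil];
      [_writer addInput:_videoInput];
      // The audio input (see the audio sketch further below) must also be
      // added before startWriting is called.
      [_writer startWriting];
      [_writer startSessionAtSourceTime:kCMTimeZero];
      self.canWrite = YES;
  }

  - (void)appendCVPixelBuffer:(CVPixelBufferRef)pixelBuffer pts:(int64_t)pts {
      if (!self.canWrite || !_videoInput.isReadyForMoreMediaData) {
          return;
      }
      // pts arrives in microseconds, so use a microsecond timescale.
      [_pixelAdaptor appendPixelBuffer:pixelBuffer
                  withPresentationTime:CMTimeMake(pts, 1000000)];
  }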
Writing the audio frames:
int sampleRate = [RTCAudioSession sharedInstance].sampleRate;
AudioBufferList buffList = audioData.audioBufferList;
int channels = buffList.mBuffers[0].mNumberChannels;
int len = buffList.mBuffers[0].mDataByteSize;
NSLog(@"audio channels: %d, byte size: %d", channels, len);

// Describe the raw playout data: packed 16-bit signed integer PCM at the session sample rate.
AudioStreamBasicDescription asbd = {0};
asbd.mSampleRate = sampleRate;
asbd.mFormatID = kAudioFormatLinearPCM;
asbd.mFormatFlags = kLinearPCMFormatFlagIsPacked | kLinearPCMFormatFlagIsSignedInteger;
asbd.mChannelsPerFrame = channels;
asbd.mBitsPerChannel = 16;
asbd.mFramesPerPacket = 1;
asbd.mBytesPerFrame = asbd.mBitsPerChannel / 8 * asbd.mChannelsPerFrame;
asbd.mBytesPerPacket = asbd.mBytesPerFrame * asbd.mFramesPerPacket;

CMFormatDescriptionRef format = NULL;
OSStatus error = CMAudioFormatDescriptionCreate(kCFAllocatorDefault, &asbd, 0, NULL, 0, NULL, NULL, &format);
if (error) {
    NSLog(@"CMAudioFormatDescriptionCreate returned error: %ld", (long)error);
    return;
}

// Frame count = byte size / (2 bytes per sample * channel count).
CMSampleBufferRef buff = NULL;
CMSampleTimingInfo timing = {CMTimeMake(1, sampleRate), kCMTimeZero, kCMTimeInvalid};
error = CMSampleBufferCreate(kCFAllocatorDefault, NULL, false, NULL, NULL, format,
                             (CMItemCount)(len / (2 * channels)), 1, &timing, 0, NULL, &buff);
// The sample buffer retains the format description, so it can be released here
// whether or not the creation succeeded.
CFRelease(format);
if (error) {
    NSLog(@"CMSampleBufferCreate returned error: %ld", (long)error);
    return;
}

error = CMSampleBufferSetDataBufferFromAudioBufferList(buff, kCFAllocatorDefault, kCFAllocatorDefault, 0, &buffList);
if (error) {
    NSLog(@"CMSampleBufferSetDataBufferFromAudioBufferList returned error: %ld", (long)error);
    CFRelease(buff);
    return;
}

[self.writeManager appendSampleBuffer:buff ofMediaType:AVMediaTypeAudio];
CFRelease(buff);
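For completeness, the matching audio side of such a write manager might look like the sketch below; the AAC output settings, the ivar _audioInput, and the helper names setUpAudioInputWithSampleRate: and finishWriteWithCompletion: are assumptions rather than the author's actual code:

  // Assumed ivar: AVAssetWriterInput *_audioInput; created alongside the video
  // input, before -startWriting is called.
  - (void)setUpAudioInputWithSampleRate:(int)sampleRate {
      AudioChannelLayout layout = {0};
      layout.mChannelLayoutTag = kAudioChannelLayoutTag_Mono;
      NSDictionary *audioSettings = @{
          AVFormatIDKey : @(kAudioFormatMPEG4AAC),
          AVSampleRateKey : @(sampleRate),
          AVNumberOfChannelsKey : @(1),
          AVChannelLayoutKey : [NSData dataWithBytes:&layout length:sizeof(layout)]
      };
      _audioInput = [AVAssetWriterInput assetWriterInputWithMediaType:AVMediaTypeAudio
                                                       outputSettings:audioSettings];
      _audioInput.expectsMediaDataInRealTime = YES;
      [_writer addInput:_audioInput];
  }

  - (void)appendSampleBuffer:(CMSampleBufferRef)sampleBuffer ofMediaType:(AVMediaType)mediaType {
      if (!self.canWrite) {
          return;
      }
      if ([mediaType isEqualToString:AVMediaTypeAudio] && _audioInput.isReadyForMoreMediaData) {
          [_audioInput appendSampleBuffer:sampleBuffer];
      }
  }

  - (void)finishWriteWithCompletion:(void (^)(void))completion {
      self.canWrite = NO;
      [_videoInput markAsFinished];
      [_audioInput markAsFinished];
      [_writer finishWritingWithCompletionHandler:completion];
  }

One caveat: the sample buffers built in the code above all carry a presentation time of kCMTimeZero, so a real write manager would need to retime them (for example with CMSampleBufferCreateCopyWithNewTiming) before appending, otherwise the audio track's timestamps will not advance.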