1.PCM介绍
脉冲编码调制(Pulse Code Modulation,PCM),由A.里弗斯于1937年提出,这一概念为数字通信奠定了基础。60年代它开始应用于市内电话网以扩充容量,使已有音频电缆的大部分芯线的传输容量扩大24~48倍。
2.XBEchoCancellation 使用
3. **使用方法**
XBEchoCancellation.h
#import <Foundation/Foundation.h>
#import <AVFoundation/AVFoundation.h>
#import <AudioToolbox/AudioToolbox.h>
/// Supported sample rates for the echo-cancellation audio unit.
/// NS_ENUM (instead of a raw `typedef enum`) gives an explicit underlying
/// type, switch-exhaustiveness warnings, and clean Swift bridging.
typedef NS_ENUM(NSUInteger, XBEchoCancellationRate) {
    XBEchoCancellationRate_8k = 8000,
    XBEchoCancellationRate_20k = 20000,
    XBEchoCancellationRate_44k = 44100,
    XBEchoCancellationRate_96k = 96000
};
#define kRate (XBEchoCancellationRate_8k) // sample rate
#define kChannels (1)                     // channel count
#define kBits (16)                        // bits per sample
/// Whether voice processing (echo cancellation) is currently active.
typedef NS_ENUM(NSUInteger, XBEchoCancellationStatus) {
    XBEchoCancellationStatus_open,
    XBEchoCancellationStatus_close
};
/// Recording callback: `bufferList` holds PCM captured from the microphone.
typedef void (^XBEchoCancellation_inputBlock)(AudioBufferList *bufferList);
/// Playback callback: fill `bufferList` with `inNumberFrames` frames of PCM to render.
typedef void (^XBEchoCancellation_outputBlock)(AudioBufferList *bufferList,UInt32 inNumberFrames);
/// Singleton wrapper around an AUGraph + VoiceProcessingIO audio unit that
/// captures microphone audio and renders playback with echo cancellation.
@interface XBEchoCancellation : NSObject
/// Current echo-cancellation state (open = voice processing active, close = bypassed).
@property (nonatomic,assign,readonly) XBEchoCancellationStatus echoCancellationStatus;
/// PCM stream format configured on the remote I/O unit (built from kRate/kChannels/kBits).
@property (nonatomic,assign,readonly) AudioStreamBasicDescription streamFormat;
/// Recording callback; the parameter holds audio captured from the microphone.
@property (nonatomic,copy) XBEchoCancellation_inputBlock bl_input;
/// Playback callback; fill the supplied buffer with the PCM data to be sent to the
/// output device (speaker / headphones / receiver).
@property (nonatomic,copy) XBEchoCancellation_outputBlock bl_output;
/// Shared singleton accessor.
+ (instancetype)shared;
/// Start the service if needed and enable delivery of mic data via bl_input.
- (void)startInput;
/// Stop delivering mic data (the underlying audio service keeps running).
- (void)stopInput;
/// Start the service if needed and enable pulling playback data via bl_output.
- (void)startOutput;
/// Stop pulling playback data (the underlying audio service keeps running).
- (void)stopOutput;
/// Enable voice processing (echo cancellation) on the running unit.
- (void)openEchoCancellation;
/// Bypass voice processing on the running unit.
- (void)closeEchoCancellation;
/// Start the audio service; input and/or output must then be enabled separately.
- (void)startService;
/// Stop everything (both recording and playback) and release the callbacks.
- (void)stop;
// Volume control
// output: para1 destination sample buffer
// input : para2 source sample buffer
// para3 input length in bytes (samples processed = length / 2)
// para4 volume parameter, effective range [0,100];
// above 100 the gain is the multiplier (in_vol - 98)
+ (void)volume_controlOut_buf:(short *)out_buf in_buf:(short *)in_buf in_len:(int)in_len in_vol:(float)in_vol;
@end
XBEchoCancellation.m
#import "XBEchoCancellation.h"
/// Bundles the AUGraph and the I/O unit it hosts; a pointer to this struct is
/// reached from the C render callbacks via the singleton's ivar.
typedef struct MyAUGraphStruct{
AUGraph graph; // processing graph created in createAUGraph:
AudioUnit remoteIOUnit; // VoiceProcessingIO unit: mic capture + speaker render + AEC
} MyAUGraphStruct;
@interface XBEchoCancellation ()
{
MyAUGraphStruct myStruct; // graph + remote I/O unit, read directly by the C callbacks
}
/// YES when the audio service is NOT running (graph stopped or never started).
@property (nonatomic,assign) BOOL isCloseService;
/// YES when the client wants the recording (microphone input) callback delivered.
@property (nonatomic,assign) BOOL isNeedInputCallback;
/// YES when the client wants the playback (output render) callback delivered.
@property (nonatomic,assign) BOOL isNeedOutputCallback;
@end
@implementation XBEchoCancellation
@synthesize streamFormat;
/// Shared singleton accessor.
/// The original returned `[self new]` on every call: allocWithZone: does hand
/// back the single instance, but init was re-entered each time. The canonical
/// dispatch_once accessor avoids the redundant alloc/init message sends and
/// the non-idiomatic `+new`.
+ (instancetype)shared
{
    static XBEchoCancellation *sharedInstance = nil;
    static dispatch_once_t onceToken;
    dispatch_once(&onceToken, ^{
        sharedInstance = [[self alloc] init];
    });
    return sharedInstance;
}
/// Funnel every allocation to one instance so the class behaves as a strict
/// singleton even if callers use alloc/init directly.
+ (instancetype)allocWithZone:(struct _NSZone *)zone
{
    static XBEchoCancellation *singleton = nil;
    static dispatch_once_t allocOnce;
    dispatch_once(&allocOnce, ^{
        singleton = [super allocWithZone:zone];
    });
    return singleton;
}
- (instancetype)init
{
if (self = [super init])
{
// Because allocWithZone: always returns the same instance, init can be
// re-entered on every alloc/init; this dispatch_once guard makes the
// one-time setup run only on first initialization.
static dispatch_once_t onceToken;
dispatch_once(&onceToken, ^{
_echoCancellationStatus = XBEchoCancellationStatus_close;
self.isCloseService = YES;
// Spin up the audio session and graph immediately on first init.
[self startService];
});
}
return self;
}
/// Ensure the audio service is running, then enable the mic-data callback.
- (void)startInput
{
[self startService];
self.isNeedInputCallback = YES;
}
/// Disable the mic-data callback; the audio service itself keeps running.
- (void)stopInput
{
self.isNeedInputCallback = NO;
}
/// Ensure the audio service is running, then enable the playback callback.
- (void)startOutput
{
[self startService];
self.isNeedOutputCallback = YES;
}
/// Disable the playback callback; the audio service itself keeps running.
- (void)stopOutput
{
self.isNeedOutputCallback = NO;
}
/// Bring up the whole audio path: session -> graph -> I/O unit, then start it.
/// Idempotent: returns immediately when the service is already running.
- (void)startService
{
    // Already running — nothing to do.
    if (!self.isCloseService)
    {
        return;
    }
    // Order matters: session category first, then graph construction,
    // then unit configuration, then start.
    [self setupSession];
    [self createAUGraph:&myStruct];
    [self setupRemoteIOUnit:&myStruct];
    [self startGraph:myStruct.graph];
    AudioOutputUnitStart(myStruct.remoteIOUnit);
    self.isCloseService = NO;
    NSLog(@"startService完成");
}
/// Stop all audio work (recording and playback) and release both client blocks.
- (void)stop
{
    // Drop the client callbacks first so no in-flight render callback fires
    // into a stale block.
    self.bl_input = nil;
    self.bl_output = nil;
    // Fix: also clear the demand flags. The original left them set, so a later
    // startService would resume firing callbacks that no longer had a consumer.
    self.isNeedInputCallback = NO;
    self.isNeedOutputCallback = NO;
    [self stopGraph:myStruct.graph];
}
/// Turn echo cancellation on; ignored while the audio service is stopped.
- (void)openEchoCancellation
{
    if (!self.isCloseService)
    {
        [self openOrCloseEchoCancellation:0]; // 0 = enable voice processing
    }
}
/// Turn echo cancellation off; ignored while the audio service is stopped.
- (void)closeEchoCancellation
{
    if (!self.isCloseService)
    {
        [self openOrCloseEchoCancellation:1]; // 1 = bypass voice processing
    }
}
/// Toggle the VoiceProcessingIO bypass flag.
/// newEchoCancellationStatus: 0 = enable echo cancellation, 1 = bypass (disable) it.
-(void)openOrCloseEchoCancellation:(UInt32)newEchoCancellationStatus
{
if (self.isCloseService == YES)
{
return;
}
// Read the current bypass value first so a redundant property set is skipped.
UInt32 echoCancellation;
UInt32 size = sizeof(echoCancellation);
CheckError(AudioUnitGetProperty(myStruct.remoteIOUnit,
kAUVoiceIOProperty_BypassVoiceProcessing,
kAudioUnitScope_Global,
0,
&echoCancellation,
&size),
"kAUVoiceIOProperty_BypassVoiceProcessing failed");
if (newEchoCancellationStatus == echoCancellation)
{
return;
}
CheckError(AudioUnitSetProperty(myStruct.remoteIOUnit,
kAUVoiceIOProperty_BypassVoiceProcessing,
kAudioUnitScope_Global,
0,
&newEchoCancellationStatus,
sizeof(newEchoCancellationStatus)),
"AudioUnitSetProperty kAUVoiceIOProperty_BypassVoiceProcessing failed");
// bypass == 0 means voice processing (echo cancellation) is active.
_echoCancellationStatus = newEchoCancellationStatus == 0 ? XBEchoCancellationStatus_open : XBEchoCancellationStatus_close;
}
/// Initialize and start the AUGraph.
-(void)startGraph:(AUGraph)graph
{
CheckError(AUGraphInitialize(graph),
"AUGraphInitialize failed");
CheckError(AUGraphStart(graph),
"AUGraphStart failed");
// NOTE(review): marks cancellation "open" on start — assumes the
// VoiceProcessingIO bypass flag defaults to off; confirm against the unit docs.
_echoCancellationStatus = XBEchoCancellationStatus_open;
}
/// Uninitialize and stop the AUGraph, marking the service as closed.
- (void)stopGraph:(AUGraph)graph
{
// Idempotent: nothing to tear down when the service is already stopped.
if (self.isCloseService == YES)
{
return;
}
CheckError(AUGraphUninitialize(graph),
"AUGraphUninitialize failed");
CheckError(AUGraphStop(graph),
"AUGraphStop failed");
self.isCloseService = YES;
_echoCancellationStatus = XBEchoCancellationStatus_close;
}
/// Build the AUGraph containing a single VoiceProcessingIO node and fetch the
/// AudioUnit reference into augStruct->remoteIOUnit.
-(void)createAUGraph:(MyAUGraphStruct*)augStruct{
//Create graph
CheckError(NewAUGraph(&augStruct->graph),
"NewAUGraph failed");
//Create nodes and add to the graph
// {0} zero-initializes all fields, including componentFlags/FlagsMask.
AudioComponentDescription inputcd = {0};
inputcd.componentType = kAudioUnitType_Output;
// VoiceProcessingIO = RemoteIO + built-in acoustic echo cancellation.
inputcd.componentSubType = kAudioUnitSubType_VoiceProcessingIO;
inputcd.componentManufacturer = kAudioUnitManufacturer_Apple;
AUNode remoteIONode;
//Add node to the graph
CheckError(AUGraphAddNode(augStruct->graph,
&inputcd,
&remoteIONode),
"AUGraphAddNode failed");
//Open the graph (instantiates the units; must precede AUGraphNodeInfo)
CheckError(AUGraphOpen(augStruct->graph),
"AUGraphOpen failed");
//Get reference to the node's AudioUnit for later property configuration
CheckError(AUGraphNodeInfo(augStruct->graph,
remoteIONode,
&inputcd,
&augStruct->remoteIOUnit),
"AUGraphNodeInfo failed");
}
/// Configure the VoiceProcessingIO unit: enable mic (bus 1) and speaker (bus 0),
/// set the 16-bit integer PCM stream format on both, and install the input and
/// render callbacks.
-(void)setupRemoteIOUnit:(MyAUGraphStruct*)augStruct{
//Open input of the bus 1(input mic)
UInt32 inputEnableFlag = 1;
CheckError(AudioUnitSetProperty(augStruct->remoteIOUnit,
kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Input,
1,
&inputEnableFlag,
sizeof(inputEnableFlag)),
"Open input of bus 1 failed");
//Open output of bus 0(output speaker)
UInt32 outputEnableFlag = 1;
CheckError(AudioUnitSetProperty(augStruct->remoteIOUnit,
kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Output,
0,
&outputEnableFlag,
sizeof(outputEnableFlag)),
"Open output of bus 0 failed");
// Packed 16-bit signed integer mono PCM, one frame per packet.
UInt32 mFramesPerPacket = 1;
UInt32 mBytesPerFrame = kChannels * kBits / 8;
//Set up stream format for input and output
streamFormat.mFormatID = kAudioFormatLinearPCM;
streamFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
streamFormat.mSampleRate = kRate;
streamFormat.mFramesPerPacket = mFramesPerPacket;
streamFormat.mBytesPerFrame = mBytesPerFrame;
streamFormat.mBytesPerPacket = mBytesPerFrame * mFramesPerPacket;
streamFormat.mBitsPerChannel = kBits;
streamFormat.mChannelsPerFrame = kChannels;
// Format of the data the app supplies to the speaker element (input scope, bus 0).
CheckError(AudioUnitSetProperty(augStruct->remoteIOUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Input,
0,
&streamFormat,
sizeof(streamFormat)),
"kAudioUnitProperty_StreamFormat of bus 0 failed");
// Format of the data the mic element delivers to the app (output scope, bus 1).
CheckError(AudioUnitSetProperty(augStruct->remoteIOUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Output,
1,
&streamFormat,
sizeof(streamFormat)),
"kAudioUnitProperty_StreamFormat of bus 1 failed");
// UInt32 maxFramesPerSlice = 4096;
// CheckError(AudioUnitSetProperty(augStruct->remoteIOUnit,
// kAudioUnitProperty_MaximumFramesPerSlice,
// kAudioUnitScope_Global,
// 0,
// &maxFramesPerSlice,
// sizeof(UInt32)),
// "couldn't set max frames per slice on kAudioUnitSubType_RemoteIO");
// Float32 preferredBufferSize = 0.01;
// AudioSessionSetProperty(kAudioSessionProperty_PreferredHardwareIOBufferDuration,
// sizeof(preferredBufferSize),
// &preferredBufferSize);
//
// AudioSessionSetActive(true);
// Mic-capture notification callback on bus 1; the callback itself pulls the
// samples with AudioUnitRender.
AURenderCallbackStruct input;
input.inputProc = InputCallback_xb;
input.inputProcRefCon = (__bridge void *)(self);
CheckError(AudioUnitSetProperty(augStruct->remoteIOUnit,
kAudioOutputUnitProperty_SetInputCallback,
kAudioUnitScope_Output,
1,
&input,
sizeof(input)),
"couldnt set remote i/o render callback for output");
// Render callback that supplies playback data for the speaker (bus 0).
AURenderCallbackStruct output;
output.inputProc = outputRenderTone_xb;
output.inputProcRefCon = (__bridge void *)(self);
CheckError(AudioUnitSetProperty(augStruct->remoteIOUnit,
kAudioUnitProperty_SetRenderCallback,
kAudioUnitScope_Input,
0,
&output,
sizeof(output)),
"kAudioUnitProperty_SetRenderCallback failed");
// UInt32 flag=0;
// CheckError(AudioUnitSetProperty(augStruct->remoteIOUnit,
// kAudioUnitProperty_ShouldAllocateBuffer,
// kAudioUnitScope_Output,
// 1,
// &flag,
// sizeof(flag)),
// "couldn't set property for ShouldAllocateBuffer");
}
// NOTE(review): empty placeholder — never implemented and not called anywhere
// in this file; consider removing it (and its declaration, if any) entirely.
-(void)createRemoteIONodeToGraph:(AUGraph*)graph
{
}
/// Configure and activate the shared AVAudioSession for simultaneous record +
/// playback, routing output to the speaker by default.
/// Fix: the original ignored both error results (setActive: was even passed
/// nil) — failures are now detected via the BOOL return values and logged.
-(void)setupSession
{
    AVAudioSession *session = [AVAudioSession sharedInstance];
    NSError *error = nil;
    // PlayAndRecord + DefaultToSpeaker: capture the mic while playing through
    // the loudspeaker instead of the receiver.
    if (![session setCategory:AVAudioSessionCategoryPlayAndRecord
                  withOptions:AVAudioSessionCategoryOptionDefaultToSpeaker
                        error:&error])
    {
        NSLog(@"XBEchoCancellation: setCategory failed: %@", error);
    }
    error = nil;
    if (![session setActive:YES error:&error])
    {
        NSLog(@"XBEchoCancellation: setActive failed: %@", error);
    }
}
#pragma mark - Helpers
/// Log `operation` and the OSStatus (as a 4-char code when printable, else as
/// an integer), then terminate. No-op when error == noErr.
static void CheckError(OSStatus error, const char *operation)
{
    if (error == noErr) return;

    char errorString[20];
    // See if it appears to be a printable 4-char code (e.g. 'fmt?').
    // Fix: the original wrote through *(UInt32 *)(errorString + 1), an
    // unaligned, strict-aliasing-violating store; memcpy is well-defined.
    UInt32 bigEndian = CFSwapInt32HostToBig((UInt32)error);
    memcpy(errorString + 1, &bigEndian, sizeof(bigEndian));
    // Fix: isprint takes an int in unsigned-char range; a raw (signed) char
    // can be negative, which is undefined behavior.
    if (isprint((unsigned char)errorString[1]) && isprint((unsigned char)errorString[2]) &&
        isprint((unsigned char)errorString[3]) && isprint((unsigned char)errorString[4])) {
        errorString[0] = errorString[5] = '\'';
        errorString[6] = '\0';
    } else {
        // No, format it as an integer (snprintf bounds the write).
        snprintf(errorString, sizeof(errorString), "%d", (int)error);
    }
    fprintf(stderr, "Error: %s (%s)\n", operation, errorString);
    // NOTE(review): exit() inside a library is drastic — callers cannot
    // recover from any CoreAudio hiccup. Kept to preserve existing behavior;
    // consider propagating the status instead.
    exit(1);
}
/// Mic-capture callback: pulls the captured frames from bus 1 of the remote
/// I/O unit and forwards them to the client's bl_input block.
OSStatus InputCallback_xb(void *inRefCon,
                          AudioUnitRenderActionFlags *ioActionFlags,
                          const AudioTimeStamp *inTimeStamp,
                          UInt32 inBusNumber,
                          UInt32 inNumberFrames,
                          AudioBufferList *ioData){
    XBEchoCancellation *echoCancellation = (__bridge XBEchoCancellation*)inRefCon;
    // Skip all work when the client has not asked for mic data.
    if (echoCancellation.isNeedInputCallback == NO)
    {
        return noErr;
    }
    MyAUGraphStruct *myStruct = &(echoCancellation->myStruct);
    // mData == NULL / size 0 asks AudioUnitRender to supply its own storage.
    AudioBufferList bufferList;
    bufferList.mNumberBuffers = 1;
    bufferList.mBuffers[0].mNumberChannels = kChannels; // fix: was left uninitialized
    bufferList.mBuffers[0].mData = NULL;
    bufferList.mBuffers[0].mDataByteSize = 0;
    // Fix: the original ignored the render status and could hand the client
    // an unfilled buffer on failure.
    OSStatus status = AudioUnitRender(myStruct->remoteIOUnit,
                                      ioActionFlags,
                                      inTimeStamp,
                                      1,                // bus 1 = microphone
                                      inNumberFrames,
                                      &bufferList);
    if (status != noErr)
    {
        return status;
    }
    if (echoCancellation.bl_input)
    {
        echoCancellation.bl_input(&bufferList);
    }
    return noErr;
}
/// Speaker render callback: zeroes the output, then lets the client's
/// bl_output block fill ioData with PCM to play.
OSStatus outputRenderTone_xb(
                             void *inRefCon,
                             AudioUnitRenderActionFlags *ioActionFlags,
                             const AudioTimeStamp *inTimeStamp,
                             UInt32 inBusNumber,
                             UInt32 inNumberFrames,
                             AudioBufferList *ioData)
{
    // Zero EVERY output buffer so stale memory is never played.
    // Fix: the original cleared only mBuffers[0], leaving any additional
    // buffers (e.g. non-interleaved channels) untouched.
    for (UInt32 i = 0; i < ioData->mNumberBuffers; i++)
    {
        memset(ioData->mBuffers[i].mData, 0, ioData->mBuffers[i].mDataByteSize);
    }
    XBEchoCancellation *echoCancellation = (__bridge XBEchoCancellation*)inRefCon;
    // Render silence when the client has not enabled output.
    if (echoCancellation.isNeedOutputCallback == NO)
    {
        return noErr;
    }
    if (echoCancellation.bl_output)
    {
        echoCancellation.bl_output(ioData,inNumberFrames);
    }
    return noErr; // was a bare `return 0` — same value, consistent spelling
}
/// Objective-C wrapper over the C volume_control() helper defined below;
/// see that function for the parameter semantics.
+ (void)volume_controlOut_buf:(short *)out_buf in_buf:(short *)in_buf in_len:(int)in_len in_vol:(float)in_vol
{
volume_control(out_buf, in_buf, in_len, in_vol);
}
// Volume control over 16-bit PCM samples.
// out_buf : destination samples
// in_buf  : source samples
// in_len  : input length in BYTES (samples processed = in_len / 2)
// in_vol  : volume parameter, effective range [0,100];
//           above 100 the gain is (in_vol - 98) used as a multiplier
int volume_control(short* out_buf,short* in_buf,int in_len, float in_vol)
{
    // Map in_vol onto a linear gain factor.
    float gain = in_vol - 98;
    if (gain <= -98)
    {
        gain = 0;                  // in_vol <= 0: full mute
    }
    else if (gain < 0)
    {
        gain = 1 / (gain * (-1));  // 0 < in_vol < 98: attenuate by 1/(98 - in_vol)
    }
    else if (gain <= 1)
    {
        gain = 1;                  // 98 <= in_vol <= 99: unity gain
    }
    // in_vol > 99: keep gain = in_vol - 98 as an amplification factor.

    int sampleCount = in_len / 2;  // byte length -> 16-bit sample count
    for (int idx = 0; idx < sampleCount; idx++)
    {
        int scaled = in_buf[idx] * gain;
        // Clamp to the signed 16-bit range to avoid wrap-around distortion.
        if (scaled > 32767)
        {
            scaled = 32767;
        }
        else if (scaled < -32768)
        {
            scaled = -32768;
        }
        out_buf[idx] = scaled;
    }
    return 0;
}
@end
接收麦克风信号
//Open the voice stream: register a mic-data callback and start capturing.
// NOTE(review): the name says "output" but this wires up the INPUT (microphone)
// side — consider renaming for clarity.
-(void)openOutputStream{
// NOTE(review): weakSelf is declared but never used in the block below —
// either capture it (weak/strong dance) or delete this line.
__weak __typeof__(self) weakSelf = self;
if ([XBEchoCancellation shared].bl_input == nil)
{
[XBEchoCancellation shared].bl_input = ^(AudioBufferList *bufferList1) {
AudioBuffer buffer = bufferList1->mBuffers[0];
// Wrap the captured PCM bytes; in real code, forward/encode pcmData here
// (as written, pcmData is created and immediately discarded).
NSData *pcmData = [NSData dataWithBytes:buffer.mData length:buffer.mDataByteSize];
};
}
[[XBEchoCancellation shared] startInput];
}
播放PCM
if ([XBEchoCancellation shared].bl_output == nil)
{
[XBEchoCancellation shared].bl_output = ^(AudioBufferList *bufferList, UInt32 inNumberFrames) {
AudioBuffer buffer = bufferList->mBuffers[0];
if(self->cacheData.length<buffer.mDataByteSize){
return;
}
NSData *subData = [[NSData alloc]init];
//截取缓存数据
subData=[self->cacheData subdataWithRange:NSMakeRange(0, buffer.mDataByteSize)];
// NSLog(@"subData=%@",subData);
Byte *tempByte = (Byte *)[subData bytes];
memcpy(buffer.mData, tempByte, buffer.mDataByteSize);
//移除已播放完成的数据
[self->cacheData replaceBytesInRange:NSMakeRange(0, buffer.mDataByteSize) withBytes:NULL length:0];
};
}
[[XBEchoCancellation shared] startOutput];
});