1. Recording
// Recorder.h
#import <Foundation/Foundation.h>
#import <AudioToolbox/AudioToolbox.h>
#import <CoreAudio/CoreAudioTypes.h>
// State shared with the recording callback
typedef struct AQCallbackStruct {
    AudioStreamBasicDescription mDataFormat; // audio format
    AudioQueueRef queue;                     // the audio queue object (AudioQueueRef)
    AudioQueueBufferRef mBuffers[3];         // recording buffers
    AudioFileID outputFile;
    unsigned long frameSize;                 // size of each buffer in bytes
    int run;                                 // nonzero while recording
} AQCallbackStruct;

@interface Recorder : NSObject {
    long _audioDataLength;   // total length of the recorded data
    Byte _audioByte[999999]; // fixed-size buffer holding the captured PCM samples
    long _audioDataIndex;    // current write offset into _audioByte
}
@property (nonatomic, assign) AQCallbackStruct aqc;
- (id)init;
- (void)start;
- (void)stop;
- (void)pause;
- (Byte *)getBytes;
- (long)getLength;
- (void)processAudioBuffer:(AudioQueueBufferRef)buffer withQueue:(AudioQueueRef)queue;
@end
// Recorder.m
#import "Recorder.h"

// Recording callback: invoked by the audio queue each time a buffer fills up
static void AQInputCallback(void *inUserData,
                            AudioQueueRef inAudioQueue,
                            AudioQueueBufferRef inBuffer,
                            const AudioTimeStamp *inStartTime,
                            UInt32 inNumPackets,
                            const AudioStreamPacketDescription *inPacketDesc) {
    Recorder *engine = (__bridge Recorder *)inUserData;
    if (inNumPackets > 0) {
        [engine processAudioBuffer:inBuffer withQueue:inAudioQueue];
    }
    // Hand the buffer back to the queue so recording continues
    if (engine.aqc.run) {
        AudioQueueEnqueueBuffer(engine.aqc.queue, inBuffer, 0, NULL);
    }
}

@implementation Recorder
- (id)init {
    self = [super init];
    if (self) {
        _aqc.mDataFormat.mSampleRate = 48000;               // 48 kHz sample rate
        _aqc.mDataFormat.mFormatID = kAudioFormatLinearPCM; // linear PCM samples
        _aqc.mDataFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
        _aqc.mDataFormat.mFramesPerPacket = 1;
        _aqc.mDataFormat.mChannelsPerFrame = 1;             // mono
        _aqc.mDataFormat.mBitsPerChannel = 16;              // bits per sample
        _aqc.mDataFormat.mBytesPerFrame = 2;                // bytes per frame
        _aqc.mDataFormat.mBytesPerPacket = 2;               // bytes per packet = frames per packet * bytes per frame
        _aqc.frameSize = 1024 * 8;
        AudioQueueNewInput(&_aqc.mDataFormat, AQInputCallback, (__bridge void *)self,
                           NULL, kCFRunLoopCommonModes, 0, &_aqc.queue);
        // Allocate the three buffers and queue them all up for recording
        for (int i = 0; i < 3; i++) {
            AudioQueueAllocateBuffer(_aqc.queue, (UInt32)_aqc.frameSize, &_aqc.mBuffers[i]);
            AudioQueueEnqueueBuffer(_aqc.queue, _aqc.mBuffers[i], 0, NULL);
        }
        _aqc.run = 1;
    }
    return self;
}
- (void)dealloc {
    AudioQueueStop(_aqc.queue, true);
    _aqc.run = 0;
    AudioQueueDispose(_aqc.queue, true);
}
- (void)start {
    AudioQueueStart(_aqc.queue, NULL);
}

- (void)stop {
    AudioQueueStop(_aqc.queue, true);
}

- (void)pause {
    AudioQueuePause(_aqc.queue);
}

- (Byte *)getBytes {
    return _audioByte;
}

- (long)getLength {
    return _audioDataLength;
}
- (void)processAudioBuffer:(AudioQueueBufferRef)buffer withQueue:(AudioQueueRef)queue {
    NSLog(@"processAudioData: %u", (unsigned int)buffer->mAudioDataByteSize);
    // Guard against overrunning the fixed-size capture buffer
    if (_audioDataIndex + buffer->mAudioDataByteSize > sizeof(_audioByte)) {
        return;
    }
    // Append the new samples to the in-memory recording
    memcpy(_audioByte + _audioDataIndex, buffer->mAudioData, buffer->mAudioDataByteSize);
    _audioDataIndex += buffer->mAudioDataByteSize;
    _audioDataLength = _audioDataIndex;
}
@end
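
Everything the recorder captures stays in memory, so reading the result back is just a matter of wrapping the byte buffer in an NSData. A minimal usage sketch (the surrounding control flow is illustrative, and a real app also needs microphone permission via NSMicrophoneUsageDescription and an active AVAudioSession):

// Hypothetical caller: record for a while, then read back the raw PCM bytes.
Recorder *recorder = [[Recorder alloc] init];
[recorder start];
// ... later, e.g. when the user taps "stop" ...
[recorder stop];
NSData *pcm = [NSData dataWithBytes:[recorder getBytes] length:[recorder getLength]];
NSLog(@"captured %lu bytes of 48 kHz 16-bit mono PCM", (unsigned long)pcm.length);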
2. Playback
// Player.h
#import <Foundation/Foundation.h>
#import <AudioToolbox/AudioToolbox.h>
#define EVERY_READ_LENGTH 20

// Playback state
typedef struct playState {
    AudioStreamBasicDescription dataFormat; // output audio format
    AudioQueueRef queue;                    // the audio queue object (AudioQueueRef)
    AudioQueueBufferRef buffer[3];          // playback buffers
    UInt32 bufferByteSize;                  // buffer size in bytes
    BOOL playing;                           // whether playback is in progress
} PlayState;

@interface Player : NSObject {
    PlayState _playState;
}
- (BOOL)playData:(NSData *)data;
@end
// Player.m
#import "Player.h"
#import <AudioToolbox/AudioQueue.h>
#import <MediaPlayer/MediaPlayer.h>
#import <AVFoundation/AVAudioSession.h>

static UInt32 gBufferSizeBytes = 0x100000; // 1 MB playback buffer

@interface Player () {
    Byte *_audioByte;
    long _audioDataIndex;
    long _audioDataLength;
}
@end

@implementation Player
// Playback callback: fired when the queue is finished with a buffer. Because the
// whole clip is enqueued as a single buffer (see playData: below), reaching this
// point means playback is over, so the queue and its buffers are torn down.
static void BufferCallback(void *inUserData, AudioQueueRef inAQ, AudioQueueBufferRef buffer) {
    Player *player = (__bridge Player *)inUserData;
    AudioQueueStop(player->_playState.queue, NO);
    for (int i = 0; i < 3; i++) {
        if (player->_playState.buffer[i] != NULL) { // only buffer[0] is ever allocated
            AudioQueueFreeBuffer(player->_playState.queue, player->_playState.buffer[i]);
        }
    }
    AudioQueueDispose(player->_playState.queue, YES);
}
- (instancetype)init {
    self = [super init];
    if (self) {
        [[AVAudioSession sharedInstance] setCategory:AVAudioSessionCategoryPlayAndRecord error:nil];
    }
    return self;
}
- (BOOL)playData:(NSData *)data {
    if (data == nil || [data isKindOfClass:[NSNull class]] || data.length <= 0) {
        return NO;
    }
    // The whole clip is copied into a single buffer, so reject anything larger
    if (data.length > gBufferSizeBytes) {
        return NO;
    }
    // 16-bit signed mono linear PCM at 44.1 kHz; this must match the data being played
    _playState.dataFormat.mSampleRate = 44100;
    _playState.dataFormat.mFormatID = kAudioFormatLinearPCM;
    _playState.dataFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
    _playState.dataFormat.mBytesPerPacket = 2;
    _playState.dataFormat.mFramesPerPacket = 1;
    _playState.dataFormat.mBytesPerFrame = 2;
    _playState.dataFormat.mChannelsPerFrame = 1;
    _playState.dataFormat.mBitsPerChannel = 16;
    _playState.dataFormat.mReserved = 0;
    OSStatus status = AudioQueueNewOutput(&_playState.dataFormat, BufferCallback,
                                          (__bridge void *)self, NULL, NULL, 0, &_playState.queue);
    if (status != noErr) {
        return NO;
    }
    // Copy the whole clip into one buffer and enqueue it
    status = AudioQueueAllocateBuffer(_playState.queue, gBufferSizeBytes, &_playState.buffer[0]);
    if (status != noErr) {
        return NO;
    }
    _playState.buffer[0]->mAudioDataByteSize = (UInt32)data.length;
    memcpy(_playState.buffer[0]->mAudioData, data.bytes, data.length);
    status = AudioQueueEnqueueBuffer(_playState.queue, _playState.buffer[0], 0, NULL);
    if (status != noErr) {
        return NO;
    }
    // Read the current volume, then make sure playback is at full volume
    float value;
    AudioQueueGetParameter(_playState.queue, kAudioQueueParam_Volume, &value);
    AudioQueueSetParameter(_playState.queue, kAudioQueueParam_Volume, 1.0);
    status = AudioQueueStart(_playState.queue, NULL);
    if (status != noErr) {
        return NO;
    }
    return YES;
}
@end
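
To close the loop, the bytes read back in the recorder sketch from section 1 can be handed straight to playData:. One caveat to reconcile first: the Recorder captures at 48 kHz while playData: hard-codes 44100, so one of the two sample rates must be changed or playback will come out slowed and pitched down. A sketch, reusing the pcm variable from the recorder example (illustrative names throughout):

// Hypothetical playback of the clip captured in section 1; assumes the sample
// rates on both sides have been made to agree (e.g. both 48000).
Player *player = [[Player alloc] init];
if (![player playData:pcm]) { // pcm: the NSData built from the Recorder's bytes
    NSLog(@"playback failed to start");
}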