//
// DemoTextViewVC.m
// XMUI_OC
//
//
#import "DemoTextViewVC.h"
#import "XMTextView.h"
#import <AVFoundation/AVFoundation.h>
#import<Speech/Speech.h>
/// Demo controller: streams microphone audio through SFSpeechRecognizer and
/// mirrors the live transcription into an XMTextView.
///
/// NOTE(review): the class assigns itself as the recognizer's delegate
/// (`_recognizer.delegate = self`) and implements
/// -speechRecognizer:availabilityDidChange:, which belongs to
/// SFSpeechRecognizerDelegate — the originally declared
/// SFSpeechRecognitionTaskDelegate is an unrelated protocol and was a bug.
@interface DemoTextViewVC () <SFSpeechRecognizerDelegate>

/// Text view that displays the recognized speech.
@property (nonatomic, strong) XMTextView *textV;

/// Recognizer created with the zh-CN locale in -viewDidLoad.
@property (nonatomic, strong) SFSpeechRecognizer *recognizer;

/// Live audio-buffer request fed by the input-node tap; nilled when the task ends.
@property (nonatomic, strong) SFSpeechAudioBufferRecognitionRequest *recognitionRequest;

/// In-flight recognition task; cancelled before a new one is started.
@property (nonatomic, strong) SFSpeechRecognitionTask *recognitionTask;

/// Engine that captures microphone input.
@property (nonatomic, strong) AVAudioEngine *audioEngine;

@end
@implementation DemoTextViewVC

#pragma mark - Lifecycle

- (void)viewDidLoad {
    [super viewDidLoad];
    [self.customNaviView setTitleStr:@"XMTextView"];

    // Build and style the demo text view that will show the transcription.
    self.textV = [[XMTextView alloc] initWithFrame:CGRectMake(10, 120, kScreenWidth_XM - 20, 500)];
    [self.view addSubview:self.textV];
    self.textV.placeholder = @"请输入内容";
    self.textV.placeholerFont = [UIFont systemFontOfSize:16];
    self.textV.font = [UIFont systemFontOfSize:16];
    self.textV.textColor = [UIColor redColor];
    self.textV.placeholderColor = [UIColor grayColor];
    self.textV.backgroundColor = [UIColor lightGrayColor];
    // self.textV.contentInset = UIEdgeInsetsMake(10, 10, 10, 10);

    // Recognizer for Simplified Chinese.
    NSLocale *locale = [[NSLocale alloc] initWithLocaleIdentifier:@"zh-CN"];
    _recognizer = [[SFSpeechRecognizer alloc] initWithLocale:locale];
    // Receive -speechRecognizer:availabilityDidChange: callbacks.
    _recognizer.delegate = self;

    // The authorization callback is not guaranteed to arrive on the main
    // thread; hop to main before touching the audio engine / UI. Capture
    // self weakly — the controller may be gone by the time the user answers
    // the permission prompt.
    __weak typeof(self) weakSelf = self;
    [SFSpeechRecognizer requestAuthorization:^(SFSpeechRecognizerAuthorizationStatus status) {
        dispatch_async(dispatch_get_main_queue(), ^{
            if (status == SFSpeechRecognizerAuthorizationStatusAuthorized) {
                NSLog(@"认证成功");
                [weakSelf startRecording];
            } else {
                NSLog(@"失败");
            }
        });
    }];
}

#pragma mark - Recording

/// Cancels any in-flight task, configures the shared audio session for
/// recording, and starts streaming microphone buffers into a new
/// speech-recognition task whose partial results are mirrored into textV.
- (void)startRecording {
    // Never let two recognition tasks run at once.
    if (self.recognitionTask) {
        [self.recognitionTask cancel];
        self.recognitionTask = nil;
    }

    self.audioEngine = [[AVAudioEngine alloc] init];

    // Route audio for recording. ALL three calls must succeed — the original
    // code used || and reported success if any single call worked. Also
    // surface the NSError instead of passing nil.
    AVAudioSession *audioSession = [AVAudioSession sharedInstance];
    NSError *sessionError = nil;
    BOOL categoryOK = [audioSession setCategory:AVAudioSessionCategoryRecord error:&sessionError];
    BOOL modeOK = [audioSession setMode:AVAudioSessionModeMeasurement error:&sessionError];
    BOOL activeOK = [audioSession setActive:YES
                                withOptions:AVAudioSessionSetActiveOptionNotifyOthersOnDeactivation
                                      error:&sessionError];
    if (categoryOK && modeOK && activeOK) {
        NSLog(@"可以使用");
    } else {
        NSLog(@"这里说明有的功能不支持");
        NSLog(@"audio session error: %@", sessionError);
    }

    self.recognitionRequest = [[SFSpeechAudioBufferRecognitionRequest alloc] init];
    AVAudioInputNode *inputNode = self.audioEngine.inputNode;
    // Deliver intermediate transcriptions while the user is still speaking.
    self.recognitionRequest.shouldReportPartialResults = YES;
    // Automatic punctuation is an iOS 16+ feature.
    if (@available(iOS 16, *)) {
        self.recognitionRequest.addsPunctuation = YES;
    }

    // The task retains its result handler; capture self weakly so a
    // dismissed controller is not kept alive (self -> recognitionTask ->
    // handler -> self was a retain cycle in the original).
    __weak typeof(self) weakSelf = self;
    self.recognitionTask =
        [self.recognizer recognitionTaskWithRequest:self.recognitionRequest
                                      resultHandler:^(SFSpeechRecognitionResult * _Nullable result,
                                                      NSError * _Nullable error) {
        __strong typeof(weakSelf) strongSelf = weakSelf;
        if (!strongSelf) {
            return;
        }
        BOOL isFinal = NO;
        if (result) {
            NSLog(@"result==== %@", result.bestTranscription.formattedString);
            isFinal = result.isFinal;
            // Handlers run on the recognizer's queue (main queue by default),
            // so updating the text view here is safe.
            strongSelf.textV.text = result.bestTranscription.formattedString;
        }
        if (error || isFinal) {
            // Tear down the audio pipeline once recognition ends.
            [strongSelf.audioEngine stop];
            [inputNode removeTapOnBus:0];
            strongSelf.recognitionRequest = nil;
            strongSelf.recognitionTask = nil;
        }
    }];

    // Feed microphone buffers into the request. Going through weakSelf means
    // appends become no-ops (message to nil) after the request is torn down.
    AVAudioFormat *recordingFormat = [inputNode outputFormatForBus:0];
    [inputNode installTapOnBus:0
                    bufferSize:1024
                        format:recordingFormat
                         block:^(AVAudioPCMBuffer * _Nonnull buffer, AVAudioTime * _Nonnull when) {
        [weakSelf.recognitionRequest appendAudioPCMBuffer:buffer];
    }];

    [self.audioEngine prepare];
    NSError *engineError = nil;
    BOOL engineStarted = [self.audioEngine startAndReturnError:&engineError];
    NSLog(@"%d === %@", engineStarted, engineError);
}

#pragma mark - SFSpeechRecognizerDelegate

/// Called when the recognizer's availability changes (e.g. network loss).
- (void)speechRecognizer:(SFSpeechRecognizer *)speechRecognizer availabilityDidChange:(BOOL)available {
    if (available) {
        // Recognition became available — a real app would re-enable input here.
    } else {
        // Recognition unavailable — a real app would disable input here.
    }
}

@end
/*
 NOTE(review): The text below is copy-paste residue from the web article
 "ios 语音识别调研 SFSpeechRecognizer" (page footer, unrelated story excerpts,
 and a "recommended reading" list). It is not Objective-C and breaks the build
 if left bare; it is preserved verbatim inside this comment pending removal.

 ios 语音识别调研 SFSpeechRecognizer
 ©著作权归作者所有,转载或内容合作请联系作者
 - 文/潘晓璐 我一进店门,熙熙楼的掌柜王于贵愁眉苦脸地迎上来,“玉大人,你说我怎么就摊上这事。” “怎么了?”我有些...
 - 文/花漫 我一把揭开白布。 她就那样静静地躺着,像睡着了一般。 火红的嫁衣衬着肌肤如雪。 梳的纹丝不乱的头发上,一...
 - 文/苍兰香墨 我猛地睁开眼,长吁一口气:“原来是场噩梦啊……” “哼!你这毒妇竟也来了?” 一声冷哼从身侧响起,我...
 推荐阅读更多精彩内容
 - 实践背景 随着人工智能的发展与语音识别技术的成熟,近年来已有不少语音识别辅助庭审的案例。早在2016年,广西省南宁...
 - 问题简述:Unity发布的xcode工程(已接入科大讯飞的语音识别),在语音识别操作结束后,游戏声音就莫名消失了。...
 - 前言当前的APP的查询都是使用手动输入,不仅效率低,而且查询的语句的限制比较大,不能够方便的扩展。 如果能方便的扩...
*/