系统语音识别API,支持 iOS 10 及以上的版本,需要麦克风权限和语音识别权限。

一、系统类

1. 导入系统库

import Speech

2. SFSpeechRecognizer声音处理器

SFSpeechRecognizer(locale: Locale(identifier: "zh-CN"))

根据传入的语言简称来返回一个声音处理器

3. SFSpeechAudioBufferRecognitionRequest 语音识别器

  lazy var recognitionRequest: SFSpeechAudioBufferRecognitionRequest = {
        let recognitionRequest = SFSpeechAudioBufferRecognitionRequest()
        // Deliver partial (in-progress) transcriptions as they arrive,
        // not only the final result
        recognitionRequest.shouldReportPartialResults = true
        return recognitionRequest
    }()

4. AVAudioEngine 处理声音的数据

    private let audioEngine = AVAudioEngine()

5. SFSpeechRecognitionTask 语音识别任务管理器

启用和关闭任务

// Start a recognition task; the handler is called with partial/final results or an error.
speechTask = speechRecognizer.recognitionTask(with: recognitionRequest) { (speechResult, error) in
}
// Cancel the in-flight task.
speechTask?.cancel()

二、代码整理

1. 初始化属性

// Speech recognizer for a locale (created in startDictating; nil if unsupported)
    private var speechRecognizer: SFSpeechRecognizer?
    // Recognition task handle (used to query .running state and to cancel)
    private var speechTask: SFSpeechRecognitionTask?
    // Buffer-based recognition request; microphone audio is appended to it
    lazy var recognitionRequest: SFSpeechAudioBufferRecognitionRequest = {
        let recognitionRequest = SFSpeechAudioBufferRecognitionRequest()
        // Deliver partial (in-progress) transcriptions, not only the final one
        recognitionRequest.shouldReportPartialResults = true
        return recognitionRequest
    }()
    
    // Audio engine that captures microphone data
    private let audioEngine = AVAudioEngine()

2. 判断权限

// 判断语音识别权限
    /// Checks speech-recognition authorization, requesting it if undetermined.
    /// - Parameter recongStatus: Called with `true` when authorized. Always
    ///   invoked on the main queue, because callers present alerts from it.
    private func checkRecognizerAuthorization(_ recongStatus: @escaping (_ resType: Bool) -> Void) {
        switch SFSpeechRecognizer.authorizationStatus() {
        case .authorized:
            recongStatus(true)
        case .notDetermined:
            // Per Apple docs, requestAuthorization may call back on a private
            // queue — hop to main so UI work in the completion is safe.
            SFSpeechRecognizer.requestAuthorization { status in
                DispatchQueue.main.async {
                    recongStatus(status == .authorized)
                }
            }
        default:
            // .denied / .restricted (and any future case): no permission.
            recongStatus(false)
        }
    }
    
    // 检测麦克风
    /// Checks microphone authorization, requesting it if undetermined.
    /// - Parameter authoStatus: Called with `true` when access is granted.
    ///   Always invoked on the main queue, because callers present alerts from it.
    private func checkmicroPhoneAuthorization(_ authoStatus: @escaping (_ resultStatus: Bool) -> Void) {
        switch AVCaptureDevice.authorizationStatus(for: .audio) {
        case .authorized:
            authoStatus(true)
        case .notDetermined:
            // requestAccess calls its handler on an arbitrary queue —
            // hop to main so UI work in the completion is safe.
            AVCaptureDevice.requestAccess(for: .audio) { granted in
                DispatchQueue.main.async {
                    authoStatus(granted)
                }
            }
        default:
            // .denied / .restricted (and any future case): no permission.
            authoStatus(false)
        }
    }

3. 开始语音识别

// 开始进行
    /// Configures the audio session and starts a speech-recognition task
    /// that reports transcriptions through `setStatusCallBack`.
    private func startDictating() {
        self.speechRecognizer = SFSpeechRecognizer(locale: Locale(identifier: "zh-CN"))
        guard let speechRecognizer = self.speechRecognizer else {
            // SFSpeechRecognizer returns nil when the locale is unsupported.
            GXAlertManager.showDefaultAlert(title: "温馨提示", msg: "抱歉,暂不支持当前地区使用语音输入")
            return
        }
        let audioSession = AVAudioSession.sharedInstance()
        
        do {
            try audioSession.setCategory(.record)
            try audioSession.setMode(.measurement)
            try audioSession.setActive(true, options: .notifyOthersOnDeactivation)
        } catch {
            // Session activation can fail at runtime (e.g. the hardware is in
            // use by another app). That is recoverable — report the failure
            // instead of crashing the whole app with fatalError.
            print("audio session setup failed: \(error)")
            setStatusCallBack(type: .finished, text: nil)
            return
        }
        setStatusCallBack(type: .start, text: nil)
        do {
            try audioEngine.start()
            // [weak self]: the task is stored in self.speechTask and its
            // handler escapes — a strong capture would create a cycle.
            speechTask = speechRecognizer.recognitionTask(with: recognitionRequest) { [weak self] (speechResult, error) in
                guard let self = self, let speechResult = speechResult, error == nil else {
                    return
                }
                // With shouldReportPartialResults this fires repeatedly,
                // each time with the best transcription so far.
                self.setStatusCallBack(type: .finished, text: speechResult.bestTranscription.formattedString)
            }
        } catch {
            print(error)
            self.setStatusCallBack(type: .finished, text: nil)
        }
    }

4. 停止语音识别

// 停止声音处理器,停止语音识别请求进程
    /// Stops audio capture and ends the recognition request and task.
    func stopDictating() {
        setStatusCallBack(type: .stop, text: nil)
        // Signal end-of-audio first so the recognizer can finalize.
        recognitionRequest.endAudio()
        audioEngine.stop()
        // Remove any tap installed on the input node so a later start can
        // install a fresh one (installing twice on the same bus crashes).
        // removeTap is a no-op when no tap is installed.
        audioEngine.inputNode.removeTap(onBus: 0)
        speechTask?.cancel()
    }

5. 调用方法

func startSpeech() {
        
        checkmicroPhoneAuthorization { (microStatus) in
            guard microStatus == true else {
                // 麦克风没有授权
                GXAlertManager.showDefaultAlert(title: "温馨提示", msg: "您已取消授权使用麦克风,如果需要使用语音识别功能,可以到设置中重新开启!")
                self.setStatusCallBack(type: .authDenied, text: nil)
                return
            }
            self.checkRecognizerAuthorization { recStatus in
                guard recStatus == true else {
                    GXAlertManager.showDefaultAlert(title: "温馨提示", msg: "您已取消授权使用语音识别,如果需要使用语音识别功能,可以到设置中重新开启!")
                    self.setStatusCallBack(type: .authDenied, text: nil)
                    return
                }
                // 初始化语音处理器的输入模式 语音处理器准备就绪(会为一些audioEngine启动时所必须的资源开辟内存)
                let inputNode = self.audioEngine.inputNode
                
                self.audioEngine.prepare()
                if self.speechTask?.state == .running {   // 如果当前进程状态是进行中
                    // 停止语音识别
                   self.stopDictating()
                } else {   // 进程状态不在进行中
                    // 开启语音识别
                    self.startDictating()
                }
            }
        }
    }
Logo

CSDN联合极客时间,共同打造面向开发者的精品内容学习社区,助力成长!

更多推荐