iOS app crashes with "Required condition is false: IsFormatSampleRateAndChannelCountValid(format)" when the microphone is selected

iqxoj9l9 posted on 2022-11-26 in iOS

My app crashes when the microphone is already in use by another app (in my case, Microsoft Teams running in the background) and I try to record audio inside my own app:

Terminating app due to uncaught exception 'com.apple.coreaudio.avfaudio', reason: 'required condition is false: IsFormatSampleRateAndChannelCountValid(format)'

Please see the code below:

func startRecording() {
        
        // Clear all previous session data and cancel task
        if recognitionTask != nil {
            recognitionTask?.cancel()
            recognitionTask = nil
        }

        // Create instance of audio session to record voice
        let audioSession = AVAudioSession.sharedInstance()
        do {
            try audioSession.setCategory(AVAudioSession.Category.record, mode: AVAudioSession.Mode.measurement, options: AVAudioSession.CategoryOptions.defaultToSpeaker)
            try audioSession.setActive(true, options: .notifyOthersOnDeactivation)
        } catch {
            print("audioSession properties weren't set because of an error.")
        }
    
        self.recognitionRequest = SFSpeechAudioBufferRecognitionRequest()

        let inputNode = audioEngine.inputNode

        guard let recognitionRequest = recognitionRequest else {
            fatalError("Unable to create an SFSpeechAudioBufferRecognitionRequest object")
        }

        recognitionRequest.shouldReportPartialResults = true

        self.recognitionTask = speechRecognizer?.recognitionTask(with: recognitionRequest, resultHandler: { (result, error) in

            var isFinal = false

            if result != nil {

                self.textField.text = result?.bestTranscription.formattedString
                isFinal = (result?.isFinal)!
            }

            if error != nil || isFinal {

                self.audioEngine.stop()
                inputNode.removeTap(onBus: 0)

                self.recognitionRequest = nil
                self.recognitionTask = nil

                self.micButton.isEnabled = true
            }
        })
    
        let recordingFormat = inputNode.outputFormat(forBus: 0)

    
        inputNode.installTap(onBus: 0, bufferSize: 1024, format: recordingFormat) { (buffer, when) in
            self.recognitionRequest?.append(buffer)
        }

        self.audioEngine.prepare()

        do {
            try self.audioEngine.start()
        } catch {
            print("audioEngine couldn't start because of an error.")
        }

        self.textField.text = ""
    }

I'm fairly sure the problem is somewhere in here, but I don't know how to fix it:

let recordingFormat = inputNode.outputFormat(forBus: 0)
inputNode.installTap(onBus: 0, bufferSize: 1024, format: recordingFormat) { (buffer, when) in
    self.recognitionRequest?.append(buffer)
}
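When another app holds the microphone, `inputNode.outputFormat(forBus: 0)` can report an invalid format (0 Hz, 0 channels), which is exactly what the `IsFormatSampleRateAndChannelCountValid(format)` assertion rejects. A quick diagnostic (a sketch) is to log the format before installing the tap:

let recordingFormat = inputNode.outputFormat(forBus: 0)
// While Teams (or any other app) owns the mic, this can print
// "0.0 Hz, 0 ch", the exact format the crashing assertion rejects.
print("input format: \(recordingFormat.sampleRate) Hz, \(recordingFormat.channelCount) ch")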

6jygbczu 1#

So the app was crashing because a valid microphone channel wasn't available to the audio engine.
Step 1: At the top of the file (after the imports) where you have `let audioEngine = AVAudioEngine()`, create a protocol to surface the error:

protocol FeedbackViewDelegate : AnyObject {
    func showFeedbackError(title: String, message: String)
    func audioDidStart(forType type : FeedbackViewType)
}

Step 2: Make your function return a Bool:

func startRecording() -> Bool {
}
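Callers can then branch on the return value, for example (a sketch mirroring the button handler in the full answer below; `micImageView` shows the recording state):

if self.startRecording() {
    self.micImageView.image = UIImage(named: "micRed") // recording is live
}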

Step 3: In the `AVAudioSession.sharedInstance()` do/catch section, add the delegate call and the early return shown below (they prevent the crash):

let audioSession = AVAudioSession.sharedInstance()
do {
    try audioSession.setCategory(AVAudioSession.Category.playAndRecord, mode: AVAudioSession.Mode.measurement, options: AVAudioSession.CategoryOptions.defaultToSpeaker)
    try audioSession.setActive(true, options: .notifyOthersOnDeactivation)
} catch {
    print("audioSession properties weren't set because of an error.")
    // These two lines are the fix: report the error and bail out
    // instead of letting the engine crash on an invalid input format.
    delegate?.showFeedbackError(title: "Sorry", message: "Mic is busy")
    return false
}

The early `return false` above stops the rest of the method from running when the session can't be configured.
Step 4: Create an extension on your view controller that adopts the protocol:

extension YourViewController: FeedbackViewDelegate { // YourViewController = your own class name
    func showFeedbackError(title: String, message: String) {
        // Enter your code here (see the filled-in sketch below).
    }
    func audioDidStart(forType type: FeedbackViewType) { }
}

Inside that function you can build an alert (there are millions of examples on the web); just remember to use `self` inside the closure's `in` section.
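A minimal version, assuming the conforming class is a `UIViewController` (so `present(_:animated:)` is available):

func showFeedbackError(title: String, message: String) {
    // Hop to the main thread before touching UIKit, and note the use
    // of `self` inside the closure, as the answer suggests.
    DispatchQueue.main.async { [weak self] in
        let alert = UIAlertController(title: title, message: message, preferredStyle: .alert)
        alert.addAction(UIAlertAction(title: "OK", style: .default))
        self?.present(alert, animated: true)
    }
}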


xqkwcwgp 2#

import UIKit
import Speech
import AVFoundation

// ViewLoadable and DefaultCardView are the author's own helper types;
// they are not defined in this snippet.

fileprivate let NibName = "FeedbackView"
protocol FeedbackViewDelegate : AnyObject {
    func showFeedbackError(title: String, message: String)
    func audioDidStart(forType type : FeedbackViewType)
}

enum FeedbackViewType {
    
    case feedbackView, rootcauseView, suggestionView, actionView
    
}

class FeedbackView: UIControl, ViewLoadable, SFSpeechRecognizerDelegate {
    
    @IBOutlet weak var textField: UITextField!
    
    static var nibName: String = NibName
    
    var feedbackViewType : FeedbackViewType = .feedbackView
    
    @IBOutlet var contentView: UIView!
    
    @IBOutlet weak var micButton: UIButton!
    
    @IBOutlet weak var micView: DefaultCardView!
    
    @IBOutlet weak var micImageView: UIImageView!
    
    weak var delegate : FeedbackViewDelegate?
    var allowTextEntry = true
    
    let speechRecognizer        = SFSpeechRecognizer(locale: Locale(identifier: "en-US"))

       var recognitionRequest      : SFSpeechAudioBufferRecognitionRequest?
       var recognitionTask         : SFSpeechRecognitionTask?
       let audioEngine             = AVAudioEngine()
    
    override init(frame: CGRect) {
        super.init(frame: frame)
        commonInit()
    }
    
    required public init?(coder aDecoder: NSCoder) {
        super.init(coder: aDecoder)
        commonInit()
    }
    
    init() {
        super.init(frame: CGRect.zero)
        commonInit()
    }
    
    private func commonInit() {
        Bundle(for: type(of: self)).loadNibNamed(NibName, owner: self, options: nil)
        backgroundColor = .clear
        addSubview(contentView)
        contentView.frame = self.bounds
        contentView.autoresizingMask = [.flexibleHeight, .flexibleWidth]
      
    }
    
    func configure(text: String, placeholder:String, contentType: UITextContentType,keyboardType:UIKeyboardType) {
        
        print("Did configure keyboard")
        self.textField.textContentType = contentType
        self.textField.isSecureTextEntry = (contentType == .password)
        self.textField.keyboardType = keyboardType
        self.textField.delegate = self
        self.textField.placeholder = placeholder
        if(!text.isEmpty) {
            self.textField.text = text
        }
    }
    
    
    @IBAction func btnStartSpeechToText(_ sender: UIButton) {
//        allowTextEntry = false
        if audioEngine.isRunning {
            let audioText = textField.text
                  self.audioEngine.stop()
            DispatchQueue.main.asyncAfter(deadline: .now() + 0.2) {
                self.textField.text = audioText
//                self.allowTextEntry = true
            }
            textField.text = audioText
                  self.micButton.isEnabled = true
                  self.micImageView.image = UIImage(named: "mic")
              } else {
                  print("Audio did start")
                  self.delegate?.audioDidStart(forType: self.feedbackViewType)
                  self.setupSpeech()
                  if self.startRecording() {
                      self.micImageView.image = UIImage(named: "micRed")

                  }
              }
    }
    
    func stopRecording() {
//        allowTextEntry = false
        let audioText = textField.text
        self.audioEngine.stop()
        self.recognitionRequest?.endAudio()
        DispatchQueue.main.asyncAfter(deadline: .now() + 0.2) {
            self.textField.text = audioText
//            self.allowTextEntry = true
        }
        self.micButton.isEnabled = true
        self.micImageView.image = UIImage(named: "mic")
    }
    
    func setupSpeech() {
        
//           self.micButton.isEnabled = false
           self.speechRecognizer?.delegate = self

           SFSpeechRecognizer.requestAuthorization { (authStatus) in

               var isButtonEnabled = false

               switch authStatus {
               case .authorized:
                   isButtonEnabled = true

               case .denied:
                   isButtonEnabled = false
                   print("User denied access to speech recognition")

               case .restricted:
                   isButtonEnabled = false
                   print("Speech recognition restricted on this device")

               case .notDetermined:
                   isButtonEnabled = false
                   print("Speech recognition not yet authorized")

               @unknown default:
                   isButtonEnabled = false
               }

               OperationQueue.main.addOperation() {
//                   self.micButton.isEnabled = isButtonEnabled
               }
           }
       }
    
//    func audioInputIsBusy(recordingFormat: AVAudioFormat) -> Bool {
//        guard recordingFormat.sampleRate == 0 || recordingFormat.channelCount == 0 else {
//            return false
//        }
//        return true
//    }
    
    func startRecording() -> Bool {
            
            // Clear all previous session data and cancel task
            if recognitionTask != nil {
                recognitionTask?.cancel()
                recognitionTask = nil
            }

            // Create instance of audio session to record voice
            let audioSession = AVAudioSession.sharedInstance()
            do {
                try audioSession.setCategory(AVAudioSession.Category.playAndRecord, mode: AVAudioSession.Mode.measurement, options: AVAudioSession.CategoryOptions.defaultToSpeaker)
                try audioSession.setActive(true, options: .notifyOthersOnDeactivation)
            } catch {
                print("audioSession properties weren't set because of an error.")
                delegate?.showFeedbackError(title: "Sorry", message: "Mic is busy")
                return false
            }
        
            self.recognitionRequest = SFSpeechAudioBufferRecognitionRequest()

            let inputNode = audioEngine.inputNode

            guard let recognitionRequest = recognitionRequest else {
                fatalError("Unable to create an SFSpeechAudioBufferRecognitionRequest object")
            }

            recognitionRequest.shouldReportPartialResults = true

            self.recognitionTask = speechRecognizer?.recognitionTask(with: recognitionRequest, resultHandler: { (result, error) in

                var isFinal = false

                if result != nil {

                    self.textField.text = result?.bestTranscription.formattedString
                    isFinal = (result?.isFinal)!
                }

                if error != nil || isFinal {

                    self.audioEngine.stop()
                    inputNode.removeTap(onBus: 0)
                    self.recognitionRequest = nil
                    self.recognitionTask = nil
                    self.micButton.isEnabled = true
                }
            })
        
            let recordingFormat = inputNode.outputFormat(forBus: 0)
            inputNode.installTap(onBus: 0, bufferSize: 1024, format: recordingFormat) { (buffer, when) in
                self.recognitionRequest?.append(buffer)
            }

            self.audioEngine.prepare()

            do {
                try self.audioEngine.start()
            } catch {
                print("audioEngine couldn't start because of an error.")
                delegate?.showFeedbackError(title: "Sorry", message: "Your microphone is used somewhere else")
                return false
            }

            self.textField.text = ""
        return true
        }
    
    func speechRecognizer(_ speechRecognizer: SFSpeechRecognizer, availabilityDidChange available: Bool) {
        if available {
            self.micButton.isEnabled = true
        } else {
            self.micButton.isEnabled = false
        }
    }
    

}

extension FeedbackView: UITextFieldDelegate {
    
    func textFieldShouldReturn(_ textField: UITextField) -> Bool {
        self.endEditing(true)
        return false
    }
    
    func textField(_ textField: UITextField, shouldChangeCharactersIn range: NSRange, replacementString string: String) -> Bool {
        return allowTextEntry
    }
}
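To wire this up, a hosting view controller adopts the delegate and embeds the view. A minimal sketch (the class name, frame values, and print are placeholders; it assumes the FeedbackView nib is in the app bundle):

import UIKit

class FeedbackHostViewController: UIViewController, FeedbackViewDelegate {

    private let feedbackView = FeedbackView()

    override func viewDidLoad() {
        super.viewDidLoad()
        feedbackView.frame = CGRect(x: 16, y: 120, width: view.bounds.width - 32, height: 80)
        feedbackView.delegate = self // get "Mic is busy" alerts instead of a crash
        view.addSubview(feedbackView)
    }

    func showFeedbackError(title: String, message: String) {
        let alert = UIAlertController(title: title, message: message, preferredStyle: .alert)
        alert.addAction(UIAlertAction(title: "OK", style: .default))
        present(alert, animated: true)
    }

    func audioDidStart(forType type: FeedbackViewType) {
        print("Recording started for feedback type: \(type)")
    }
}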
