Getting the error "SetOutputFormat: required condition is false: format.sampleRate == hwFormat.sampleRate". I have not found any reference to it, and the app gets stuck.

import UIKit
import Speech
import AVFoundation

class ViewController: UIViewController, SFSpeechRecognizerDelegate, AVSpeechSynthesizerDelegate {

    @IBOutlet weak var textView: UITextView!

    private let speechRecognizer = SFSpeechRecognizer(locale: Locale(identifier: "en-US"))!
    private var recognitionRequest: SFSpeechAudioBufferRecognitionRequest?
    private var recognitionTask: SFSpeechRecognitionTask?
    private let audioEngine = AVAudioEngine()

    private var audio: AVAudioInputNode? = nil
    var audioSession: AVAudioSession? = nil

    override func viewDidLoad() {
        super.viewDidLoad()

        // Speak the prompt first; startRecording() is kicked off later from the
        // synthesizer's didFinish delegate callback.
        self.getSpeech(asSpeach: "how are you ?")
        speechRecognizer.delegate = self

        SFSpeechRecognizer.requestAuthorization { (authStatus) in
            var isButtonEnabled = false
            switch authStatus {
            case .authorized:
                isButtonEnabled = true
            case .denied:
                isButtonEnabled = false
                print("User denied access to speech recognition")
            case .restricted:
                isButtonEnabled = false
                print("Speech recognition restricted on this device")
            case .notDetermined:
                isButtonEnabled = false
                print("Speech recognition not yet authorized")
            }
        }
    }

    func endAudio() {
        if audioEngine.isRunning {
            // Stop and uninitialize the underlying audio unit before stopping the engine.
            AudioOutputUnitStop((audioEngine.inputNode?.audioUnit)!)
            AudioUnitUninitialize((audioEngine.inputNode?.audioUnit)!)
            audioEngine.stop()
        }
    }

    func startRecording() {
        // Cancel any previous recognition task.
        if recognitionTask != nil {
            recognitionTask?.cancel()
            recognitionTask = nil
        }

        AudioOutputUnitStop((audioEngine.inputNode?.audioUnit)!)
        AudioUnitUninitialize((audioEngine.inputNode?.audioUnit)!)

        // Configure the shared audio session for recording.
        audioSession = AVAudioSession.sharedInstance()
        do {
            try audioSession?.setCategory(AVAudioSessionCategoryPlayAndRecord)
            try audioSession?.setMode(AVAudioSessionModeMeasurement)
            try audioSession?.setActive(true, with: .notifyOthersOnDeactivation)
        } catch {
            print("audioSession properties weren't set because of an error.")
        }

        recognitionRequest = SFSpeechAudioBufferRecognitionRequest()

        guard let inputNode = audioEngine.inputNode else {
            print("Audio engine has no input node")
            return
        }
        audio = inputNode
        audio?.removeTap(onBus: 2)

        guard let recognitionRequest = recognitionRequest else {
            fatalError("Unable to create an SFSpeechAudioBufferRecognitionRequest object")
        }
        recognitionRequest.shouldReportPartialResults = true

        recognitionTask = speechRecognizer.recognitionTask(with: recognitionRequest, resultHandler: { (result, error) in
            var isFinal = false
            if result != nil {
                self.textView.text = result?.bestTranscription.formattedString
                isFinal = (result?.isFinal)!
                // Business rule: if the user replied with the keyword, speak the prompt again.
                if self.textView.text == "Fine" || self.textView.text == "Find" {
                    self.audioEngine.stop()
                    self.textView.text = ""
                    DispatchQueue.main.asyncAfter(deadline: .now() + 0.2) {
                        self.getSpeech(asSpeach: "how are you ?")
                    }
                }
            }
            if error != nil || isFinal {
                self.audioEngine.stop()
                inputNode.removeTap(onBus: 2)
                self.recognitionRequest = nil
                self.recognitionTask = nil
            }
        })

        // Tap the input node with the format it currently reports.
        let recordingFormat = inputNode.outputFormat(forBus: 2)
        inputNode.installTap(onBus: 2, bufferSize: 1024, format: recordingFormat) { (buffer, when) in
            self.recognitionRequest?.append(buffer)
        }

        audioEngine.prepare()
        do {
            try audioEngine.start()
        } catch {
            print("audioEngine couldn't start because of an error.")
        }

        textView.text = "Say something, I'm listening!"
    }

    func speechRecognizer(_ speechRecognizer: SFSpeechRecognizer, availabilityDidChange available: Bool) {
        if available {
            // something
        } else {
            // else thing
        }
    }

    func getSpeech(asSpeach: String) {
        // Route output to the speaker and speak the prompt.
        audioSession = AVAudioSession.sharedInstance()
        do {
            try audioSession?.overrideOutputAudioPort(.speaker)
        } catch {
            print(error.localizedDescription)
        }

        let synthesizer = AVSpeechSynthesizer()
        let utterance = AVSpeechUtterance(string: asSpeach)
        utterance.rate = AVSpeechUtteranceDefaultSpeechRate
        synthesizer.delegate = self
        synthesizer.speak(utterance)
    }

    public func speechSynthesizer(_ synthesizer: AVSpeechSynthesizer, didFinish utterance: AVSpeechUtterance) {
        // When speaking finishes, tear down the session and start listening.
        audioEngine.stop()
        do {
            try audioSession?.setActive(false)
        } catch {
            print(error.localizedDescription)
        }
        print("speech synthesizer ended")

        if audioEngine.isRunning {
            endAudio()
        }

        DispatchQueue.main.asyncAfter(deadline: .now() + 0.5) {
            self.startRecording()
        }
    }
}



As per the requirement, the app first dictates one string, and if the user's reply matches a keyword (per the business rule) it dictates again, but I'm getting a crash. I think the crash is caused by a mismatch between the audio format used for dictation and the one used for speech recognition.

The error message is:

ERROR: [0x1b2a4cb40] >avae> AVAudioIONodeImpl.mm:884: SetOutputFormat: required condition is false: format.sampleRate == hwFormat.sampleRate

Terminating app due to uncaught exception 'com.apple.coreaudio.avfaudio', reason: 'required condition is false: format.sampleRate == hwFormat.sampleRate'

I think the problem is with AVAudioSession. Please suggest a fix; I have tried many times but have had no luck so far.
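One quick way to confirm the mismatch is to log both sample rates just before installing the tap. A rough sketch only (the helper name is mine, and it uses the current SDK where inputNode is non-optional):

import AVFoundation

// Sketch: print both rates; the crash fires when these two numbers differ.
func logSampleRates(for engine: AVAudioEngine) {
    let session = AVAudioSession.sharedInstance()
    let node = engine.inputNode
    print("hardware sample rate:", session.sampleRate)
    print("input node bus 0 sample rate:", node.outputFormat(forBus: 0).sampleRate)
}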

It seems that you cannot change the sampling rate.


Have a look here:

http://stackoverflow.com/questions/40821754/i-want-to-change-the-sample-rate-from-my-input-node-from-44100-to-8000

Hi Claude31,

That post is not helpful for me. We use AVAudioSession.sharedInstance() for two purposes: once for dictating the prompt and once for getting the speech-to-text input from the user. Because we use it multiple times, we need to destroy the session and recreate it again. Please tell me how I can arrange the same sample rate for both (i.e. dictation and input from the user). Any help will be appreciated.
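For reference, one pattern that is often suggested for this particular crash, shown here as a sketch only and not a confirmed fix for the exact code above: re-read the input node's format after every session change and install the tap on bus 0 (the only bus an input node exposes, rather than bus 2) with exactly that format. It uses the modern AVAudioSession API names and assumes an engine and request like the ones in the question:

import AVFoundation
import Speech

// Sketch: reconfigure the session, then tap bus 0 with the format the hardware
// reports *after* activation, so the tap's sampleRate matches hwFormat.sampleRate.
func restartTap(engine: AVAudioEngine,
                request: SFSpeechAudioBufferRecognitionRequest) throws {
    let session = AVAudioSession.sharedInstance()
    try session.setCategory(.playAndRecord, mode: .measurement, options: [])
    try session.setActive(true, options: .notifyOthersOnDeactivation)

    let inputNode = engine.inputNode          // an input node only exposes bus 0
    inputNode.removeTap(onBus: 0)             // drop any tap installed with a stale format

    let hwFormat = inputNode.outputFormat(forBus: 0)
    inputNode.installTap(onBus: 0, bufferSize: 1024, format: hwFormat) { buffer, _ in
        request.append(buffer)
    }

    engine.prepare()
    try engine.start()
}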

Hi guys, I get the same behavior and the same bug as Elagoon.

Has anyone found anything?

Best regards

This is an old question, but does anyone have a solution?

I don't know if this will work for you, but it did for me: I declared the inputNode variable as early as possible (e.g. let inputNode = audioEngine.inputNode), in viewDidLoad. I think this solved the issue because it gives the app more time to instantiate the inputNode, which it sometimes doesn't have enough time to do, hence the random crashes with the error message above that I was also getting. I used to declare inputNode right above installTap, which I think is too late; when the inputNode is not instantiated properly, the sample rate is different.
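A minimal sketch of that suggestion (property names are illustrative and mirror the code in the question):

import UIKit
import AVFoundation

class ViewController: UIViewController {
    private let audioEngine = AVAudioEngine()
    private var inputNode: AVAudioInputNode!

    override func viewDidLoad() {
        super.viewDidLoad()
        // Touch the input node once, early, so it is fully instantiated
        // before any installTap call later in the flow.
        inputNode = audioEngine.inputNode
    }
}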

For the same error, in my case the answer was very simple. The first time I got it, it was because I had moved to a Mac mini with no built-in mic, so adding a USB mic fixed it. The second time, I had switched to a Bluetooth mic and had to change back to the USB one. I'm using AudioKit. If anyone knows what to do about the Bluetooth-mic problem (I think changing the sample rate might help, but how?), that would really help me.
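I can't confirm this for the AudioKit/Bluetooth case, but one thing to try (a sketch only; the helper name and the 44100 value are just examples) is to allow Bluetooth input on the session, ask for a preferred sample rate, and then build any tap format from whatever rate the session actually reports:

import AVFoundation

// Sketch: request a preferred rate, but always read back the rate the
// hardware actually granted and use that value for tap formats.
func configureSessionForExternalMic() throws -> Double {
    let session = AVAudioSession.sharedInstance()
    try session.setCategory(.playAndRecord,
                            mode: .default,
                            options: [.allowBluetooth, .defaultToSpeaker])
    try session.setPreferredSampleRate(44_100)   // the hardware may ignore this
    try session.setActive(true)
    return session.sampleRate
}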
