i am creating a speech ton text app where avaudiosession in stopped and started every 40 secs so that it can listen forever unless stopped manually . Sometimes app doesnt listen? please help

@objc func recordingStart() {

self.inputContainerView.uploadImageView.setBackgroundImage( UIImage(named: "recordred") , for: .normal) // flashing mic button

self.timerToBlinkMicButton = Timer.scheduledTimer(timeInterval: TimeInterval(1), target: self, selector: #selector(recordBlink), userInfo: nil, repeats: true) timer for continuous blinking of mic button

self.fullsTring = ""

audioEngine.reset()

if recognitionTask != nil {

recognitionTask?.cancel()

recognitionTask = nil

}

let audioSession = AVAudioSession.sharedInstance()

if audioSession.availableInputs!.count > 1

{

try? audioSession.setCategory(.playAndRecord, mode: .default ,options: [.allowBluetooth, .allowBluetoothA2DP,.allowAirPlay, .defaultToSpeaker]) //for normal phone microphone

if let headphones = audioSession.availableInputs?.first(where: { $0.portType == .bluetoothHFP || $0.portType == .bluetoothA2DP || $0.portType == .headphones })

{

do {

try audioSession.setPreferredInput(headphones)

} catch {

Alert.showBasic(title: "Bluetooth", message: "Cannot connect your bluetooth", vc: self)

}

}

else

{

do {

try audioSession.setCategory(AVAudioSession.Category.playAndRecord, mode: .measurement ,options: .defaultToSpeaker)

}

catch{

Alert.showBasic(title: "Could not access microphone", message: "Try again or Reload Page", vc: self)

return

}

//Alert.showBasic(title: "Charger", message: "charger connected", vc: self)

}

}

else

{

do {

try audioSession.setCategory(AVAudioSession.Category.playAndRecord, mode: .measurement ,options: [.defaultToSpeaker, .allowBluetooth, .allowBluetoothA2DP])

}

catch

{

Alert.showBasic(title: "Could not access microphone", message: "Try again or Reload Page", vc: self)

return

}

}

do{

// try audioSession.setMode(AVAudioSession.Mode.measurement)

try audioSession.setPreferredSampleRate(44100.0)

try audioSession.setActive(true, options: .notifyOthersOnDeactivation)

print("micrange")

if audioSession.isInputGainSettable {

let error : NSErrorPointer = nil

let success = try? audioSession.setInputGain(Float(micrange))

if success == nil{

print ("audio error")

// return

}

if (success != nil) {

print("\(String(describing: error))")

}

}

else {

print("Cannot set input gain")

}

} catch {

print("audioSession properties weren't set because of an error.")

stopRecording()

}

recognitionRequest = SFSpeechAudioBufferRecognitionRequest()

if #available(iOS 13, *) {

recognitionRequest?.requiresOnDeviceRecognition = true

} else {

// Fallback on earlier versions

}

let inputNode = audioEngine.inputNode

guard let recognitionRequest = recognitionRequest else {

fatalError("Unable to create an SFSpeechAudioBufferRecognitionRequest object")

}

recognitionRequest.shouldReportPartialResults = true //6

self.timerToAgainStartRecording = Timer.scheduledTimer(timeInterval: TimeInterval(40), target: self, selector: #selector(againStartRec), userInfo: nil, repeats: false)

recognitionTask = speechRecognizer.recognitionTask(with: recognitionRequest, resultHandler: { (result, error ) in //7

var isFinal = false

if result != nil {

self.timerDidFinishTalk.invalidate()

self.timerDidFinishTalk = Timer.scheduledTimer(timeInterval: TimeInterval(self.listeningTime

), target: self, selector: #selector(self.didFinishTalk), userInfo: nil, repeats: false)

let bestString = result?.bestTranscription.formattedString

self.fullsTring = bestString!

isFinal = result!.isFinal

}

if isFinal {

self.audioEngine.stop()

inputNode.removeTap(onBus: 0)

self.recognitionRequest = nil

self.recognitionTask = nil

isFinal = false

}

if error != nil{

print(self.isButtonEnabled)

URLCache.shared.removeAllCachedResponses()

self.timerToBlinkMicButton.invalidate()

DispatchQueue.main.async {

self.inputContainerView.uploadImageView.setBackgroundImage( UIImage(named: "microphone") , for: .normal)

self.inputContainerView.uploadImageView.alpha = 1.0

}

self.recognitionRequest?.endAudio()

self.audioEngine.stop()

inputNode.removeTap(onBus: 0)

guard let task = self.recognitionTask else {

return

}

task.cancel()

task.finish()

}

})

// let recordingFormat = AVAudioFormat(standardFormatWithSampleRate: 44100, channels: 1)

audioEngine.reset()

inputNode.removeTap(onBus: 0)

audioEngine.attach(player)

let bus = 0

let recordingFormat = inputNode.inputFormat(forBus: bus)

audioEngine.connect(player, to: audioEngine.mainMixerNode, format: recordingFormat)

inputNode.installTap(onBus: 0, bufferSize: 1024, format: recordingFormat) { (buffer, when) in

self.recognitionRequest?.append(buffer)

}

audioEngine.prepare()

do {

try audioEngine.start()

} catch {

print("audioEngine couldn't start because of an error.")

}

self.timerToStartRecording.invalidate()

self.hasrecorded = true

}