Hey, I encountered the same issue with Xcode 15.3 as well. I tried fixing it, but unfortunately, it didn't work for me either.
Here is the sample code:
import XCTest
@testable import Dummy

final class CryptoDataAPIResourcesUnitTests: XCTestCase {

    func test_CoinAPIResources() {
        let resources = CoinAPIResources(httpUtility: HttpUtility())
        let expectation = self.expectation(description: "CoinAPIResources")

        resources.getCoinList { coinData, errorMessage in
            XCTAssertNotNil(coinData)
            XCTAssertNil(errorMessage)
            expectation.fulfill()
        }

        waitForExpectations(timeout: 5)
    }
}
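In case anyone wants to reproduce this outside the original project, this is roughly the shape of the API the test above exercises. CoinData, the getCoinList(completion:) signature, and the HttpUtility stub are assumptions reconstructed from the call site, not the real implementations in the Dummy module:
import Foundation

// Hypothetical stand-ins inferred from the test's call site; the real types live in the Dummy module.
struct CoinData: Decodable {
    let coins: [String]
}

final class HttpUtility {
    // Placeholder networking helper; the real HttpUtility presumably wraps URLSession.
}

final class CoinAPIResources {
    private let httpUtility: HttpUtility

    init(httpUtility: HttpUtility) {
        self.httpUtility = httpUtility
    }

    // Completion-handler API matching the test: either coin data or an error message is delivered.
    func getCoinList(completion: @escaping (CoinData?, String?) -> Void) {
        // A real implementation would perform a network request via httpUtility;
        // this stub just returns an empty payload so the shape compiles.
        completion(CoinData(coins: []), nil)
    }
}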
I'm experiencing the same issue on iOS 18, although it works fine on older versions. I receive partial results, but the text then disappears and comes back as an empty string in the subsequent repeated callbacks.
I'm adding the screenshot and code here for reference.
import UIKit
import Speech
import AVFoundation

public protocol SpeechRecognizerWrapperDelegate: AnyObject {
    func speechRecognitionFinished(transcription: String)
    func speechRecognitionPartialResult(transcription: String)
    func speechRecognitionRecordingNotAuthorized(statusMessage: String)
    func speechRecognitionTimedOut()
}

public class SpeechRecognizerWrapper: NSObject, SFSpeechRecognizerDelegate {
    public weak var delegate: SpeechRecognizerWrapperDelegate?

    private let speechRecognizer = SFSpeechRecognizer(locale: Locale(identifier: (LocalData.sharedInstance.UPAppLanguage == LanguageCode.Hindi.rawValue) ? "hi-IN" : "en-IN"))!
    private var recognitionRequest: SFSpeechAudioBufferRecognitionRequest?
    private var recognitionTask: SFSpeechRecognitionTask?
    private let audioEngine = AVAudioEngine()

    var notAuthorise = true
    var noAuthStatus = ""
    var allPermissionGranted: (() -> Void)?

    public override init() {
        super.init()
        setupSpeechRecognition()
    }

    private func setupSpeechRecognition() {
        speechRecognizer.delegate = self
    }
    func requestAuthorization() {
        if SFSpeechRecognizer.authorizationStatus() == .authorized && AVAudioSession.sharedInstance().recordPermission == .granted {
            self.notAuthorise = false
            return
        }
        self.notAuthorise = true
        SFSpeechRecognizer.requestAuthorization { [weak self] authStatus in
            guard let self = self else { return }
            /*
             The callback may not be called on the main thread. Add an
             operation to the main queue to update the record button's state.
             */
            OperationQueue.main.addOperation {
                if authStatus != .authorized {
                    self.notAuthorise = true
                    self.noAuthStatus = ""
                    if authStatus == .denied {
                        self.noAuthStatus = "User denied access to speech recognition"
                    } else if authStatus == .restricted {
                        self.noAuthStatus = "Speech recognition restricted on this device"
                    }
                } else {
                    self.checkTheRecord()
                    self.notAuthorise = false
                }
            }
        }
    }

    func checkTheRecord() {
        switch AVAudioSession.sharedInstance().recordPermission {
        case .granted:
            // self.allPermissionGranted?()
            break
        case .denied:
            break
        case .undetermined:
            AVAudioSession.sharedInstance().requestRecordPermission { [weak self] granted in
                if granted {
                    // self?.allPermissionGranted?()
                } else {
                    self?.notAuthorise = true
                }
            }
        default:
            break
        }
    }
    private var speechRecognitionTimeout: Timer?

    public var speechTimeoutInterval: TimeInterval = 2 {
        didSet {
            restartSpeechTimeout()
        }
    }

    private func restartSpeechTimeout() {
        speechRecognitionTimeout?.invalidate()
        speechRecognitionTimeout = Timer.scheduledTimer(timeInterval: speechTimeoutInterval, target: self, selector: #selector(timedOut), userInfo: nil, repeats: false)
    }
    public func startRecording() throws {
        // Cancel any in-flight recognition before starting a new session.
        if let recognitionTask = recognitionTask {
            recognitionTask.cancel()
            self.audioEngine.stop()
            self.audioEngine.inputNode.removeTap(onBus: 0)
            self.recognitionTask = nil
            self.recognitionRequest = nil
        }

        let audioSession = AVAudioSession.sharedInstance()
        try audioSession.setCategory(.record, mode: .measurement, options: .duckOthers)
        try audioSession.setActive(true, options: .notifyOthersOnDeactivation)

        recognitionRequest = SFSpeechAudioBufferRecognitionRequest()
        let inputNode = audioEngine.inputNode
        let mixerNode = AVAudioMixerNode()
        audioEngine.attach(mixerNode)
        audioEngine.connect(inputNode, to: mixerNode, format: nil)

        guard let recognitionRequest = recognitionRequest else { return }

        // Configure the request so that results are returned before audio recording is finished.
        recognitionRequest.shouldReportPartialResults = true

        // A recognition task represents a speech recognition session.
        // We keep a reference to the task so that it can be cancelled.
        recognitionTask = speechRecognizer.recognitionTask(with: recognitionRequest) { [weak self] result, error in
            guard let self = self else { return }
            var isFinal = false
            if let result = result {
                print("formattedString: \(result.bestTranscription.formattedString)")
                isFinal = result.isFinal
                self.delegate?.speechRecognitionPartialResult(transcription: result.bestTranscription.formattedString)
            }
            if error != nil || isFinal {
                self.audioEngine.stop()
                inputNode.removeTap(onBus: 0)
                self.recognitionRequest = nil
                self.recognitionTask = nil
            }
            if isFinal {
                self.delegate?.speechRecognitionFinished(transcription: result!.bestTranscription.formattedString)
                self.stopRecording()
            } else {
                if error == nil {
                    self.restartSpeechTimeout()
                } else {
                    // cancel voice recognition
                }
            }
        }

        let recordingFormat = inputNode.outputFormat(forBus: 0)
        inputNode.installTap(onBus: 0, bufferSize: 1024, format: recordingFormat) { [weak self] (buffer: AVAudioPCMBuffer, when: AVAudioTime) in
            guard let self = self else { return }
            self.recognitionRequest?.append(buffer)
        }

        audioEngine.prepare()
        try audioEngine.start()
    }
    @objc private func timedOut() {
        stopRecording()
        self.delegate?.speechRecognitionTimedOut()
    }

    public func stopRecording() {
        audioEngine.stop()
        audioEngine.inputNode.removeTap(onBus: 0) // Remove the tap on the input bus when stopping recording.
        recognitionRequest?.endAudio()
        speechRecognitionTimeout?.invalidate()
        speechRecognitionTimeout = nil
    }
}
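For completeness, this is roughly how the delegate side consumes the wrapper and where the iOS 18 behaviour shows up. The view controller and its label are hypothetical names added for illustration; only the SpeechRecognizerWrapper and its delegate methods come from the code above:
import UIKit

// Hypothetical consumer used only to illustrate the reported symptom; the real app's view controller will differ.
final class SpeechDemoViewController: UIViewController, SpeechRecognizerWrapperDelegate {
    private let recognizer = SpeechRecognizerWrapper()
    private let transcriptLabel = UILabel()

    override func viewDidLoad() {
        super.viewDidLoad()
        recognizer.delegate = self
        recognizer.requestAuthorization()
    }

    func startListening() {
        try? recognizer.startRecording()
    }

    // On iOS 18 this is where the issue is visible: after a few non-empty partial
    // transcriptions, subsequent callbacks arrive with an empty string.
    func speechRecognitionPartialResult(transcription: String) {
        print("partial: '\(transcription)'")
        if !transcription.isEmpty {
            transcriptLabel.text = transcription
        }
    }

    func speechRecognitionFinished(transcription: String) {
        transcriptLabel.text = transcription
    }

    func speechRecognitionRecordingNotAuthorized(statusMessage: String) {
        print(statusMessage)
    }

    func speechRecognitionTimedOut() {
        print("Speech recognition timed out")
    }
}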
I have shared feedback via Feedback Assistant; the feedback number is FB15307396.