I wrote a class for speech recognition using the Speech framework.
// Finish the previous task if one is still running.
if (self.recognitionTask) {
    // -finish lets the task deliver its final result before ending;
    // -cancel discards pending audio (and was reportedly causing system
    // errors / memory problems here).
    [self.recognitionTask finish];
}
self.recognitionTask = nil;

// Configure the audio session for recording.
// Check each method's BOOL return value — the NSError out-parameter is
// only guaranteed to be meaningful when the call reports failure.
NSError *error = nil;
AVAudioSession *session = AVAudioSession.sharedInstance;
if (![session setCategory:AVAudioSessionCategoryRecord
              withOptions:AVAudioSessionCategoryOptionDuckOthers
                    error:&error]) {
    [self stopWithError:error];
    return;
}
if (![session setActive:YES
            withOptions:AVAudioSessionSetActiveOptionNotifyOthersOnDeactivation
                  error:&error]) {
    [self stopWithError:error];
    return;
}

// Create and configure the speech recognition request.
self.recognitionRequest = [[SFSpeechAudioBufferRecognitionRequest alloc] init];
self.recognitionRequest.taskHint = SFSpeechRecognitionTaskHintConfirmation;

if (@available(iOS 13, *)) {
    // NOTE(review): the original comment said "keep speech recognition data
    // on device", but NO permits server-based recognition. Set to YES to
    // actually force on-device recognition — confirm which is intended.
    self.recognitionRequest.requiresOnDeviceRecognition = NO;
}

// Create a recognition task for the speech recognition session.
// Keep a reference to the task so that it can be canceled later.
__weak typeof(self) weakSelf = self;
self.recognitionTask = [self.speechRecognizer
    recognitionTaskWithRequest:self.recognitionRequest
                 resultHandler:^(SFSpeechRecognitionResult *_Nullable result,
                                 NSError *_Nullable taskError) {
    // Weak/strong dance: the task is retained by self, so a strong capture
    // of self here would create a retain cycle.
    __strong typeof(weakSelf) strongSelf = weakSelf;
    if (!strongSelf) {
        return;
    }
    // ...
    if (taskError != nil || result.isFinal) {
        // Recognition ended (error or final result) — tear down here.
    }
}];
I want to know whether the Speech framework supports recognition while the app is in the background. If it does, how should I modify the iOS code above?