Hi all,
I'm getting a strange SwiftData error at runtime in my voice recorder app. Whenever I attempt to generate and cache a samples array so that my app can visualize the audio's waveform, the app crashes and Xcode shows the following generated accessor code:
{
    @storageRestrictions(accesses: _$backingData, initializes: _samples)
    init(initialValue) {
        _$backingData.setValue(forKey: \.samples, to: initialValue)
        _samples = _SwiftDataNoType()
    }
    get {
        _$observationRegistrar.access(self, keyPath: \.samples)
        return self.getValue(forKey: \.samples)
    }
    set {
        _$observationRegistrar.withMutation(of: self, keyPath: \.samples) {
            self.setValue(forKey: \.samples, to: newValue)
        }
    }
}
The debugger stops with an execution breakpoint on the line _$observationRegistrar.withMutation(of: self, keyPath: \.samples).
Here is my model class:
import Foundation
import SwiftData

@Model final class Recording {
    var id: UUID?
    var name: String?
    var date: Date?
    var samples: [Float]? = nil

    init(name: String) {
        self.id = UUID()
        self.name = name
        self.date = Date.now
    }
}
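For context, the model is registered with a ModelContainer and recordings are inserted through a ModelContext in the usual way; here's a minimal sketch of the kind of setup I mean (the app and view names below are placeholders, not my actual code):

import SwiftUI
import SwiftData

// Minimal sketch only; "VoiceRecorderApp" and "RecordingsListView" are
// placeholder names, not the actual ones from the project.
@main
struct VoiceRecorderApp: App {
    var body: some Scene {
        WindowGroup {
            RecordingsListView()
        }
        // Registers the Recording model so views receive a ModelContext
        // through the environment.
        .modelContainer(for: Recording.self)
    }
}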
And here is where the samples are being generated (sorry for the long code):
private func processSamples(from audioFile: AVAudioFile) async throws -> [Float] {
    let sampleCount = 128
    let frameCount = Int(audioFile.length)
    let samplesPerSegment = frameCount / sampleCount

    let buffer = try createAudioBuffer(for: audioFile, frameCapacity: AVAudioFrameCount(samplesPerSegment))
    let channelCount = Int(buffer.format.channelCount)
    let audioData = try readAudioData(from: audioFile, into: buffer, sampleCount: sampleCount, samplesPerSegment: samplesPerSegment, channelCount: channelCount)
    let processedResults = try await processAudioSegments(audioData: audioData, sampleCount: sampleCount, samplesPerSegment: samplesPerSegment, channelCount: channelCount)

    var samples = createSamplesArray(from: processedResults, sampleCount: sampleCount)
    samples = applyNoiseFloor(to: samples, noiseFloor: 0.01)
    samples = normalizeSamples(samples)
    return samples
}

private func createAudioBuffer(for audioFile: AVAudioFile, frameCapacity: AVAudioFrameCount) throws -> AVAudioPCMBuffer {
    guard let buffer = AVAudioPCMBuffer(pcmFormat: audioFile.processingFormat, frameCapacity: frameCapacity) else {
        throw Errors.AudioProcessingError
    }
    return buffer
}

private func readAudioData(from audioFile: AVAudioFile, into buffer: AVAudioPCMBuffer, sampleCount: Int, samplesPerSegment: Int, channelCount: Int) throws -> [[Float]] {
    var audioData = [[Float]](repeating: [Float](repeating: 0, count: samplesPerSegment * channelCount), count: sampleCount)
    for segment in 0..<sampleCount {
        let segmentStart = AVAudioFramePosition(segment * samplesPerSegment)
        audioFile.framePosition = segmentStart
        try audioFile.read(into: buffer)
        if let channelData = buffer.floatChannelData {
            let dataCount = samplesPerSegment * channelCount
            audioData[segment] = Array(UnsafeBufferPointer(start: channelData[0], count: dataCount))
        }
    }
    return audioData
}

private func processAudioSegments(audioData: [[Float]], sampleCount: Int, samplesPerSegment: Int, channelCount: Int) async throws -> [(Int, Float)] {
    try await withThrowingTaskGroup(of: (Int, Float).self) { taskGroup in
        for segment in 0..<sampleCount {
            let segmentData = audioData[segment]
            taskGroup.addTask {
                var rms: Float = 0
                vDSP_rmsqv(segmentData, 1, &rms, vDSP_Length(samplesPerSegment * channelCount))
                return (segment, rms)
            }
        }
        var results = [(Int, Float)]()
        for try await result in taskGroup {
            results.append(result)
        }
        return results
    }
}

private func createSamplesArray(from processedResults: [(Int, Float)], sampleCount: Int) -> [Float] {
    var samples = [Float](repeating: 0, count: sampleCount)
    vDSP_vfill([0], &samples, 1, vDSP_Length(sampleCount))
    for (segment, rms) in processedResults {
        samples[segment] = rms
    }
    return samples
}

private func applyNoiseFloor(to samples: [Float], noiseFloor: Float) -> [Float] {
    var result = samples
    let noiseFloorArray = [Float](repeating: noiseFloor, count: samples.count)
    vDSP_vsub(noiseFloorArray, 1, samples, 1, &result, 1, vDSP_Length(samples.count))
    return result
}

private func normalizeSamples(_ samples: [Float]) -> [Float] {
    var result = samples
    var min: Float = 0
    var max: Float = 0
    vDSP_minv(samples, 1, &min, vDSP_Length(samples.count))
    vDSP_maxv(samples, 1, &max, vDSP_Length(samples.count))
    if max > min {
        var a: Float = 1.0 / (max - min)
        var b: Float = -min / (max - min)
        vDSP_vsmsa(samples, 1, &a, &b, &result, 1, vDSP_Length(samples.count))
    } else {
        vDSP_vfill([0.5], &result, 1, vDSP_Length(samples.count))
    }
    return result
}
And this is how the processSamples function is used:
private func loadAudioSamples() async {
    let url = recording.fileURL
    if let audioFile = loadAudioFile(url: url) {
        if recording.samples == nil {
            recording.samples = try? await processSamples(from: audioFile)
        }
    }
}

private func loadAudioFile(url: URL) -> AVAudioFile? {
    do {
        let audioFile = try AVAudioFile(forReading: url)
        return audioFile
    } catch {
        return nil
    }
}
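For what it's worth, here is a variation I've been considering that keeps the assignment to the model on the main actor, in case the crash comes from mutating the @Model from a background context (that's only a guess on my part, and the function name below is just a placeholder):

@MainActor
private func loadAudioSamplesOnMain() async {
    // Sketch only: assumes (unconfirmed) that the crash is related to
    // mutating the SwiftData model off the main actor.
    guard recording.samples == nil,
          let audioFile = loadAudioFile(url: recording.fileURL) else { return }

    // The per-segment RMS work still happens in the task group's child
    // tasks; only the final assignment to the model runs here.
    recording.samples = try? await processSamples(from: audioFile)
}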
Any help or leads would be greatly appreciated! Thanks!
Hi y'all,
After getting mono recording working, I want to differentiate my app from the standard Voice Memos app by allowing stereo recording. I followed this tutorial (https://developer.apple.com/documentation/avfaudio/capturing_stereo_audio_from_built-in_microphones) to get my voice recorder to record stereo audio. However, when I look at the waveform in Audacity, both channels are identical. If I look at the file info after sharing it, it says the file is stereo. I'm not sure exactly what's going on; my suspicion is that the recorder is only using one microphone. Here is the relevant part of my recorder:
// MARK: - Initialization
override init() {
    super.init()
    do {
        try configureAudioSession()
        try enableBuiltInMicrophone()
        try setupAudioRecorder()
    } catch {
        // If any errors occur during initialization,
        // terminate the app with a fatalError.
        fatalError("Error: \(error)")
    }
}

// MARK: - Audio Session and Recorder Configuration
private func enableBuiltInMicrophone() throws {
    let audioSession = AVAudioSession.sharedInstance()
    let availableInputs = audioSession.availableInputs
    guard let builtInMicInput = availableInputs?.first(where: { $0.portType == .builtInMic }) else {
        throw Errors.NoBuiltInMic
    }
    do {
        try audioSession.setPreferredInput(builtInMicInput)
    } catch {
        throw Errors.UnableToSetBuiltInMicrophone
    }
}

private func configureAudioSession() throws {
    let audioSession = AVAudioSession.sharedInstance()
    do {
        try audioSession.setCategory(.record, mode: .default, options: [.allowBluetooth])
        try audioSession.setActive(true)
    } catch {
        throw Errors.FailedToInitSessionError
    }
}

private func setupAudioRecorder() throws {
    let date = Date()
    let dateFormatter = DateFormatter()
    dateFormatter.locale = Locale(identifier: "en_US_POSIX")
    dateFormatter.dateFormat = "yyyy-MM-dd, HH:mm:ss"
    let timestamp = dateFormatter.string(from: date)
    self.recording = Recording(name: timestamp)
    guard let fileURL = recording?.returnURL() else {
        fatalError("Failed to create file URL")
    }
    self.currentURL = fileURL
    print("Recording URL: \(fileURL)")
    do {
        let audioSettings: [String: Any] = [
            AVFormatIDKey: Int(kAudioFormatMPEG4AAC),
            AVLinearPCMIsNonInterleaved: false,
            AVSampleRateKey: 44_100.0,
            AVNumberOfChannelsKey: isStereoSupported ? 2 : 1,
            AVLinearPCMBitDepthKey: 16,
            AVEncoderAudioQualityKey: AVAudioQuality.max.rawValue
        ]
        audioRecorder = try AVAudioRecorder(url: fileURL, settings: audioSettings)
    } catch {
        throw Errors.UnableToCreateAudioRecorder
    }
    audioRecorder.delegate = self
    audioRecorder.prepareToRecord()
}

// MARK: - Update orientation
public func updateOrientation(withDataSourceOrientation orientation: AVAudioSession.Orientation = .front, interfaceOrientation: UIInterfaceOrientation) async throws {
    let session = AVAudioSession.sharedInstance()
    guard let preferredInput = session.preferredInput,
          let dataSources = preferredInput.dataSources,
          let newDataSource = dataSources.first(where: { $0.orientation == orientation }),
          let supportedPolarPatterns = newDataSource.supportedPolarPatterns else {
        return
    }
    isStereoSupported = supportedPolarPatterns.contains(.stereo)
    if isStereoSupported {
        try newDataSource.setPreferredPolarPattern(.stereo)
    }
    try preferredInput.setPreferredDataSource(newDataSource)
    try session.setPreferredInputOrientation(interfaceOrientation.inputOrientation)
}
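To test my suspicion that only one microphone is actually being used, this is roughly the kind of diagnostic I had in mind to log the selected data source and polar pattern right before recording (sketch only; the function name is a placeholder):

private func logCurrentInputConfiguration() {
    // Diagnostic sketch: prints which input, data source, and polar
    // pattern the session actually ends up using.
    let session = AVAudioSession.sharedInstance()
    for input in session.currentRoute.inputs {
        print("Input port: \(input.portName)")
        if let dataSource = input.selectedDataSource {
            print("  Selected data source: \(dataSource.dataSourceName)")
            print("  Selected polar pattern: \(dataSource.selectedPolarPattern?.rawValue ?? "none")")
        }
    }
    print("Input orientation: \(session.inputOrientation.rawValue)")
}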
Here is the relevant part of my SwiftUI view:
RecordView()
    .onAppear {
        Task {
            if await AVAudioApplication.requestRecordPermission() {
                // The user grants access. Present recording interface.
                print("Permission granted")
            } else {
                // The user denies access. Present a message that indicates
                // that they can change their permission settings in the
                // Privacy & Security section of the Settings app.
                model.showAlert.toggle()
            }
            try await recorder.updateOrientation(interfaceOrientation: deviceOrientation)
        }
    }
    .onReceive(NotificationCenter.default.publisher(for: UIDevice.orientationDidChangeNotification)) { _ in
        if let windowScene = UIApplication.shared.connectedScenes.first as? UIWindowScene,
           let orientation = windowScene.windows.first?.windowScene?.interfaceOrientation {
            deviceOrientation = orientation
            Task {
                do {
                    try await recorder.updateOrientation(interfaceOrientation: deviceOrientation)
                } catch {
                    throw Errors.UnableToUpdateOrientation
                }
            }
        }
    }
Here is the full repo: https://github.com/aabagdi/MemoMan/tree/MemoManStereo
Thanks for any leads!
Hi guys,
First of all, I'm sorry if this is the wrong place to post this. I'm on the last step of my task manager app: getting tasks to sync between devices. However, I get the error "This NSPersistentStoreCoordinator has no persistent stores (unknown). It cannot perform a save operation." What exactly does this error mean? My container is initialised, so it should have a persistent store, right? I'm also fairly sure I've enabled all the required capabilities (e.g. CloudKit, a CloudKit container, background fetch, and remote notifications). Here is the code for my data controller:
import CoreData
import Foundation

class DataController: ObservableObject {
    let container = NSPersistentCloudKitContainer(name: "TaskDataModel")

    init() {
        guard let description = container.persistentStoreDescriptions.first else {
            fatalError("Container descriptions not loaded")
        }
        description.setOption(true as NSNumber, forKey: NSPersistentHistoryTrackingKey)
        container.viewContext.mergePolicy = NSMergeByPropertyObjectTrumpMergePolicy
        container.viewContext.automaticallyMergesChangesFromParent = true
        container.loadPersistentStores { description, error in
            if let error = error {
                print("Core Data failed to load: \(error.localizedDescription)")
            }
        }
    }
}
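One variation of the load closure I can use to dig further, in case the save failure is just a symptom of the store never loading: print the complete NSError (including userInfo) instead of only localizedDescription. Sketch:

container.loadPersistentStores { description, error in
    if let error = error as NSError? {
        // Surfaces the full underlying error; a failed store load here
        // would explain the "no persistent stores" message on save.
        print("Failed to load store at \(description.url?.absoluteString ?? "unknown URL")")
        print("Error: \(error), userInfo: \(error.userInfo)")
    }
}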
Here is TaskManMain:
@main
struct TaskManApp: App {
    @StateObject private var dataController = DataController()

    var body: some Scene {
        WindowGroup {
            MainView()
                .environment(\.managedObjectContext, dataController.container.viewContext)
        }
    }
}
Here is the full repo if y'all are interested:
https://github.com/aabagdi/TaskMan
Thanks for any help!