Hi all,
I'm getting a strange SwiftData error at runtime in my voice recorder app. Whenever I generate a samples array and cache it on the model so that the app can visualize the waveform of the audio, the app crashes and Xcode drops me into the macro-expanded accessor for the samples property:
{
    @storageRestrictions(accesses: _$backingData, initializes: _samples)
    init(initialValue) {
        _$backingData.setValue(forKey: \.samples, to: initialValue)
        _samples = _SwiftDataNoType()
    }

    get {
        _$observationRegistrar.access(self, keyPath: \.samples)
        return self.getValue(forKey: \.samples)
    }

    set {
        _$observationRegistrar.withMutation(of: self, keyPath: \.samples) {
            self.setValue(forKey: \.samples, to: newValue)
        }
    }
}
Execution stops with a breakpoint on the line _$observationRegistrar.withMutation(of: self, keyPath: \.samples), so the crash seems to happen when the samples array is assigned to the model, not while it is being computed.
Here is my model class:
import Foundation
import SwiftData

@Model final class Recording {
    var id: UUID?
    var name: String?
    var date: Date?
    var samples: [Float]? = nil

    init(name: String) {
        self.id = UUID()
        self.name = name
        self.date = Date.now
    }
}
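For context, recordings are created and inserted in the usual SwiftData way. A simplified sketch of the call site, not the exact code (RecorderView and the button are placeholders, and the model context comes from the SwiftUI environment):

import SwiftUI
import SwiftData

// Placeholder view: the real recorder UI is more involved.
struct RecorderView: View {
    @Environment(\.modelContext) private var modelContext

    var body: some View {
        Button("Stop Recording") {
            // A new Recording is inserted when a session ends;
            // SwiftData autosaves by default.
            let recording = Recording(name: "New Recording")
            modelContext.insert(recording)
        }
    }
}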
And here is where the samples are being generated (it relies on AVFoundation and Accelerate; sorry for the long code):
private func processSamples(from audioFile: AVAudioFile) async throws -> [Float] {
    let sampleCount = 128   // number of waveform buckets to display
    let frameCount = Int(audioFile.length)
    let samplesPerSegment = frameCount / sampleCount

    let buffer = try createAudioBuffer(for: audioFile, frameCapacity: AVAudioFrameCount(samplesPerSegment))
    let channelCount = Int(buffer.format.channelCount)

    let audioData = try readAudioData(from: audioFile, into: buffer, sampleCount: sampleCount, samplesPerSegment: samplesPerSegment, channelCount: channelCount)
    let processedResults = try await processAudioSegments(audioData: audioData, sampleCount: sampleCount, samplesPerSegment: samplesPerSegment, channelCount: channelCount)

    var samples = createSamplesArray(from: processedResults, sampleCount: sampleCount)
    samples = applyNoiseFloor(to: samples, noiseFloor: 0.01)
    samples = normalizeSamples(samples)
    return samples
}
private func createAudioBuffer(for audioFile: AVAudioFile, frameCapacity: AVAudioFrameCount) throws -> AVAudioPCMBuffer {
    guard let buffer = AVAudioPCMBuffer(pcmFormat: audioFile.processingFormat, frameCapacity: frameCapacity) else {
        throw Errors.AudioProcessingError
    }
    return buffer
}
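(For reference, Errors is just a small error enum in the project; a simplified sketch, the real type may have more cases:)

enum Errors: Error {
    case AudioProcessingError
}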
private func readAudioData(from audioFile: AVAudioFile, into buffer: AVAudioPCMBuffer, sampleCount: Int, samplesPerSegment: Int, channelCount: Int) throws -> [[Float]] {
    var audioData = [[Float]](repeating: [Float](repeating: 0, count: samplesPerSegment * channelCount), count: sampleCount)
    for segment in 0..<sampleCount {
        let segmentStart = AVAudioFramePosition(segment * samplesPerSegment)
        audioFile.framePosition = segmentStart
        try audioFile.read(into: buffer)
        if let channelData = buffer.floatChannelData {
            let dataCount = samplesPerSegment * channelCount
            audioData[segment] = Array(UnsafeBufferPointer(start: channelData[0], count: dataCount))
        }
    }
    return audioData
}
private func processAudioSegments(audioData: [[Float]], sampleCount: Int, samplesPerSegment: Int, channelCount: Int) async throws -> [(Int, Float)] {
    try await withThrowingTaskGroup(of: (Int, Float).self) { taskGroup in
        for segment in 0..<sampleCount {
            let segmentData = audioData[segment]
            taskGroup.addTask {
                // RMS of the segment becomes that bucket's amplitude
                var rms: Float = 0
                vDSP_rmsqv(segmentData, 1, &rms, vDSP_Length(samplesPerSegment * channelCount))
                return (segment, rms)
            }
        }

        var results = [(Int, Float)]()
        for try await result in taskGroup {
            results.append(result)
        }
        return results
    }
}
private func createSamplesArray(from processedResults: [(Int, Float)], sampleCount: Int) -> [Float] {
    var samples = [Float](repeating: 0, count: sampleCount)
    for (segment, rms) in processedResults {
        samples[segment] = rms
    }
    return samples
}
private func applyNoiseFloor(to samples: [Float], noiseFloor: Float) -> [Float] {
    var result = samples
    let noiseFloorArray = [Float](repeating: noiseFloor, count: samples.count)
    // vDSP_vsub computes C = B - A, so this is samples - noiseFloor
    vDSP_vsub(noiseFloorArray, 1, samples, 1, &result, 1, vDSP_Length(samples.count))
    return result
}
private func normalizeSamples(_ samples: [Float]) -> [Float] {
    var result = samples
    var min: Float = 0
    var max: Float = 0
    vDSP_minv(samples, 1, &min, vDSP_Length(samples.count))
    vDSP_maxv(samples, 1, &max, vDSP_Length(samples.count))

    if max > min {
        // result = samples * a + b = (samples - min) / (max - min),
        // i.e. a linear rescale into 0...1
        var a: Float = 1.0 / (max - min)
        var b: Float = -min / (max - min)
        vDSP_vsmsa(samples, 1, &a, &b, &result, 1, vDSP_Length(samples.count))
    } else {
        // flat signal: fall back to a constant mid-level waveform
        vDSP_vfill([0.5], &result, 1, vDSP_Length(samples.count))
    }
    return result
}
And this is how the processSamples function is used (note that fileURL isn't part of the model shown above; it's resolved elsewhere in the app):
private func loadAudioSamples() async {
    let url = recording.fileURL
    if let audioFile = loadAudioFile(url: url) {
        if recording.samples == nil {
            // cache the generated waveform on the model the first time through
            recording.samples = try? await processSamples(from: audioFile)
        }
    }
}
private func loadAudioFile(url: URL) -> AVAudioFile? {
    do {
        return try AVAudioFile(forReading: url)
    } catch {
        return nil
    }
}
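For completeness, loadAudioSamples is kicked off when the waveform view appears. A simplified sketch of the call site, not the exact view code (WaveformView and its body are placeholders):

import SwiftUI

struct WaveformView: View {
    let recording: Recording

    var body: some View {
        // Placeholder for the actual waveform drawing, which reads
        // recording.samples.
        Color.clear
            .task {
                await loadAudioSamples()   // the function shown above
            }
    }

    // loadAudioSamples(), loadAudioFile(url:), and the processing
    // helpers above all live in this view.
}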
Any help or leads would be greatly appreciated! Thanks!