One workaround (from a forum post I'll try to find!) is to create an intermediate model and copy over the data in a custom migration stage:
/// Schema v1.0.0 — the original model shape: a `Trip` carries only a `name`.
enum TripsSchemaV1_0_0: VersionedSchema {
    static var models: [any PersistentModel.Type] { [Trip.self] }
    static var versionIdentifier = Schema.Version(1, 0, 0)

    @Model
    class Trip {
        var name: String

        init(name: String) {
            self.name = name
        }
    }
}
/// Schema v1.1.0 — the intermediate step: introduces an optional `title`
/// alongside `name`, so the v1.0.0 -> v1.1.0 migration can be lightweight
/// (SwiftData fills the new attribute with `nil`).
enum TripsSchemaV1_1_0: VersionedSchema {
    static var models: [any PersistentModel.Type] { [Trip.self] }
    static var versionIdentifier = Schema.Version(1, 1, 0)

    @Model
    class Trip {
        var name: String
        var title: String? // Migrated in schema v2.0

        init(name: String, title: String? = nil) {
            self.name = name
            self.title = title
        }
    }
}
/// Schema v2.0.0 — the final shape: `name` is gone and `title` (now
/// non-optional) holds the data copied over by the custom migration stage.
enum TripsSchemaV2_0_0: VersionedSchema {
    static var models: [any PersistentModel.Type] { [Trip.self] }
    static var versionIdentifier = Schema.Version(2, 0, 0)

    @Model
    class Trip {
        var title: String

        init(title: String) {
            self.title = title
        }
    }
}
Then in your MigrationPlan, move the data:
/// Migrates the Trips store v1.0.0 -> v1.1.0 -> v2.0.0.
enum TripsSchemaMigrationPlan: SchemaMigrationPlan {
    /// Every schema version this plan knows about, oldest first.
    static var schemas: [any VersionedSchema.Type] {
        [
            TripsSchemaV1_0_0.self,
            TripsSchemaV1_1_0.self,
            TripsSchemaV2_0_0.self,
        ]
    }

    /// Ordered migration stages. The stage names now match the versions they
    /// actually migrate between (the originals said "v0_1_1" / "v0_2_0",
    /// contradicting the schemas declared above).
    static var stages: [MigrationStage] {
        [
            v1_0_0_to_v1_1_0_lightweight,
            v1_1_0_to_v2_0_0_custom,
        ]
    }

    /// v1.0.0 -> v1.1.0: adds the optional `title` attribute.
    /// Lightweight, because SwiftData can fill the new column with `nil` on its own.
    private static let v1_0_0_to_v1_1_0_lightweight = MigrationStage.lightweight(
        fromVersion: TripsSchemaV1_0_0.self,
        toVersion: TripsSchemaV1_1_0.self
    )

    /// v1.1.0 -> v2.0.0: `name` is replaced by `title`, so copy the data
    /// across in `willMigrate` before the old attribute is dropped.
    private static let v1_1_0_to_v2_0_0_custom = MigrationStage.custom(
        fromVersion: TripsSchemaV1_1_0.self,
        toVersion: TripsSchemaV2_0_0.self,
        willMigrate: { context in
            let trips = try context.fetch(FetchDescriptor<TripsSchemaV1_1_0.Trip>())
            // Goal: fill-in the new title from the old name so no data is lost.
            for trip in trips {
                trip.title = trip.name
            }
            // Save explicitly rather than assuming autosave is enabled —
            // the migration context is not guaranteed to autosave.
            try context.save()
        },
        didMigrate: nil
    )
}
Post
Replies
Boosts
Views
Activity
Some ideas:
Verify there isn't already an item with id: 1 when re-running mock code
Verify anything with @Attribute(.unique) is ACTUALLY unique in mock code
Make sure that any models that have a @Relationship have a corresponding deleteRule, if you're running a series of delete(...) elsewhere.
The last one caught me off guard -- if running a series of delete calls on PersistentModel types, make sure that any models that have a @Relationship also have a corresponding deleteRule. When deleting from either end, SwiftData can null out the child in its parent (.nullify), or delete its parent altogether (.cascade). Without this, a hanging reference will be left, causing null value errors.
For example:
/// Parent side of the Aisle <-> Item relationship.
/// NOTE: `@Model` can only be attached to a class, never a struct — the
/// original `struct` declaration would not compile. A class also gets no
/// memberwise initializer, so one is written out explicitly.
@Model
final class Aisle {
    // Deleting an Aisle nulls out `aisle` on each of its Items (.nullify)
    // instead of leaving dangling references behind.
    @Relationship(deleteRule: .nullify, inverse: \Item.aisle)
    var items: [Item]

    init(items: [Item] = []) {
        self.items = items
    }
}
/// Child side of the Aisle <-> Item relationship.
/// NOTE: `@Model` requires a class, not a struct. Additionally, because the
/// delete rule is `.nullify`, `aisle` must be Optional — SwiftData sets it to
/// nil when the parent Aisle is deleted, which a non-optional property cannot
/// represent (the very "null value errors" described above).
@Model
final class Item {
    @Relationship(deleteRule: .nullify) // works!
    var aisle: Aisle?

    init(aisle: Aisle? = nil) {
        self.aisle = aisle
    }
}
Also, save the model context before running deletes. Otherwise, changes will be flushed from the cache after the delete is run, causing persistence issues. The joy of caching!
I was able to do this by adding Section for the @TableColumnBuilder in this Table initializer.
A (reduced) code sample:
/// A sortable, sectioned Table of tracks with in-place editing.
/// Rows are `Binding<Track>` so each cell can write edits back to the store.
struct ImportTrackTable: View {
@EnvironmentObject var store: Store
// Multi-selection, keyed by track ID.
@State var selectedTracks: Set<Track.ID> = .init()
// Sorting
// Because the table's row type is Binding<Track>, the table-level sort
// order must compare Binding<Track> values — hence `\.title.wrappedValue`.
@State private var sortOrder: [KeyPathComparator<Binding<Track>>] = [
.init(\.title.wrappedValue, order: .forward),
]
// Comparator over the plain Track value, supplied to the Title column so
// its header can drive sorting (distinct from `sortOrder` above).
private let titleComparator = KeyPathComparator<Track>(\.title, order: .forward)
var body: some View {
Table(
of: Binding<Track>.self,
selection: $selectedTracks,
sortOrder: $sortOrder
) {
// Editable column: hands the cell closure a Binding<Track> and projects
// its `title` into a text field.
TableColumn(
"Title",
value: \Binding<Track>.wrappedValue,
comparator: titleComparator
) { trackBinding in
TrackTextField(text: trackBinding.title)
}
// Read-only column; presumably `genreList` is a displayable value —
// TODO confirm against the Track model.
TableColumn(
"Genres",
value: \Binding<Track>.wrappedValue.genreList
)
} rows: {
// The point of this sample: Sections work inside the rows builder,
// here splitting the same sorted source into two groups.
Section("Unassigned") {
ForEach(
$store.tracks
.sorted(using: sortOrder)
.filter({ track in
// Cool filter logic
})
) { $track in
TableRow($track)
.draggable($track.wrappedValue)
}
}
Section("Assigned") {
ForEach(
$store.tracks
.sorted(using: sortOrder)
) { $track in
TableRow($track)
.draggable($track.wrappedValue)
}
}
}
}
}
Make sure that you're only calling the completion handler once!
It can be a bit hard to trace with nested closures + networking, so try some print statements and see if that helps.