So I've been trying for weeks now to implement a compression mechanism into my app project that compresses MV-HEVC video files in-app without stripping videos of their 3D properties, but every single implementation I have tried has either stripped the encoded MV-HEVC video file of its 3D properties (making the video monoscopic), or has crashed with a fatal error. I've read the Reading multiview 3D video files and Converting side-by-side 3D video to multiview HEVC documentation files, but was unable to myself come out with anything useful.
My question therefore is: How do you go about compressing/encoding an MV-HEVC video file in-app whilst preserving the stereoscopic/3D properties of that MV-HEVC video file? Below is the best implementation I was able to come up with (which simply compresses uploaded MV-HEVC videos with an arbitrary bit rate). With this implementation (my compressVideo function), the MV-HEVC files that go through it are compressed fine, but the final result is the loss of that MV-HEVC video file's stereoscopic/3D properties.
If anyone could point me in the right direction with anything it would be greatly, greatly appreciated.
My current implementation (that strips MV-HEVC videos of their stereoscopic/3D properties):
static func compressVideo(sourceUrl: URL, bitrate: Int, completion: @escaping (Result<URL, Error>) -> Void) {
let asset = AVAsset(url: sourceUrl)
asset.loadTracks(withMediaType: .video) { videoTracks, videoError in
guard let videoTrack = videoTracks?.first, videoError == nil else {
completion(.failure(videoError ?? NSError(domain: "VideoUploader", code: -1, userInfo: [NSLocalizedDescriptionKey: "Failed to load video track"])))
return
}
asset.loadTracks(withMediaType: .audio) { audioTracks, audioError in
guard let audioTrack = audioTracks?.first, audioError == nil else {
completion(.failure(audioError ?? NSError(domain: "VideoUploader", code: -2, userInfo: [NSLocalizedDescriptionKey: "Failed to load audio track"])))
return
}
let outputUrl = sourceUrl.deletingLastPathComponent().appendingPathComponent(UUID().uuidString).appendingPathExtension("mov")
guard let assetReader = try? AVAssetReader(asset: asset),
let assetWriter = try? AVAssetWriter(outputURL: outputUrl, fileType: .mov) else {
completion(.failure(NSError(domain: "VideoUploader", code: -3, userInfo: [NSLocalizedDescriptionKey: "AssetReader/Writer initialization failed"])))
return
}
let videoReaderSettings: [String: Any] = [kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32ARGB]
let videoSettings: [String: Any] = [
AVVideoCompressionPropertiesKey: [AVVideoAverageBitRateKey: bitrate],
AVVideoCodecKey: AVVideoCodecType.hevc,
AVVideoHeightKey: videoTrack.naturalSize.height,
AVVideoWidthKey: videoTrack.naturalSize.width
]
let assetReaderVideoOutput = AVAssetReaderTrackOutput(track: videoTrack, outputSettings: videoReaderSettings)
let assetReaderAudioOutput = AVAssetReaderTrackOutput(track: audioTrack, outputSettings: nil)
if assetReader.canAdd(assetReaderVideoOutput) {
assetReader.add(assetReaderVideoOutput)
} else {
completion(.failure(NSError(domain: "VideoUploader", code: -4, userInfo: [NSLocalizedDescriptionKey: "Couldn't add video output reader"])))
return
}
if assetReader.canAdd(assetReaderAudioOutput) {
assetReader.add(assetReaderAudioOutput)
} else {
completion(.failure(NSError(domain: "VideoUploader", code: -5, userInfo: [NSLocalizedDescriptionKey: "Couldn't add audio output reader"])))
return
}
let audioInput = AVAssetWriterInput(mediaType: .audio, outputSettings: nil)
let videoInput = AVAssetWriterInput(mediaType: .video, outputSettings: videoSettings)
videoInput.transform = videoTrack.preferredTransform
assetWriter.shouldOptimizeForNetworkUse = true
assetWriter.add(videoInput)
assetWriter.add(audioInput)
assetReader.startReading()
assetWriter.startWriting()
assetWriter.startSession(atSourceTime: CMTime.zero)
let videoInputQueue = DispatchQueue(label: "videoQueue")
let audioInputQueue = DispatchQueue(label: "audioQueue")
videoInput.requestMediaDataWhenReady(on: videoInputQueue) {
while videoInput.isReadyForMoreMediaData {
if let sample = assetReaderVideoOutput.copyNextSampleBuffer() {
videoInput.append(sample)
} else {
videoInput.markAsFinished()
if assetReader.status == .completed {
assetWriter.finishWriting {
completion(.success(outputUrl))
}
}
break
}
}
}
audioInput.requestMediaDataWhenReady(on: audioInputQueue) {
while audioInput.isReadyForMoreMediaData {
if let sample = assetReaderAudioOutput.copyNextSampleBuffer() {
audioInput.append(sample)
} else {
audioInput.markAsFinished()
break
}
}
}
}
}
}