I am using Core ML style transfer based on the torch2coreml implementation on GitHub. For the purposes of this question, the only change I have made is to substitute my own mlmodel, which has an input/output size of 1200 pixels, for the sample mlmodels.
This works perfectly on my iPhone 7 Plus and uses a maximum of 65.11 MB of RAM. Running the identical code with the identical mlmodel on an iPad Mini 2, it uses 758.87 MB of RAM before crashing with an out-of-memory error.
Memory allocations on the iPhone 7 Plus:
http://3DTOPO.com/CoreML-iPhonePlus.png
Memory allocations on the iPad Mini 2:
http://3DTOPO.com/CoreML-iPadMini2.png
Running on the iPad Mini, there are two 200 MB and one 197.77 MB Espresso library allocations that are not present on the iPhone 7 Plus. The iPad Mini also has a 49.39 MB allocation that the iPhone 7 Plus doesn't, and three 16.48 MB allocations versus one on the iPhone 7 Plus (see the screenshots above). What on earth is going on, and how can I fix it?
Relevant code (download the project linked above for the full source):
private var inputImage = UIImage(named: "input")!
let imageSize = 1200
private let models = [
    test().model
]
@IBAction func styleButtonTouched(_ sender: UIButton) {
    guard let image = inputImage.scaled(to: CGSize(width: imageSize, height: imageSize), scalingMode: .aspectFit).cgImage else {
        print("Could not get a CGImage")
        return
    }

    let model = models[0] // Use my test model
    toggleLoading(show: true)

    // Run the model off the main thread, then display the result on the main thread.
    DispatchQueue.global(qos: .userInteractive).async {
        let stylized = self.stylizeImage(cgImage: image, model: model)

        DispatchQueue.main.async {
            self.toggleLoading(show: false)
            self.imageView.image = UIImage(cgImage: stylized)
        }
    }
}
private func stylizeImage(cgImage: CGImage, model: MLModel) -> CGImage {
    let input = StyleTransferInput(input: pixelBuffer(cgImage: cgImage, width: imageSize, height: imageSize))
    let outFeatures = try! model.prediction(from: input)
    let output = outFeatures.featureValue(for: "outputImage")!.imageBufferValue!

    // Wrap the output pixel buffer in a CGContext and copy it out as a CGImage.
    CVPixelBufferLockBaseAddress(output, .readOnly)
    let width = CVPixelBufferGetWidth(output)
    let height = CVPixelBufferGetHeight(output)
    let data = CVPixelBufferGetBaseAddress(output)!
    let outContext = CGContext(data: data,
                               width: width,
                               height: height,
                               bitsPerComponent: 8,
                               bytesPerRow: CVPixelBufferGetBytesPerRow(output),
                               space: CGColorSpaceCreateDeviceRGB(),
                               bitmapInfo: CGImageByteOrderInfo.order32Little.rawValue | CGImageAlphaInfo.noneSkipFirst.rawValue)!
    let outImage = outContext.makeImage()!
    CVPixelBufferUnlockBaseAddress(output, .readOnly)
    return outImage
}
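As a diagnostic, the model.prediction(from: input) call above could be swapped for a CPU-only prediction via MLPredictionOptions. Whether the CPU path actually avoids the large Espresso allocations on the iPad Mini is an assumption on my part, not something I have verified:

import CoreML

// Diagnostic sketch only: force the prediction onto the CPU to see whether
// the Espresso allocations disappear on the iPad Mini (assumption, unverified).
private func predictOnCPU(model: MLModel, input: StyleTransferInput) throws -> MLFeatureProvider {
    let options = MLPredictionOptions()
    options.usesCPUOnly = true
    return try model.prediction(from: input, options: options)
}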
private func pixelBuffer(cgImage: CGImage, width: Int, height: Int) -> CVPixelBuffer {
    var pixelBuffer: CVPixelBuffer? = nil
    let status = CVPixelBufferCreate(kCFAllocatorDefault, width, height, kCVPixelFormatType_32BGRA, nil, &pixelBuffer)
    if status != kCVReturnSuccess {
        fatalError("Cannot create pixel buffer for image")
    }

    // Draw the CGImage into the newly created BGRA pixel buffer.
    CVPixelBufferLockBaseAddress(pixelBuffer!, CVPixelBufferLockFlags(rawValue: 0))
    let data = CVPixelBufferGetBaseAddress(pixelBuffer!)
    let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
    let bitmapInfo = CGBitmapInfo(rawValue: CGBitmapInfo.byteOrder32Little.rawValue | CGImageAlphaInfo.noneSkipFirst.rawValue)
    let context = CGContext(data: data,
                            width: width,
                            height: height,
                            bitsPerComponent: 8,
                            bytesPerRow: CVPixelBufferGetBytesPerRow(pixelBuffer!),
                            space: rgbColorSpace,
                            bitmapInfo: bitmapInfo.rawValue)
    context?.draw(cgImage, in: CGRect(x: 0, y: 0, width: width, height: height))
    CVPixelBufferUnlockBaseAddress(pixelBuffer!, CVPixelBufferLockFlags(rawValue: 0))
    return pixelBuffer!
}
class StyleTransferInput: MLFeatureProvider {

    /// input as color (kCVPixelFormatType_32BGRA) image buffer, 1200 pixels wide by 1200 pixels high
    var input: CVPixelBuffer

    var featureNames: Set<String> {
        return ["inputImage"]
    }

    func featureValue(for featureName: String) -> MLFeatureValue? {
        if featureName == "inputImage" {
            return MLFeatureValue(pixelBuffer: input)
        }
        return nil
    }

    init(input: CVPixelBuffer) {
        self.input = input
    }
}
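For reference, scaled(to:scalingMode:) is a UIImage extension that comes with the project and isn't reproduced above. A minimal aspect-fit version would look roughly like this (my own sketch to keep the snippet self-contained, not the project's exact code):

import UIKit

enum ScalingMode {
    case aspectFit
    case aspectFill
}

extension UIImage {
    // Rough stand-in for the helper used in styleButtonTouched; the real
    // implementation is in the linked project and may differ.
    func scaled(to newSize: CGSize, scalingMode: ScalingMode) -> UIImage {
        let widthRatio = newSize.width / size.width
        let heightRatio = newSize.height / size.height
        let ratio = scalingMode == .aspectFit ? min(widthRatio, heightRatio) : max(widthRatio, heightRatio)
        let scaledSize = CGSize(width: size.width * ratio, height: size.height * ratio)

        UIGraphicsBeginImageContextWithOptions(scaledSize, false, 1.0)
        defer { UIGraphicsEndImageContext() }
        draw(in: CGRect(origin: .zero, size: scaledSize))
        return UIGraphicsGetImageFromCurrentImageContext() ?? self
    }
}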