Hi everyone,

I have been developing an image recognition app on iOS 11 following the CoreML examples. However, I notice a difference between the results when calling the model on iOS and the ones obtained with coremltools on Mac/Python. I think the difference may lie in the image-loading part: the Python code uses Pillow to load the image, while the Xcode version uses CoreImage. I have pasted the key code below; hopefully somebody can help point out the issue. Also, the input image is a 299x299 JPEG, so no resizing should happen in either implementation. Thank you.

################## python code ##################

import coremltools
from PIL import Image
from keras.preprocessing import image
import numpy as np

IMG_PATH = './test.jpg'

# load_img returns a PIL image (no resize, since the file is already 299x299),
# which coremltools accepts directly for image-typed inputs
img = image.load_img(IMG_PATH)

model = coremltools.models.MLModel("./Inceptionv3.mlmodel")
res = model.predict({'image': img})
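For comparison, here is a snippet (continuing the code above) that prints the Python-side top 5 in the same format as the iOS output. It assumes the stock Inceptionv3.mlmodel output name 'classLabelProbs'; adjust if your model differs:

# Print the Python-side top 5 so it can be compared line by line
# with the iOS output. 'classLabelProbs' is assumed to be the
# dictionary output of the stock Inceptionv3.mlmodel.
probs = res['classLabelProbs']
top5 = sorted(probs.items(), key=lambda kv: -kv[1])[:5]
for i, (label, p) in enumerate(top5):
    print('- %d %.4f %s' % (i, p, label))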
################ iOS code ################

self.image = [CIImage imageWithContentsOfURL:fileURL];
self.model = [[[Inceptionv3 alloc] init] model];

VNCoreMLModel *m = [VNCoreMLModel modelForMLModel:self.model error:nil];
VNCoreMLRequest *rq = [[VNCoreMLRequest alloc] initWithModel:m completionHandler:^(VNRequest *request, NSError *error) {
    NSArray *results = [request.results copy];
    NSString *top_results = @"";
    // kNumResults is defined elsewhere; guard against Vision
    // returning fewer observations than requested
    for (NSUInteger index = 0; index < MIN((NSUInteger)kNumResults, results.count); index++) {
        VNClassificationObservation *res = (VNClassificationObservation *)results[index];
        top_results = [top_results stringByAppendingFormat:@"- %lu %.4f %@\n ", (unsigned long)index, res.confidence, res.identifier];
    }
    self.label_prob = [top_results copy];
}];

VNImageRequestHandler *handler = [[VNImageRequestHandler alloc] initWithCIImage:self.image options:@{}];
dispatch_queue_t myCustomQueue = dispatch_queue_create("com.example.MyCustomQueue", NULL);
dispatch_sync(myCustomQueue, ^{
    [handler performRequests:@[rq] error:nil];
});
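One thing I am not sure about: VNCoreMLRequest has an imageCropAndScaleOption property that defaults to centerCrop, so Vision may still rescale or crop the image even though it is already 299x299. To check whether that explains the gap, the sketch below emulates a scale-then-center-crop on the Python side before predicting. This is only my guess at what Vision does internally (the exact resampling filter is unknown to me, bilinear is an assumption):

# Emulate Vision's default centerCrop option: scale so the short
# side matches the model input, then crop the center square.
# 299 and Image.BILINEAR are assumptions, not Vision's documented behavior.
from PIL import Image

def center_crop_scale(pil_img, side=299):
    w, h = pil_img.size
    scale = side / float(min(w, h))
    pil_img = pil_img.resize((int(round(w * scale)), int(round(h * scale))), Image.BILINEAR)
    w, h = pil_img.size
    left = (w - side) // 2
    top = (h - side) // 2
    return pil_img.crop((left, top, left + side, top + side))

img2 = center_crop_scale(Image.open(IMG_PATH).convert('RGB'))
res2 = model.predict({'image': img2})

The iOS side could also be pinned down by setting rq.imageCropAndScaleOption explicitly instead of relying on the default, so both sides are guaranteed to see the same pixels.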