Hey,
I'm training an MLImageClassifier via the train() method:
guard let job = try? MLImageClassifier.train(trainingData: trainingData, parameters: modelParameter, sessionParameters: sessionParameters) else {
    debugPrint("Training failed")
    return
}
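For reference, the sessionParameters used above are set up roughly like this (a sketch with placeholder paths and interval values, not my exact code; modelParameter is an MLImageClassifier.ModelParameters value, omitted here):
import CreateML
import Foundation

// Placeholder location inside the app container.
let sessionDirectoryURL = URL(fileURLWithPath: "/path/to/AppSupport/sessions")

// Session parameters: where sessions/checkpoints are written, and how often
// progress is reported and checkpoints are saved.
let sessionParameters = MLTrainingSessionParameters(
    sessionDirectory: sessionDirectoryURL,
    reportInterval: 1,
    checkpointInterval: 5,
    iterations: 25
)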
Unfortunately, the metrics of my MLProgress, which is created from the returned MLJob during training, are empty.
Code for listening to the progress:
job.progress.publisher(for: \.fractionCompleted)
    .sink { [weak job] fractionCompleted in
        guard let job = job else {
            debugPrint("failure in creating job")
            return
        }
        guard let progress = MLProgress(progress: job.progress) else {
            debugPrint("failure in creating progress")
            return
        }
        print("ProgressPROGRESS: \(progress)")
        print("Progress: \(fractionCompleted)")
    }
    .store(in: &subscriptions)
Printing the progress gives:
MLProgress(elapsedTime: 2.2328420877456665, phase: CreateML.MLPhase.extractingFeatures, itemCount: 32, totalItemCount: Optional(39), metrics: [:])
I get the same result when listening to MLCheckpoints; the metrics are empty as well:
MLCheckpoint(url: URLPATH.checkpoint, phase: CreateML.MLPhase.extractingFeatures, iteration: 32, date: 2024-04-18 11:21:18 +0000, metrics: [:])
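For completeness, this is roughly how I read the checkpoints (a sketch; I am assuming job.checkpoints can be iterated as an async sequence of MLCheckpoint values):
// Iterate the checkpoints the training job emits (assumed API usage).
Task {
    for await checkpoint in job.checkpoints {
        print("Checkpoint: \(checkpoint)")
        print("Checkpoint metrics: \(checkpoint.metrics)")  // empty, as shown above
    }
}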
Can someone tell me how I can access the metrics during training?
Thanks!
Hey, I just created and trained an MLImageClassifier via the MLImageClassifier.train() method (https://developer.apple.com/documentation/createml/mlimageclassifier/train(trainingdata:parameters:sessionparameters:)).
For my training data (MLImageClassifier.DataSource) I am using my directory structure, so I have an images folder with subfolders person1, person2, person3, etc., which contain the images of the labeled persons (https://developer.apple.com/documentation/createml/mlimageclassifier/datasource/labeleddirectories(at:)).
I am saving the checkpoints and sessions in my app directory so I can create an MLImageClassifier from an existing MLSession and/or MLCheckpoint.
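For reference, the data source and resume setup look roughly like this (a sketch; the paths are placeholders, and the resume-from-sessionDirectory behavior is my reading of how it is meant to work):
import CreateML
import Foundation

// Placeholder locations inside the app container.
// imagesDirectoryURL contains subfolders person1, person2, person3, ... with the labeled images.
let imagesDirectoryURL = URL(fileURLWithPath: "/path/to/AppSupport/images")
let sessionDirectoryURL = URL(fileURLWithPath: "/path/to/AppSupport/sessions")

// Each subfolder name becomes a label, each contained image a training example.
let trainingData = MLImageClassifier.DataSource.labeledDirectories(at: imagesDirectoryURL)

// Training with sessionParameters that point at the same sessionDirectory again
// picks up the saved session/checkpoint instead of starting from scratch.
let sessionParameters = MLTrainingSessionParameters(sessionDirectory: sessionDirectoryURL)
let job = try MLImageClassifier.train(trainingData: trainingData,
                                      sessionParameters: sessionParameters)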
My question is: is there any way to add new labels, ideally from my directory structure, to an MLImageClassifier that I create from an existing MLCheckpoint/MLSession?
So, for example, adding a person4 and training my pretrained classifier with only that person4.
Or is it simply not possible, and do I have to train from scratch every time I want to add a new label?
Unfortunately, I cannot find anything about this in the API.
Thanks!
Hello,
I have created a Neural Network → k-Nearest Neighbors classifier with Python:
# A SqueezeNet feature extractor followed by k-Nearest Neighbors for classification.
import coremltools
import coremltools.proto.FeatureTypes_pb2 as ft
from coremltools.models.nearest_neighbors import KNearestNeighborsClassifierBuilder
import copy
# Take the SqueezeNet feature extractor from the Turi Create model.
base_model = coremltools.models.MLModel("SqueezeNet.mlmodel")
base_spec = base_model._spec
layers = copy.deepcopy(base_spec.neuralNetworkClassifier.layers)
# Delete the softmax and innerProduct layers. The new last layer is
# a "flatten" layer that outputs a 1000-element vector.
del layers[-1]
del layers[-1]
preprocessing = base_spec.neuralNetworkClassifier.preprocessing
# The Turi Create model is a classifier, which is treated as a special
# model type in Core ML. But we need a general-purpose neural network.
del base_spec.neuralNetworkClassifier.layers[:]
base_spec.neuralNetwork.layers.extend(layers)
# Also copy over the image preprocessing options.
base_spec.neuralNetwork.preprocessing.extend(preprocessing)
# Remove other classifier stuff.
base_spec.description.ClearField("metadata")
base_spec.description.ClearField("predictedFeatureName")
base_spec.description.ClearField("predictedProbabilitiesName")
# Remove the old classifier outputs.
del base_spec.description.output[:]
# Add a new output for the feature vector.
output = base_spec.description.output.add()
output.name = "features"
output.type.multiArrayType.shape.append(1000)
output.type.multiArrayType.dataType = ft.ArrayFeatureType.FLOAT32
# Connect the last layer to this new output.
base_spec.neuralNetwork.layers[-1].output[0] = "features"
# Create the k-NN model.
knn_builder = KNearestNeighborsClassifierBuilder(input_name="features",
                                                 output_name="label",
                                                 number_of_dimensions=1000,
                                                 default_class_label="???",
                                                 number_of_neighbors=3,
                                                 weighting_scheme="inverse_distance",
                                                 index_type="linear")
knn_spec = knn_builder.spec
knn_spec.description.input[0].shortDescription = "Input vector"
knn_spec.description.output[0].shortDescription = "Predicted label"
knn_spec.description.output[1].shortDescription = "Probabilities for each possible label"
knn_builder.set_number_of_neighbors_with_bounds(3, allowed_range=(1, 10))
# Use the same name as in the neural network models, so that we
# can use the same code for evaluating both types of model.
knn_spec.description.predictedProbabilitiesName = "labelProbability"
knn_spec.description.output[1].name = knn_spec.description.predictedProbabilitiesName
# Put it all together into a pipeline.
pipeline_spec = coremltools.proto.Model_pb2.Model()
pipeline_spec.specificationVersion = coremltools._MINIMUM_UPDATABLE_SPEC_VERSION
pipeline_spec.isUpdatable = True
pipeline_spec.description.input.extend(base_spec.description.input[:])
pipeline_spec.description.output.extend(knn_spec.description.output[:])
pipeline_spec.description.predictedFeatureName = knn_spec.description.predictedFeatureName
pipeline_spec.description.predictedProbabilitiesName = knn_spec.description.predictedProbabilitiesName
# Add inputs for training.
pipeline_spec.description.trainingInput.extend([base_spec.description.input[0]])
pipeline_spec.description.trainingInput[0].shortDescription = "Example image"
pipeline_spec.description.trainingInput.extend([knn_spec.description.trainingInput[1]])
pipeline_spec.description.trainingInput[1].shortDescription = "True label"
pipeline_spec.pipelineClassifier.pipeline.models.add().CopyFrom(base_spec)
pipeline_spec.pipelineClassifier.pipeline.models.add().CopyFrom(knn_spec)
pipeline_spec.pipelineClassifier.pipeline.names.extend(["FeatureExtractor", "kNNClassifier"])
coremltools.utils.save_spec(pipeline_spec, "../Models/FaceDetection.mlmodel")
It is from the following tutorial: https://machinethink.net/blog/coreml-training-part3/
It works, and I was able to include it in my project.
I want to train the model via an MLUpdateTask:
import CoreML
import Vision

var batchInputs: [MLFeatureProvider] = []
let imageConstraint = (model.model.modelDescription.inputDescriptionsByName["image"]?.imageConstraint)!
let imageOptions: [MLFeatureValue.ImageOption: Any] = [
    .cropAndScale: VNImageCropAndScaleOption.scaleFill.rawValue
]
// URLs where the images are stored, keyed by label
let trainingData = ImageManager.getImagesAndLabel()
for data in trainingData {
    let label = data.key
    for imgURL in data.value {
        let featureValue = try MLFeatureValue(imageAt: imgURL,
                                              constraint: imageConstraint,
                                              options: imageOptions)
        if let pixelBuffer = featureValue.imageBufferValue {
            let featureProvider = FaceDetectionTrainingInput(image: pixelBuffer, label: label)
            batchInputs.append(featureProvider)
        }
    }
}
let batchProvider = MLArrayBatchProvider(array: batchInputs)
When calling the MLUpdateTask as follows, the context.model from the completion handler is nil.
Unfortunately, there is no other information available from the compiler.
do {
    debugPrint(context)
    try context.model.write(to: ModelManager.targetURL)
} catch {
    debugPrint("Error saving the model \(error)")
}
})
updateTask.resume()
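For context, the full update call looks roughly like this (a sketch; ModelManager.targetURL and batchProvider come from the snippets above, everything else is an assumption, not my exact code):
import CoreML

// Assumed construction of the update task; the completion handler is the one shown above.
let updateTask = try MLUpdateTask(
    forModelAt: ModelManager.targetURL,   // URL of the compiled, updatable model
    trainingData: batchProvider,
    configuration: nil,
    completionHandler: { context in
        do {
            debugPrint(context)
            try context.model.write(to: ModelManager.targetURL)
        } catch {
            debugPrint("Error saving the model \(error)")
        }
    }
)
updateTask.resume()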
I get the following error when I try to access context.model: Thread 5: EXC_BAD_ACCESS (code=1, address=0x0)
Can someone more experienced tell me how to fix this?
It seems like I am missing some parameters?
I am currently not splitting the data into training and test sets; the only preprocessing I am doing is scaling the images down to 227x227 pixels.
Thanks!