Vision: Object Tracking - Improving Accuracy

I am trying to implement object tracking with the Vision API. The following code is working fine:


func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
    guard
        // get the pixel buffer out of the sample buffer
        let pixelBuffer: CVPixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer),
        // make sure there is a previous observation to feed into the request
        let lastObservation = self.lastObservation
    else { return }

    // create the tracking request, seeded with the last observation
    let request = VNTrackObjectRequest(detectedObjectObservation: lastObservation, completionHandler: self.handleVisionRequestUpdate)
    // prefer speed over accuracy
    request.trackingLevel = .fast

    // perform the request on the current frame
    do {
        try self.visionSequenceHandler.perform([request], on: pixelBuffer)
    } catch {
        print("Throws: \(error)")
    }
}

private func handleVisionRequestUpdate(_ request: VNRequest, error: Error?) {
    // UI updates have to happen on the main queue
    DispatchQueue.main.async {
        // make sure there is an actual result
        guard let newObservation = request.results?.first as? VNDetectedObjectObservation else { return }

        // keep the observation as the seed for the next frame
        self.lastObservation = newObservation

        // hide the highlight when confidence drops too low
        guard newObservation.confidence >= 0.3 else {
            self.highlightView?.frame = .zero
            return
        }

        // convert from Vision's normalized coordinates (origin at the bottom left)
        // to the preview layer's coordinates
        var transformedRect = newObservation.boundingBox
        transformedRect.origin.y = 1 - transformedRect.origin.y
        let convertedRect = self.cameraLayer.layerRectConverted(fromMetadataOutputRect: transformedRect)

        // move the highlight over the tracked object
        self.highlightView?.frame = convertedRect
    }
}

@IBAction private func userTapped(_ sender: UITapGestureRecognizer) {
    // place the highlight where the user tapped
    self.highlightView?.frame.size = CGSize(width: 120, height: 120)
    self.highlightView?.center = sender.location(in: self.view)

    // convert the highlight's frame back into normalized metadata coordinates
    let originalRect = self.highlightView?.frame ?? .zero
    var convertedRect = self.cameraLayer.metadataOutputRectConverted(fromLayerRect: originalRect)
    convertedRect.origin.y = 1 - convertedRect.origin.y

    // seed the tracker with a fresh observation for the tapped region
    let newObservation = VNDetectedObjectObservation(boundingBox: convertedRect)
    self.lastObservation = newObservation
}



Observation


I have noticed that the object tracking sometimes gets confused under certain conditions. The initial observation is created here, in userTapped(_:):


let newObservation = VNDetectedObjectObservation(boundingBox: convertedRect)


Would passing a bigger rect improve tracking?
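
To experiment with that, here is a minimal sketch of how the seed rect could be grown before creating the observation. The expanded(_:by:) helper and the 0.05 padding are made up for illustration; the rect is clamped so it stays inside Vision's normalized [0, 1] space.

import CoreGraphics

// Hypothetical helper: grow a normalized rect on every side and clamp it
// to Vision's [0, 1] coordinate space.
func expanded(_ rect: CGRect, by padding: CGFloat) -> CGRect {
    let grown = rect.insetBy(dx: -padding, dy: -padding)
    return grown.intersection(CGRect(x: 0, y: 0, width: 1, height: 1))
}

// In userTapped(_:), the seed observation would then be created like this:
// let newObservation = VNDetectedObjectObservation(boundingBox: expanded(convertedRect, by: 0.05))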


Question


How can I improve the accuracy of the object tracking? (I have already tried changing request.trackingLevel from .fast to .accurate.)
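
For reference, this is roughly how that attempt looked inside captureOutput; it uses the same properties as the code above, and only the trackingLevel line differs.

// Sketch of the .accurate variant; everything else stays as in captureOutput above.
let request = VNTrackObjectRequest(detectedObjectObservation: lastObservation,
                                   completionHandler: self.handleVisionRequestUpdate)
// .accurate favors accuracy over speed, so each frame takes longer to process.
request.trackingLevel = .accurate

do {
    try self.visionSequenceHandler.perform([request], on: pixelBuffer)
} catch {
    print("Throws: \(error)")
}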