AVCaptureSession startRunning crash

I'm revisiting AVCaptureSession in UIKit to capture a snapshot with the FaceTime camera, and my sample app crashes when the AVCaptureSession starts running. Does anyone know how to fix it? The console shows the following purple runtime warning.

-[AVCaptureSession startRunning] should be called from background thread. Calling it on the main thread can lead to UI unresponsiveness

import UIKit
import AVFoundation

class CaptureViewController: UIViewController, AVCapturePhotoCaptureDelegate {
	var captureSession: AVCaptureSession!
	var cameraDevices: AVCaptureDevice!
	var imagePhotoOutput: AVCapturePhotoOutput!
	
	enum CameraCase {
		case front
		case back
	}
	
	
	// MARK: - IBAction
	@IBAction func selectTapped(_ sender: UIButton) {
		snapPicture()
	}
	
	
	// MARK: - Life cycle
	override func viewDidLoad() {
		super.viewDidLoad()
	}
	
	override func viewWillAppear(_ animated: Bool) {
		super.viewWillAppear(animated)
		
		prepareCamera(cameraCase: .front)
	}
	
	
	// MARK: - Camera
	func prepareCamera(cameraCase: CameraCase) {
		/* removing existing layers */
		if let sublayers = self.view.layer.sublayers {
			for sublayer in sublayers {
				if sublayer.isKind(of: AVCaptureVideoPreviewLayer.self) {
					sublayer.removeFromSuperlayer()
				}
			}
		}
		
		/* creating a capture session */
		captureSession = AVCaptureSession()
		
		if cameraCase == .front {
			guard let device = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInWideAngleCamera], mediaType: AVMediaType.video, position: .back).devices.first else { return }
			let videoInput = try? AVCaptureDeviceInput(device: device)
			if captureSession.canAddInput(videoInput!) {
				captureSession.addInput(videoInput!)
				imagePhotoOutput = AVCapturePhotoOutput() // setting output destination
				captureSession.addOutput(imagePhotoOutput) // adding photo output to session
			}
		} else {
			guard let device = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInWideAngleCamera], mediaType: AVMediaType.video, position: .front).devices.first else { return }
			let videoInput = try? AVCaptureDeviceInput(device: device)
			if captureSession.canAddInput(videoInput!) {
				captureSession.addInput(videoInput!)
				imagePhotoOutput = AVCapturePhotoOutput() // setting output destination
				captureSession.addOutput(imagePhotoOutput) // adding photo output to session
			}
		}
		
		
		/* creating a capture layer */
		let captureVideoLayer: AVCaptureVideoPreviewLayer = AVCaptureVideoPreviewLayer.init(session: captureSession)
		captureVideoLayer.frame = CGRect(x: 0, y: 0, width: view.frame.size.width, height: view.frame.size.height)
		captureVideoLayer.videoGravity = AVLayerVideoGravity.resizeAspect

		/* adding video capture layer to the view layer */
		self.view.layer.addSublayer(captureVideoLayer)		
		
		/* starting capture session */
		captureSession.startRunning() //<<<<<<<<<<<<<<<<<<<<<<<<<<< The console shows a purple warning here.
	}
}
  • Oops... The position arguments are the other way around: the .front case requests position: .back and the .back case requests position: .front.
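    For that part alone, the two discovery calls just need their position values swapped so that each case requests the matching camera. A minimal sketch of that correction (assuming the rest of prepareCamera stays as posted):

	let position: AVCaptureDevice.Position = (cameraCase == .front) ? .front : .back
	guard let device = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInWideAngleCamera],
	                                                    mediaType: .video,
	                                                    position: position).devices.first else { return }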

Accepted Reply

I've ended up with the following lines of code.

func prepareCamera(cameraCase: CameraCase) {
	/* removing existing layers */
	if let sublayers = self.view.layer.sublayers {
		for sublayer in sublayers {
			if sublayer.isKind(of: AVCaptureVideoPreviewLayer.self) {
				sublayer.removeFromSuperlayer()
			}
		}
	}
	
	/* creating a capture session */
	captureSession = AVCaptureSession()
	
	guard let device = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInWideAngleCamera], mediaType: AVMediaType.video, position: cameraCase == .front ? .front : .back).devices.first else { return }
	/* creating a device input; bail out instead of force-unwrapping if the camera can't be opened */
	guard let videoInput = try? AVCaptureDeviceInput(device: device) else { return }
	if captureSession.canAddInput(videoInput) {
		captureSession.addInput(videoInput)
		imagePhotoOutput = AVCapturePhotoOutput() // setting output destination
		captureSession.addOutput(imagePhotoOutput) // adding photo output to session
	}
	
	/* creating a capture layer */
	let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
	previewLayer.frame = view.bounds // give the preview layer a frame so it is actually visible
	previewLayer.videoGravity = .resizeAspect
	
	/* adding video capture layer to the view layer */
	self.view.layer.addSublayer(previewLayer)
		
	/* starting capture session */
	DispatchQueue.global(qos: .background).async {
		self.captureSession.startRunning()
	}
}
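
One note on the queue choice: a global .background queue silences the warning, but Apple's sample code generally drives the session from a dedicated serial queue so that startRunning, stopRunning, and configuration changes never race each other. A minimal sketch of that pattern (the sessionQueue name and startSession helper are just illustrative):

	private let sessionQueue = DispatchQueue(label: "camera.session.queue")
	
	func startSession() {
		sessionQueue.async {
			// startRunning() is a blocking call, so keep it off the main thread
			if !self.captureSession.isRunning {
				self.captureSession.startRunning()
			}
		}
	}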
