Steps to Reproduce:
1. Create a SwiftUI view that initializes an ARKit session and a camera frame provider.
2. Run the ARKit session and retrieve camera frames.
3. Extract the intrinsics and extrinsics matrices from a camera frame's sample data.
4. Attempt to project a 3D point from world space onto the 2D screen using the retrieved camera parameters.
5. Get stuck, because the documentation does not describe the structure or the correct usage of the intrinsics and extrinsics matrices.
import SwiftUI
import RealityKit
import ARKit

struct CodeLevelSupportView: View {
    @State private var vm = CodeLevelSupportViewModel()

    var body: some View {
        RealityView { realityViewContent in }
            .onAppear {
                vm.receiveCamera()
            }
    }
}
@MainActor
@Observable
class CodeLevelSupportViewModel {
    let cameraSession = CameraFrameProvider()
    let arSession = ARKitSession()

    init() {
        Task {
            await arSession.requestAuthorization(for: [.cameraAccess])
        }
    }

    func receiveCamera() {
        Task {
            do {
                try await arSession.run([cameraSession])
                guard let sequence = cameraSession.cameraFrameUpdates(for: .supportedVideoFormats(for: .main, cameraPositions: [.left])[0]) else {
                    print("failed to get a camera frame sequence (missing cameraAccess authorization?)")
                    return
                }
                for try await frame in sequence {
                    guard let sample = frame.sample(for: .left) else {
                        print("failed to get camera sample")
                        return
                    }
                    let leftEyeScreenImage: CVPixelBuffer = sample.pixelBuffer
                    let leftEyeViewportWidth: Int = CVPixelBufferGetWidth(leftEyeScreenImage)
                    let leftEyeViewportHeight: Int = CVPixelBufferGetHeight(leftEyeScreenImage)
                    let intrinsics = sample.parameters.intrinsics
                    let extrinsics = sample.parameters.extrinsics
                    // A point one meter in front of the origin (ARKit convention: -Z is forward).
                    let oneMeterInFront: SIMD3<Float> = .init(x: 0, y: 0, z: -1)
                    _ = projectWorldLocationToLeftEyeScreen(worldLocation: oneMeterInFront,
                                                            intrinsics: intrinsics,
                                                            extrinsics: extrinsics,
                                                            viewportSize: (leftEyeViewportWidth, leftEyeViewportHeight))
                }
            } catch {
                print("ARKit session failed: \(error)")
            }
        }
    }
    // When implemented, this should return a CGPoint? representing worldLocation's
    // position in the left-eye viewport, or nil when worldLocation is not visible
    // in the viewport (out of bounds).
    func projectWorldLocationToLeftEyeScreen(worldLocation: SIMD3<Float>,
                                             intrinsics: simd_float3x3,
                                             extrinsics: simd_float4x4,
                                             viewportSize: (width: Int, height: Int)) -> CGPoint? {
        // The API documentation does not describe the structure of the intrinsics
        // and extrinsics matrices, which makes this function hard to implement.
        return nil
    }
}
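For reference, here is a minimal pinhole-projection sketch of that function. It rests on assumptions the documentation does not confirm: that extrinsics maps points from the sample's reference space into camera space (if it turns out to be the inverse, camera-to-reference, use extrinsics.inverse instead), that camera space follows the usual ARKit convention (+X right, +Y up, -Z forward), and that intrinsics is the standard 3x3 K matrix in pixel units, with the focal lengths on the diagonal and the principal point in the last column:

import simd
import CoreGraphics

func projectWorldLocationToLeftEyeScreen(worldLocation: SIMD3<Float>,
                                         intrinsics: simd_float3x3,
                                         extrinsics: simd_float4x4,
                                         viewportSize: (width: Int, height: Int)) -> CGPoint? {
    // Assumption: extrinsics moves the point into camera space.
    let p = extrinsics * SIMD4<Float>(worldLocation.x, worldLocation.y, worldLocation.z, 1)

    // With -Z forward, points at or behind the camera plane are never visible.
    guard p.z < 0 else { return nil }

    // simd matrices are column-major: intrinsics[column][row].
    let fx = intrinsics[0][0], fy = intrinsics[1][1]
    let cx = intrinsics[2][0], cy = intrinsics[2][1]

    // Perspective divide; flip Y because image coordinates grow downward.
    let u = fx * (p.x / -p.z) + cx
    let v = cy - fy * (p.y / -p.z)

    // Outside the viewport means not visible: return nil.
    guard u >= 0, u < Float(viewportSize.width),
          v >= 0, v < Float(viewportSize.height) else { return nil }
    return CGPoint(x: CGFloat(u), y: CGFloat(v))
}

Note that a point expressed in world space would first have to be brought into whatever space extrinsics expects; if that space is the device's, the current pose from a WorldTrackingProvider's queryDeviceAnchor(atTimestamp:) would supply the missing world-to-device transform.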
com.apple.Vision Code=9 "Could not build inference plan - ANECF error: failed to load ANE model file:///System/Library/Frameworks/Vision.framework/anodv4_drop6_fp16.H14G.espresso.hwx"
This code raises the error:
import Vision

func imageToHeadBox(image: CVPixelBuffer) async throws -> [CGRect] {
    let request: DetectFaceRectanglesRequest = DetectFaceRectanglesRequest()
    let faceResult: [FaceObservation] = try await request.perform(on: image)
    // Collect each detected face's bounding box.
    let faceBoxes: [CGRect] = faceResult.map { face in
        let faceBoundingBox: CGRect = face.boundingBox.cgRect
        return faceBoundingBox
    }
    return faceBoxes
}
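The error points at the Apple Neural Engine model failing to load, so one thing worth trying is keeping the request off the ANE. The sketch below does that with the classic VNDetectFaceRectanglesRequest and the compute-device selection API from iOS 17 / visionOS 1; whether the newer DetectFaceRectanglesRequest exposes an equivalent control, and whether pinning the CPU actually sidesteps the ANECF failure, are assumptions I have not verified:

import Vision
import CoreML
import CoreVideo

// Hypothetical fallback: pin the request's main compute stage to a CPU device
// so the ANE model never has to load.
func imageToHeadBoxAvoidingANE(image: CVPixelBuffer) throws -> [CGRect] {
    let request = VNDetectFaceRectanglesRequest()
    // Ask Vision which devices each compute stage supports, then pick a CPU one.
    let devices = try request.supportedComputeDevices()
    if let cpu = devices[.main]?.first(where: { device in
        if case .cpu = device { return true }
        return false
    }) {
        request.setComputeDevice(cpu, for: .main)
    }
    try VNImageRequestHandler(cvPixelBuffer: image).perform([request])
    // VNFaceObservation.boundingBox is normalized to [0, 1], like the new API's.
    return (request.results ?? []).map(\.boundingBox)
}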