Access depth data purely from lidar

Greetings everyone,

I am a new developer, and I am trying to use the LiDAR sensor of the iPad Pro for potential engineering applications. It seems the new 'depth map' feature is produced with information from both the LiDAR sensor and the camera. Is there a way I can access the depth data points that are detected purely by the LiDAR sensor? My application scenario is too dark for the camera to be used, so I can only rely on the LiDAR. Thank you in advance!
Same question here. I haven't found anything yet.
Hello,

As you have noted, and as stated in Explore ARKit 4, "The colored RGB image from the wide-angle camera and the depth ratings from the LiDAR scanner are fused together using advanced machine learning algorithms to create a dense depth map that is exposed through the API."

There is no API which offers access to the "depth ratings from the LiDAR scanner" directly. If you would like such an API, you should file an enhancement request using Feedback Assistant.

This app seems to be able to get depth points from LiDAR. https://apps.apple.com/us/app/id1524700432 How to access it ?

Hi, I successfully saved the depth map to a 32-bit binary file. Then I can read the file and convert it to a PNG using Python and OpenCV.

/// Converts a depth pixel buffer (kCVPixelFormatType_DepthFloat32, e.g.
/// `ARFrame.sceneDepth?.depthMap`) into a 2-D Swift array of Float32 values.
/// - Parameter depthMap: A CVPixelBuffer whose pixels are 32-bit floats.
/// - Returns: `height` rows of `width` depth values (empty rows if the
///   buffer's base address is unavailable).
func convertDepthData(depthMap: CVPixelBuffer) -> [[Float32]] {
        let width = CVPixelBufferGetWidth(depthMap)
        let height = CVPixelBufferGetHeight(depthMap)
        var convertedDepthMap: [[Float32]] = Array(
            repeating: Array(repeating: 0, count: width),
            count: height
        )
        // We only read the buffer, so lock it read-only. Note: the defined
        // flag is `.readOnly` (rawValue 1); the original `rawValue: 2` is not
        // a valid CVPixelBufferLockFlags value.
        CVPixelBufferLockBaseAddress(depthMap, .readOnly)
        // Guarantee the unlock happens on every exit path.
        defer { CVPixelBufferUnlockBaseAddress(depthMap, .readOnly) }
        guard let baseAddress = CVPixelBufferGetBaseAddress(depthMap) else {
            return convertedDepthMap
        }
        // Rows can be padded for alignment: advance by bytesPerRow, not by
        // width * MemoryLayout<Float32>.size, or the image comes out skewed
        // on devices that pad row strides.
        let bytesPerRow = CVPixelBufferGetBytesPerRow(depthMap)
        for row in 0 ..< height {
            let rowPointer = (baseAddress + row * bytesPerRow)
                .assumingMemoryBound(to: Float32.self)
            for col in 0 ..< width {
                convertedDepthMap[row][col] = rowPointer[col]
            }
        }
        return convertedDepthMap
    }

Then save it to app container.

/// Writes a 2-D Float32 array as raw binary to
/// `<dataset directory>/<folderName>/<fileName>`, creating the folder if needed.
/// - Parameters:
///   - array: Row-major depth values; all rows are assumed to have equal length.
///   - fileName: Output file name, e.g. "000001.bin".
///   - folderName: Sub-directory under the dataset directory, e.g. "depth".
/// - Returns: The file URL on success, or `nil` on any failure (including an
///   empty input array, which previously trapped on `array[0]`).
func saveFloat32ArrayToDocumentsDirectory(array: [[Float32]], fileName: String, folderName: String) -> URL? {
        // Guard against `array[0]` trapping when the input is empty.
        guard let firstRow = array.first, !firstRow.isEmpty else {
            print("Error saving float32 array: input array is empty")
            return nil
        }
        let fileManager = FileManager.default
        // getDatasetDirectory() is defined elsewhere in the project.
        let datasetDirectory = getDatasetDirectory()
        let depthDirectory = datasetDirectory.appendingPathComponent(folderName)
        if !fileManager.fileExists(atPath: depthDirectory.path) {
            do {
                try fileManager.createDirectory(at: depthDirectory, withIntermediateDirectories: true, attributes: nil)
            } catch {
                print("Error creating depth directory: \(error.localizedDescription)")
                return nil
            }
        }
        let fileURL = depthDirectory.appendingPathComponent(fileName)
        // Flatten to row-major order and copy the bytes without needing a
        // mutable array or manual size arithmetic.
        let flatArray = array.flatMap { $0 }
        let data = flatArray.withUnsafeBufferPointer { Data(buffer: $0) }
        do {
            try data.write(to: fileURL, options: .atomic)
            return fileURL
        } catch {
            print("Error saving float32 array: \(error)")
            return nil
        }
    }

You can call these two functions in the following way. Please note that `frame` is an ARFrame.

// Per-frame capture: `frame` is an ARFrame and `frameNumber` is an
// incrementing Int maintained by the caller. The original snippet's `guard`
// was missing its mandatory `else` clause and would not compile.
guard let depthMap = frame.sceneDepth?.depthMap else { return }
let convertedDepthMap = convertDepthData(depthMap: depthMap)
// Save the converted depth map as a raw .bin file, e.g. "000001.bin".
let fileName = String(format: "%06d.bin", frameNumber)
if let savedFileURL = saveFloat32ArrayToDocumentsDirectory(array: convertedDepthMap, fileName: fileName, folderName: "depth") {
    print("Array saved to: \(savedFileURL)")
} else {
    print("Error saving float32 array to the app container.")
}

In OpenCV, you can read the depth array and convert it to a PNG. The depth image is 256×192. Because the original values are floats and PNG can only store integers, I multiply by 1000 (metres → millimetres). The script below converts each .bin file to a 16-bit grayscale PNG.

import numpy as np
import cv2
import glob
import os
from PIL import Image

# Depth maps from ARKit's sceneDepth are 256 x 192 Float32 values (metres).
row_count = 192
col_count = 256

# Folder containing the raw .bin files, and the folder for the output PNGs.
folder_path = "./depth/"
output_path = "./depthImg2/"

# cv2.imwrite silently returns False when the target directory is missing,
# so make sure it exists up front.
os.makedirs(output_path, exist_ok=True)

# Convert every .bin file in the folder to a 16-bit grayscale PNG.
for bin_file_path in glob.glob(os.path.join(folder_path, "*.bin")):
    # Read the raw little-endian Float32 data into a (rows, cols) array.
    with open(bin_file_path, "rb") as f:
        float32_array = np.fromfile(f, dtype=np.float32).reshape(row_count, col_count)
        print(float32_array)

    # Metres -> millimetres, so precision survives the integer conversion.
    float32_array *= 1000

    max_depth = np.max(float32_array)
    min_depth = np.min(float32_array)
    print(f"Max depth: {max_depth}, Min depth: {min_depth}")

    # Clip before casting: values above 65535 mm (~65.5 m) or below 0 would
    # otherwise wrap around when converted to uint16.
    uint16_array = np.clip(float32_array, 0, np.iinfo(np.uint16).max).astype(np.uint16)

    # Filename without the .bin extension.
    filename = os.path.splitext(os.path.basename(bin_file_path))[0]

    # Save the 16-bit grayscale depth image with the same base name, and
    # surface write failures instead of ignoring them.
    depth_image_path = os.path.join(output_path, filename + ".png")
    if not cv2.imwrite(depth_image_path, uint16_array):
        print(f"Failed to write {depth_image_path}")
Access depth data purely from lidar
 
 
Q