Thank you for the reply. It turns out my issue was that my memcpy needed to read from the pixel buffer's base address (via CVPixelBufferGetBaseAddress) rather than from the CVPixelBufferRef itself, like this...
// Copy the depth pixels starting at the pixel buffer's base address,
// not from the CVPixelBufferRef handle itself (that was the original bug).
// NOTE(review): CVPixelBufferLockBaseAddress must be called before
// CVPixelBufferGetBaseAddress and unlocked after the memcpy — confirm the
// lock/unlock pair exists around this snippet.
float *bufferAddress = (float*)CVPixelBufferGetBaseAddress(session.currentFrame.capturedDepthData.depthDataMap);
memcpy(buffer, bufferAddress, ciImageWidth*ciImageHeight*bytesPerPixel);
But now I seem to be having an issue where face tracking position (this is in unity) is not matching up with the 3d depth data world position after applying the intrinsic matrix. I assume the world position of face tracking and of the 3d point cloud is supposed to match since it's based on the same camera?
Also, I'm noticing that even though session.currentFrame.capturedDepthData.cameraCalibrationData.intrinsicMatrix has data, session.currentFrame.capturedDepthData.cameraCalibrationData.intrinsicMatrixReferenceDimensions often does not. Shouldn't they both have data at the same time?
https://i.imgur.com/PPHXbBS.mp4
/// <summary>
/// Per-frame ARKit callback: copies the camera image into a Unity texture,
/// reads the native depth buffer, renders it as a grayscale texture, and
/// rebuilds a colored point cloud by back-projecting each depth sample
/// through the camera intrinsic matrix.
/// </summary>
/// <param name="cam">Frame data supplied by the Unity ARKit plugin (unused directly; state is pulled from the native session).</param>
void FrameUpdate(UnityARCamera cam)
{
    // Handle to the native ARKit session.
    UnityARSessionNativeInterface m_session = UnityARSessionNativeInterface.GetARSessionNativeInterface();

    // Size of the UI canvas (not consumed below yet; kept for layout work).
    Canvas uiCanvas = FindObjectOfType<Canvas>();
    int panelWidth = (int)uiCanvas.pixelRect.width;
    int panelHeight = (int)uiCanvas.pixelRect.height;

    // One-time setup on the first frame: query the camera texture size and
    // allocate the managed texture plus an unmanaged staging buffer.
    if (!sessionStarted)
    {
        DepthPlugin.GetCurrentFrameCameraTextureSize(m_session.GetNativeSessionPtr(), out textureWidth, out textureHeight);
        Debug.Log("Get camera texture size returned " + textureWidth.ToString() + "," + textureHeight.ToString());
        // Create the 2D texture that receives the camera image each frame.
        cameraTexture = new Texture2D(textureWidth, textureHeight, TextureFormat.RGBA32, false);
        // GetComponent<RawImage>().texture = cameraTexture;
        // NOTE(review): this native allocation (and rawDepthBuffer below) is
        // never released — call Marshal.FreeHGlobal in OnDestroy to avoid a leak.
        rawBuffer = Marshal.AllocHGlobal(textureWidth * textureHeight * 4); // 4 bytes/pixel for RGBA32
        sessionStarted = true;
    }

    // Copy the latest camera image from native memory into the Unity texture.
    DepthPlugin.GetCurrentFrameCameraTextureIntoBufferAsARGB(m_session.GetNativeSessionPtr(), rawBuffer, textureWidth, textureHeight);
    cameraTexture.LoadRawTextureData(rawBuffer, textureWidth * textureHeight * 4);
    cameraTexture.Apply();

    // Query this frame's depth buffer dimensions and camera intrinsics.
    DepthPlugin.GetCurrentFrameDepthBufferSize(m_session.GetNativeSessionPtr(), out depthWidth, out depthHeight, out depthBytesPerPixel);
    Debug.Log("Get depth buffer size returned " + depthWidth.ToString() + "," + depthHeight.ToString() + " bytes per pixel " + depthBytesPerPixel.ToString());
    Matrix4x4 intrinsicMatrix;
    Vector2 intrinsicSize;
    DepthPlugin.GetCurrentFrameIntrinsics(m_session.GetNativeSessionPtr(), out intrinsicMatrix, out intrinsicSize);
    // For a pinhole intrinsic matrix: m00 = fx, m11 = fy, m02 = cx, m12 = cy.
    Debug.Log("matrix 2,0 is " + intrinsicMatrix.m02.ToString() + " 2,1 is " + intrinsicMatrix.m12.ToString() + " 0,0 is " + intrinsicMatrix.m00.ToString() + " 1,1 is " + intrinsicMatrix.m11.ToString());

    // Only proceed when a depth buffer and valid intrinsics are available
    // (intrinsicMatrixReferenceDimensions can be empty on some frames).
    if (depthWidth > 0 && intrinsicSize.x > 0)
    {
        if (!depthStarted)
        {
            // First depth frame: create the preview texture and staging buffer.
            depthTexture = new Texture2D(depthWidth, depthHeight, TextureFormat.RGBA32, false);
            // Assumes a RawImage component hosts the depth preview — confirm.
            GetComponent<RawImage>().texture = depthTexture;
            // NOTE(review): freed nowhere — see rawBuffer note above.
            rawDepthBuffer = Marshal.AllocHGlobal(depthWidth * depthHeight * depthBytesPerPixel);
        }

        // Pull the latest depth samples into the unmanaged buffer.
        DepthPlugin.GetCurrentFrameDepthBufferIntoBuffer(m_session.GetNativeSessionPtr(), rawDepthBuffer, depthWidth, depthHeight, depthBytesPerPixel);

        // Visualize depth as grayscale. Assumes depthBytesPerPixel == 4
        // (32-bit float meters, DisparityFloat32/DepthFloat32) — TODO confirm.
        Color[] pixels = depthTexture.GetPixels();
        unsafe
        {
            float* bufferPointer = (float*)rawDepthBuffer.ToPointer();
            for (int i = 0; i < pixels.Length; i++)
            {
                float d = bufferPointer[i];
                pixels[i] = new Color(d, d, d, 1.0f);
            }
        }
        depthTexture.SetPixels(pixels);
        depthTexture.Apply();

        // Track the observed depth range for diagnostics. Start min at
        // +infinity sentinel: the previous 0.0f initializer could never be
        // beaten by positive depth values, so min always logged as 0.
        float minDepth = float.MaxValue;
        float maxDepth = 0.0f;

        // Rebuild the point cloud: back-project each depth sample through the
        // intrinsics, and sample the camera texture for its color.
        Color[] texturePixels = cameraTexture.GetPixels();
        unsafe
        {
            pointCloud = new List<Vector3>();
            colorCloud = new List<Color>();
            float* bufferPointer = (float*)rawDepthBuffer.ToPointer();
            for (int v = 0; v < depthHeight; v++)
            {
                for (int u = 0; u < depthWidth; u++)
                {
                    float z = bufferPointer[v * depthWidth + u];
                    if (z < minDepth)
                        minDepth = z;
                    if (z > maxDepth)
                        maxDepth = z;
                    // Keep only plausible face-range depths (1 cm .. 1 m).
                    if (z > 0.01f && z < 1.0f)
                    {
                        // Normalized image coordinates in [0,1).
                        float U = (float)u / (float)depthWidth;
                        float V = (float)v / (float)depthHeight;
                        // Pinhole back-projection: X = (px - cx) * z / fx.
                        // BUG FIX: use m02/m12 (principal point cx, cy — the
                        // same entries logged above), not m20/m21, which are
                        // zero in an intrinsic matrix and shifted every point;
                        // that offset is why the cloud didn't line up with the
                        // face-tracking world position.
                        float x = (U * intrinsicSize.x - intrinsicMatrix.m02) * z / intrinsicMatrix.m00;
                        float y = (V * intrinsicSize.y - intrinsicMatrix.m12) * z / intrinsicMatrix.m11;
                        pointCloud.Add(new Vector3(x, y, z));
                        // Sample the camera texture at the same normalized UV.
                        int textureX = (int)((float)textureWidth * U);
                        int textureY = (int)((float)textureHeight * V);
                        Color thisColor = texturePixels[(textureY * textureWidth) + textureX];
                        colorCloud.Add(new Color(thisColor.r, thisColor.g, thisColor.b));
                    }
                }
            }
        }
        // If no samples were seen, report 0 rather than the sentinel.
        if (minDepth == float.MaxValue)
            minDepth = 0.0f;
        Debug.Log("Unity min depth is " + minDepth.ToString() + " max depth is " + maxDepth.ToString());
        pointCloudChanged = true;
        depthStarted = true;
    }
}