我现在正在尝试为英特尔实感 3D 相机 (SR300) 编写 PMD 相机的类似代码。但是,我在 realsense SDK 中找不到做同样事情的方法。这有什么技巧吗?或者你有什么建议?
pmdGet3DCoordinates(PMDHandle hnd, float * data, size_t size);
一个示例用法是:
float * cartesianDist = new float[nRows*nCols*3];
res = pmdGet3DCoordinates(hnd, cartesianDist, nCols*nRows*3*sizeof(float));
我需要这个函数来创建一个 xyzmap,如下面的代码所示:
int res = pmdGet3DCoordinates(hnd, dists, 3 * numPixels * sizeof(float));
xyzMap = cv::Mat(xyzMap.size(), xyzMap.type(), dists);
到目前为止,我已经编写了以下代码,我不知道它对 Intel RealSense 是否有意义。请随意发表评论。
void SR300Camera::fillInZCoords()
{
//int res = pmdGet3DCoordinates(hnd, dists, 3 * numPixels * sizeof(float)); //store x,y,z coordinates dists (type: float*)
////float * zCoords = new float[1]; //store z-Coordinates of dists in zCoords
//xyzMap = cv::Mat(xyzMap.size(), xyzMap.type(), dists);
Projection *projection = pp->QueryCaptureManager()->QueryDevice()->CreateProjection();
std::vector<Point3DF32> vertices;
vertices.resize(bufferSize.width * bufferSize.height);
projection->QueryVertices(sample->depth, &vertices[0]);
xyzBuffer.clear();
for (int i = 0; i < bufferSize.width*bufferSize.height; i++) {
//cv::Point3f p;
//p.x = vertices[i].x;
//p.y = vertices[i].y;
//p.z = vertices[i].z;
//xyzBuffer.push_back(p);
xyzBuffer.push_back(cv::Point3f(vertices[i].x, vertices[i].y, vertices[i].z));
}
xyzMap = cv::Mat(xyzBuffer);
projection->Release();
}