OpenCV Unity - get a 3D position from a 2D image position

Date: 2018-03-02 19:13:39

Tags: c++ opencv unity3d image-processing hololens

I am writing a HoloLens application and I am trying to build a real-time tracking system. I will be using OpenCV for the computer vision algorithms. I saw there that it is possible to get the contour of a colored object -> https://www.youtube.com/watch?v=hQ-bpfdWQh8&t=26s

So with this solution I can get the contour of a colored region and then the position of the middle pixel of that region, but how do I now convert that position into a 3D position in the HoloLens coordinate system?
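
My rough understanding is that, if I had the camera intrinsics (fx, fy, cx, cy) and some depth value for that pixel, I could back-project the pixel into camera space with the standard pinhole model. A minimal sketch of what I mean (the intrinsics and the depth are placeholders here, not values I actually have yet):

#include <opencv2/core.hpp>

// hypothetical sketch: back-project pixel (u, v) with an assumed depth d (in meters)
// into camera-space coordinates using the pinhole model; fx, fy, cx, cy are intrinsics
cv::Point3f backProject(float u, float v, float d,
                        float fx, float fy, float cx, float cy)
{
    cv::Point3f p;
    p.x = (u - cx) * d / fx;   // X = (u - cx) * Z / fx
    p.y = (v - cy) * d / fy;   // Y = (v - cy) * Z / fy
    p.z = d;                   // Z is the assumed depth along the optical axis
    return p;
}

But that still leaves me without the depth and without knowing how to express the result in the HoloLens coordinate system.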

I saw some solutions for pose estimation -> http://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_calib3d/py_pose/py_pose.html

which use this method: https://docs.opencv.org/3.0-beta/modules/cuda/doc/calib3d.html

I have all the parameters that function needs except "object". If I read it correctly, that is the 3D position of the object, but I don't have that 3D position; it is exactly what I am trying to compute.
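
For reference, the pose-estimation call from those pages (solvePnP / solvePnPRansac) looks roughly like this, if I read the documentation correctly; objectPoints would be the known 3D coordinates of points on the object in its own model frame, which is exactly the part I am missing:

#include <vector>
#include <opencv2/calib3d.hpp>

// hypothetical sketch of the pose-estimation call, just to show where "object" goes
void estimateObjectPose()
{
    std::vector<cv::Point3f> objectPoints;   // known 3D model points of the object (the part I don't have)
    std::vector<cv::Point2f> imagePoints;    // matching 2D pixel positions, e.g. from the contour tracking
    cv::Mat cameraMatrix, distCoeffs;        // from camera calibration
    cv::Mat rvec, tvec;                      // outputs: rotation and translation of the object

    // needs at least 4 valid 3D-2D point correspondences to produce a pose
    cv::solvePnP(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec);
}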

So this is what I have so far:

#include <sstream>
#include <string>
#include <iostream>
#include <vector>

#include <opencv2/opencv.hpp>

#include "Object.h"

using namespace std;
using namespace cv;

// default capture width and height
const int FRAME_WIDTH = 640;
const int FRAME_HEIGHT = 480;

// max number of objects to be detected in frame
const int MAX_NUM_OBJECTS = 50;

// minimum and maximum object area
const int MIN_OBJECT_AREA = 20 * 20;
const int MAX_OBJECT_AREA = FRAME_HEIGHT * FRAME_WIDTH / 1.5;

// names that will appear at the top of each window
const string windowName = "Original Image";
const string thresholdName = "Threshold Image";

// List of objects to track
std::vector<Object> objects;


string intToString(int number) {

    std::stringstream ss;
    ss << number;
    return ss.str();
}

Object* getObjectByCoord(int x, int y) {
    for (Object &o : objects) {
        if (o.getXPos() == x && o.getYPos() == y)
            return &o;
    }

    return NULL;
}

void drawObject(Object o, int index, Mat &frame, vector< vector<Point> > contours, vector<Vec4i> hierarchy) {
    cv::drawContours(frame, contours, index, o.getColor(), 3, 8, hierarchy);
    cv::circle(frame, cv::Point(o.getXPos(), o.getYPos()), 5, o.getColor());
    cv::putText(frame, intToString(o.getXPos()) + " , " + intToString(o.getYPos()), cv::Point(o.getXPos(), o.getYPos() + 20), 1, 1, o.getColor());
    cv::putText(frame, o.getType(), cv::Point(o.getXPos(), o.getYPos() - 20), 1, 2, o.getColor());
}

void morphOps(Mat &thresh) {

    //create structuring element that will be used to "dilate" and "erode" image.
    //the element chosen here is a 3px by 3px rectangle
    Mat erodeElement = getStructuringElement(MORPH_RECT, Size(3, 3));
    //dilate with a larger element to make sure the object is nicely visible
    Mat dilateElement = getStructuringElement(MORPH_RECT, Size(8, 8));

    erode(thresh, thresh, erodeElement);
    erode(thresh, thresh, erodeElement);

    dilate(thresh, thresh, dilateElement);
    dilate(thresh, thresh, dilateElement);
}

void trackFilteredObject(Object theObject, Mat threshold, Mat &cameraFeed) {
    Mat temp;
    threshold.copyTo(temp);

    //these two vectors needed for output of findContours
    vector< vector<Point> > contours;
    vector<Vec4i> hierarchy;

    //find contours of filtered image using openCV findContours function
    findContours(temp, contours, hierarchy, RETR_CCOMP, CHAIN_APPROX_SIMPLE);

    //use moments method to find our filtered object
    double refArea = 0;

    if (hierarchy.size() > 0) {
        int numObjects = hierarchy.size();

        for (int index = 0; index >= 0; index = hierarchy[index][0]) {

            Moments moment = moments((cv::Mat)contours[index]);
            double area = moment.m00;

            if (area>MIN_OBJECT_AREA) {
                int x = moment.m10 / area;
                int y = moment.m01 / area;

                if (getObjectByCoord(x, y) != NULL)
                    continue;

                theObject.setXPos(x);
                theObject.setYPos(y);

                drawObject(theObject, index, cameraFeed, contours, hierarchy);

                break;
            }
        }
    }
}

int main(int argc, char* argv[])
{
    //Matrix to store each frame of the webcam feed
    Mat cameraFeed;
    Mat threshold;
    Mat HSV;

    namedWindow(windowName, 0);

    // Preparing all the objects to track
    // Color in BGR and HSV -> SV are to 255
    objects.push_back(Object("blue", Scalar(160, 70, 50), Scalar(190, 255, 255), Scalar(255, 252, 123)));

    //video capture object to acquire webcam feed
    VideoCapture capture;

    //open capture object at location zero (default location for webcam)
    capture.open(0);

    //set height and width of capture frame
    capture.set(CAP_PROP_FRAME_WIDTH, FRAME_WIDTH);
    capture.set(CAP_PROP_FRAME_HEIGHT, FRAME_HEIGHT);

    //give the camera a moment to warm up before the first read
    waitKey(1000);

    //start an infinite loop where webcam feed is copied to cameraFeed matrix
    //all of our operations will be performed within this loop
    while (1) {
        capture.read(cameraFeed);

        if (!cameraFeed.data)
        {
            return -1;
        }

        // Track objects
        for (Object &o : objects) {
            cvtColor(cameraFeed, HSV, COLOR_BGR2HSV);
            inRange(HSV, o.getHSVmin(), o.getHSVmax(), threshold);
            morphOps(threshold);
            imshow(thresholdName, threshold);
            trackFilteredObject(o, threshold, cameraFeed);
        }

        imshow(windowName, cameraFeed);

        //delay 30ms so that screen can refresh.
        waitKey(30);
    }
    return 0;
}

I can get the contour of an object within a color range, but I need to translate that 2D point into a 3D point that fits the HoloLens coordinate system. (I have access to the projectionMatrix, etc.)
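
What I think is still missing (assuming I can get a camera-space point for the pixel, plus the camera-to-world matrix that the HoloLens exposes alongside the projection matrix) is just a homogeneous transform into world space, roughly like this; cameraToWorld here is a placeholder for the per-frame matrix from the locatable camera:

#include <opencv2/core.hpp>

// hypothetical sketch: move a camera-space point into the HoloLens world frame
// using a 4x4 camera-to-world matrix taken from the frame the pixel came from
cv::Point3f cameraToWorldPoint(const cv::Matx44f &cameraToWorld, const cv::Point3f &pCam)
{
    cv::Vec4f p(pCam.x, pCam.y, pCam.z, 1.0f);   // homogeneous camera-space point
    cv::Vec4f w = cameraToWorld * p;             // apply the rigid transform
    return cv::Point3f(w[0], w[1], w[2]);
}

Without a depth value this only gives me a ray from the camera through the pixel, so I assume the actual distance has to come from somewhere else (a known object size, or intersecting the ray with the spatial mapping mesh), but I am not sure what the right approach is on the HoloLens.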

Can anyone here help me with this?

0 Answers:

There are no answers