Path: blob/master/AugmentedRealityWithArucoMarkers/augmented_reality_with_aruco.cpp
// This code is written by Sunita Nayak at BigVision LLC. It is based on the OpenCV project. It is subject to the license terms in the LICENSE file found in this distribution and at http://opencv.org/license.html

// Usage example: ./augmented_reality_with_aruco.out --image=test.jpg
//                ./augmented_reality_with_aruco.out --video=test.mp4
#include <fstream>
#include <sstream>
#include <iostream>
#include <algorithm>
#include <cmath>

#include <opencv2/aruco.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/calib3d.hpp>

const char* keys =
"{help h usage ? | | Usage examples: \n\t\t./augmented_reality_with_aruco.out --image=test.jpg \n\t\t./augmented_reality_with_aruco.out --video=test.mp4}"
"{image i        |<none>| input image  }"
"{video v        |<none>| input video  }"
"{device d       |0     | camera device index used when neither --image nor --video is given }"
;
using namespace cv;
using namespace aruco;
using namespace std;

int main(int argc, char** argv)
{
    CommandLineParser parser(argc, argv, keys);
    parser.about("Use this script to do Augmented Reality using Aruco markers in OpenCV.");
    if (parser.has("help"))
    {
        parser.printMessage();
        return 0;
    }

    // Open a video file or an image file or a camera stream.
    string str, outputFile;
    VideoCapture cap;
    VideoWriter video;
    Mat frame;

    // The new scene that will be projected onto the quadrilateral defined by the Aruco markers.
    Mat im_src = imread("new_scenery.jpg");
    if (im_src.empty())
    {
        cout << "Could not read the new scene image new_scenery.jpg" << endl;
        return 0;
    }

    try {

        outputFile = "ar_out_cpp.avi";
        if (parser.has("image"))
        {
            // Open the image file
            str = parser.get<String>("image");
            ifstream ifile(str);
            if (!ifile) throw("error");
            cap.open(str);
            str.replace(str.end()-4, str.end(), "_ar_out_cpp.jpg");
            outputFile = str;
        }
        else if (parser.has("video"))
        {
            // Open the video file
            str = parser.get<String>("video");
            ifstream ifile(str);
            if (!ifile) throw("error");
            cap.open(str);
            str.replace(str.end()-4, str.end(), "_ar_out_cpp.avi");
            outputFile = str;
        }
        // Open the webcam
        else cap.open(parser.get<int>("device"));

    }
    catch(...) {
        cout << "Could not open the input image/video stream" << endl;
        return 0;
    }

    // Get the video writer initialized to save the output video.
    // The output frame is the input frame and the augmented frame side by side, hence twice the width.
    if (!parser.has("image")) {
        video.open(outputFile, VideoWriter::fourcc('M','J','P','G'), 28,
                   Size(2*cap.get(CAP_PROP_FRAME_WIDTH), cap.get(CAP_PROP_FRAME_HEIGHT)));
    }

    // Create a window
    static const string kWinName = "Augmented Reality using Aruco markers in OpenCV";
    namedWindow(kWinName, WINDOW_NORMAL);

    // Process frames.
    while (waitKey(1) < 0)
    {
        // Get frame from the video
        cap >> frame;

        try {
            // Stop the program if reached end of video
            if (frame.empty()) {
                cout << "Done processing !!!" << endl;
                cout << "Output file is stored as " << outputFile << endl;
                waitKey(3000);
                break;
            }

            vector<int> markerIds;

            // Load the dictionary that was used to generate the markers.
            Ptr<Dictionary> dictionary = getPredefinedDictionary(DICT_6X6_250);

            // Declare the vectors that will contain the detected marker corners and the rejected marker candidates
            vector<vector<Point2f>> markerCorners, rejectedCandidates;

            // Initialize the detector parameters using default values
            Ptr<DetectorParameters> parameters = DetectorParameters::create();

            // Detect the markers in the image
            detectMarkers(frame, dictionary, markerCorners, markerIds, parameters, rejectedCandidates);

            // Using the detected markers, locate the quadrilateral on the target frame where the new scene is going to be displayed.
            // If any of the four expected marker ids (25, 33, 30, 23) is missing, the .at() calls below throw
            // and the frame is skipped via the catch block.
            vector<Point> pts_dst;
            float scalingFac = 0.02; //0.015;

            Point refPt1, refPt2, refPt3, refPt4;

            // Finding top left corner point of the target quadrilateral
            std::vector<int>::iterator it = std::find(markerIds.begin(), markerIds.end(), 25);
            int index = std::distance(markerIds.begin(), it);
            refPt1 = markerCorners.at(index).at(1);

            // Finding top right corner point of the target quadrilateral
            it = std::find(markerIds.begin(), markerIds.end(), 33);
            index = std::distance(markerIds.begin(), it);
            refPt2 = markerCorners.at(index).at(2);

            // Pad the quadrilateral outwards by a small fraction of the distance between the top two reference points.
            float distance = norm(refPt1-refPt2);
            pts_dst.push_back(Point(refPt1.x - round(scalingFac*distance), refPt1.y - round(scalingFac*distance)));
            pts_dst.push_back(Point(refPt2.x + round(scalingFac*distance), refPt2.y - round(scalingFac*distance)));

            // Finding bottom right corner point of the target quadrilateral
            it = std::find(markerIds.begin(), markerIds.end(), 30);
            index = std::distance(markerIds.begin(), it);
            refPt3 = markerCorners.at(index).at(0);
            pts_dst.push_back(Point(refPt3.x + round(scalingFac*distance), refPt3.y + round(scalingFac*distance)));

            // Finding bottom left corner point of the target quadrilateral
            it = std::find(markerIds.begin(), markerIds.end(), 23);
            index = std::distance(markerIds.begin(), it);
            refPt4 = markerCorners.at(index).at(0);
            pts_dst.push_back(Point(refPt4.x - round(scalingFac*distance), refPt4.y + round(scalingFac*distance)));

            // Get the corner points of the new scene image.
            vector<Point> pts_src;
            pts_src.push_back(Point(0, 0));
            pts_src.push_back(Point(im_src.cols, 0));
            pts_src.push_back(Point(im_src.cols, im_src.rows));
            pts_src.push_back(Point(0, im_src.rows));

            // Compute homography from source and destination points
            Mat h = cv::findHomography(pts_src, pts_dst);

            // Warped image
            Mat warpedImage;

            // Warp source image to destination based on homography
            warpPerspective(im_src, warpedImage, h, frame.size(), INTER_CUBIC);

            // Prepare a mask representing the region to copy from the warped image into the original frame.
            Mat mask = Mat::zeros(frame.rows, frame.cols, CV_8UC1);
            fillConvexPoly(mask, pts_dst, Scalar(255, 255, 255), LINE_AA);

            // Erode the mask to not copy the boundary effects from the warping
            Mat element = getStructuringElement(MORPH_RECT, Size(5,5));
            // Mat element = getStructuringElement(MORPH_RECT, Size(3,3));
            erode(mask, mask, element);

            // Copy the warped image into the original frame in the mask region.
            Mat imOut = frame.clone();
            warpedImage.copyTo(imOut, mask);

            // Show the original image and the new output image side by side
            Mat concatenatedOutput;
            hconcat(frame, imOut, concatenatedOutput);

            if (parser.has("image")) imwrite(outputFile, concatenatedOutput);
            else video.write(concatenatedOutput);

            imshow(kWinName, concatenatedOutput);

        }
        catch(const std::exception& e) {
            cout << endl << " e : " << e.what() << endl;
            cout << "Could not do homography !! " << endl;
            // return 0;
        }

    }

    cap.release();
    if (!parser.has("image")) video.release();

    return 0;
}
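
// Build note: a sketch under the assumption that OpenCV 4.x with the aruco contrib module is
// installed and exposes an "opencv4" pkg-config file; the exact flags depend on the local setup:
//
//     g++ augmented_reality_with_aruco.cpp -o augmented_reality_with_aruco.out $(pkg-config --cflags --libs opencv4)
//
// This sample targets the pre-4.7 contrib aruco API (DetectorParameters::create() and the free
// detectMarkers() function); with OpenCV >= 4.7 the equivalent detection step would instead use
// the cv::aruco::ArucoDetector class from the objdetect module.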