Add project files.

This commit is contained in:
CaiXiang
2025-01-20 10:30:01 +08:00
parent 77371da5d7
commit 752be79e06
1010 changed files with 610100 additions and 0 deletions


@@ -0,0 +1,212 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2015, Baisheng Lai (laibaisheng@gmail.com), Zhejiang University,
// all rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_MULTICAMERACALIBRATION_HPP__
#define __OPENCV_MULTICAMERACALIBRATION_HPP__
#include "opencv2/ccalib/randpattern.hpp"
#include "opencv2/ccalib/omnidir.hpp"
#include <string>
#include <iostream>
namespace cv { namespace multicalib {
//! @addtogroup ccalib
//! @{
#define HEAD -1
#define INVALID -2
/** @brief Class for multiple-camera calibration that supports pinhole cameras and omnidirectional cameras.
For the omnidirectional camera model, please refer to omnidir.hpp in the ccalib module.
It first calibrates each camera individually, then applies a bundle-adjustment-like optimization to
refine the extrinsic parameters. So far, it only supports the "random" pattern for calibration;
see randpattern.hpp in the ccalib module for details.
Images used for calibration should be named "cameraIdx-timestamp.*"; several images with the same
timestamp are photographs of the same pattern pose. cameraIdx should start from 0.
For more details, please refer to the paper
B. Li, L. Heng, K. Kevin and M. Pollefeys, "A Multiple-Camera System
Calibration Toolbox Using A Feature Descriptor-Based Calibration
Pattern", in IROS 2013.
*/
class CV_EXPORTS MultiCameraCalibration
{
public:
enum {
PINHOLE,
OMNIDIRECTIONAL
//FISHEYE
};
// an edge connects a camera and pattern
struct edge
{
int cameraVertex; // vertex index for camera in this edge
int photoVertex; // vertex index for pattern in this edge
int photoIndex; // photo index among photos for this camera
Mat transform; // transform from pattern to camera
edge(int cv, int pv, int pi, Mat trans)
{
cameraVertex = cv;
photoVertex = pv;
photoIndex = pi;
transform = trans;
}
};
struct vertex
{
Mat pose; // relative pose to the first camera. For camera vertex, it is the
// transform from the first camera to this camera, for pattern vertex,
// it is the transform from pattern to the first camera
int timestamp; // timestamp of photo, only available for photo vertex
vertex(Mat po, int ts)
{
pose = po;
timestamp = ts;
}
vertex()
{
pose = Mat::eye(4, 4, CV_32F);
timestamp = -1;
}
};
/* @brief Constructor
@param cameraType camera type, PINHOLE or OMNIDIRECTIONAL
@param nCameras number of cameras
@param fileName filename of the string list of images used for calibration; the file is generated
by imagelist_creator from the OpenCV samples. The first entry in the list is the pattern filename.
@param patternWidth the physical width of the pattern, in a user-defined unit.
@param patternHeight the physical height of the pattern, in a user-defined unit.
@param verbose verbosity level for logging output.
@param showExtraction whether to show extracted features and feature filtering.
@param nMiniMatches minimal number of matched features for a frame.
@param flags calibration flags
@param criteria optimization stopping criteria.
@param detector feature detector that detects feature points in the pattern and images.
@param descriptor feature descriptor.
@param matcher feature matcher.
*/
MultiCameraCalibration(int cameraType, int nCameras, const std::string& fileName, float patternWidth,
float patternHeight, int verbose = 0, int showExtraction = 0, int nMiniMatches = 20, int flags = 0,
TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 200, 1e-7),
Ptr<FeatureDetector> detector = AKAZE::create(AKAZE::DESCRIPTOR_MLDB, 0, 3, 0.006f),
Ptr<DescriptorExtractor> descriptor = AKAZE::create(AKAZE::DESCRIPTOR_MLDB,0, 3, 0.006f),
Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce-L1"));
/* @brief load images
*/
void loadImages();
/* @brief initialize multiple camera calibration. It calibrates each camera individually.
*/
void initialize();
/* @brief optimize the extrinsic parameters
*/
double optimizeExtrinsics();
/* @brief run multi-camera calibration; it runs loadImages(), initialize() and optimizeExtrinsics()
*/
double run();
/* @brief write camera parameters to file.
*/
void writeParameters(const std::string& filename);
private:
std::vector<std::string> readStringList();
int getPhotoVertex(int timestamp);
void graphTraverse(const Mat& G, int begin, std::vector<int>& order, std::vector<int>& pre);
void findRowNonZero(const Mat& row, Mat& idx);
void computeJacobianExtrinsic(const Mat& extrinsicParams, Mat& JTJ_inv, Mat& JTE);
void computePhotoCameraJacobian(const Mat& rvecPhoto, const Mat& tvecPhoto, const Mat& rvecCamera,
const Mat& tvecCamera, Mat& rvecTran, Mat& tvecTran, const Mat& objectPoints, const Mat& imagePoints, const Mat& K,
const Mat& distort, const Mat& xi, Mat& jacobianPhoto, Mat& jacobianCamera, Mat& E);
void compose_motion(InputArray _om1, InputArray _T1, InputArray _om2, InputArray _T2, Mat& om3, Mat& T3, Mat& dom3dom1,
Mat& dom3dT1, Mat& dom3dom2, Mat& dom3dT2, Mat& dT3dom1, Mat& dT3dT1, Mat& dT3dom2, Mat& dT3dT2);
void JRodriguesMatlab(const Mat& src, Mat& dst);
void dAB(InputArray A, InputArray B, OutputArray dABdA, OutputArray dABdB);
double computeProjectError(Mat& parameters);
void vector2parameters(const Mat& parameters, std::vector<Vec3f>& rvecVertex, std::vector<Vec3f>& tvecVertexs);
void parameters2vector(const std::vector<Vec3f>& rvecVertex, const std::vector<Vec3f>& tvecVertex, Mat& parameters);
    int _camType;    // PINHOLE or OMNIDIRECTIONAL (FISHEYE is not supported yet)
int _nCamera;
int _nMiniMatches;
int _flags;
int _verbose;
double _error;
float _patternWidth, _patternHeight;
TermCriteria _criteria;
std::string _filename;
int _showExtraction;
Ptr<FeatureDetector> _detector;
Ptr<DescriptorExtractor> _descriptor;
Ptr<DescriptorMatcher> _matcher;
std::vector<edge> _edgeList;
std::vector<vertex> _vertexList;
std::vector<std::vector<cv::Mat> > _objectPointsForEachCamera;
std::vector<std::vector<cv::Mat> > _imagePointsForEachCamera;
std::vector<cv::Mat> _cameraMatrix;
std::vector<cv::Mat> _distortCoeffs;
std::vector<cv::Mat> _xi;
std::vector<std::vector<Mat> > _omEachCamera, _tEachCamera;
};
//! @}
}} // namespace multicalib, cv
#endif
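
A minimal usage sketch for the class above (not part of this commit): the camera count, list filename, and pattern dimensions are illustrative assumptions, and the image list is the one described in the constructor documentation.

#include "opencv2/ccalib/multicalib.hpp"
#include <iostream>

int main()
{
    // The first entry of the list file is the pattern image; the remaining
    // entries follow the "cameraIdx-timestamp.*" naming convention above.
    cv::multicalib::MultiCameraCalibration calib(
        cv::multicalib::MultiCameraCalibration::PINHOLE, // camera model
        5,                          // number of cameras (assumed)
        "multi_camera_images.xml",  // image list from imagelist_creator (assumed name)
        800.0f, 600.0f);            // pattern width/height in user-defined units (assumed)
    double rms = calib.run();       // loadImages() + initialize() + optimizeExtrinsics()
    calib.writeParameters("multi_camera_params.xml");
    std::cout << "reprojection error: " << rms << std::endl;
    return 0;
}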


@@ -0,0 +1,315 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2015, Baisheng Lai (laibaisheng@gmail.com), Zhejiang University,
// all rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_OMNIDIR_HPP__
#define __OPENCV_OMNIDIR_HPP__
#include "opencv2/core.hpp"
#include "opencv2/core/affine.hpp"
#include <vector>
namespace cv
{
namespace omnidir
{
//! @addtogroup ccalib
//! @{
enum {
CALIB_USE_GUESS = 1,
CALIB_FIX_SKEW = 2,
CALIB_FIX_K1 = 4,
CALIB_FIX_K2 = 8,
CALIB_FIX_P1 = 16,
CALIB_FIX_P2 = 32,
CALIB_FIX_XI = 64,
CALIB_FIX_GAMMA = 128,
CALIB_FIX_CENTER = 256
};
enum{
RECTIFY_PERSPECTIVE = 1,
RECTIFY_CYLINDRICAL = 2,
RECTIFY_LONGLATI = 3,
RECTIFY_STEREOGRAPHIC = 4
};
enum{
XYZRGB = 1,
XYZ = 2
};
/**
* This module was accepted as a GSoC 2015 project for OpenCV, authored by
* Baisheng Lai, mentored by Bo Li.
*/
/** @brief Projects points for an omnidirectional camera using CMei's model
@param objectPoints Object points in world coordinates, vector of vector of Vec3f or Mat of
1xN/Nx1 3-channel of type CV_32F, where N is the number of points. 64F depth is also acceptable.
@param imagePoints Output array of image points, vector of vector of Vec2f or
1xN/Nx1 2-channel of type CV_32F. 64F depth is also acceptable.
@param rvec Rotation vector between world coordinates and camera coordinates, i.e., om
@param tvec Translation vector between pattern coordinates and camera coordinates
@param K Camera matrix \f$K = \vecthreethree{f_x}{s}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$.
@param D Input vector of distortion coefficients \f$(k_1, k_2, p_1, p_2)\f$.
@param xi The parameter xi for CMei's model
@param jacobian Optional output 2Nx16 Jacobian matrix of type CV_64F, containing the derivatives of
the image pixel points w.r.t. the parameters \f$om, T, f_x, f_y, s, c_x, c_y, xi, k_1, k_2, p_1, p_2\f$.
This matrix is used during calibration by optimization.
The function projects 3D object points in world coordinates to image pixels, parameterized by the
intrinsic and extrinsic parameters. Optionally, it computes a by-product: the Jacobian matrix containing
the derivatives of the image pixel points w.r.t. the intrinsic and extrinsic parameters.
*/
CV_EXPORTS_W void projectPoints(InputArray objectPoints, OutputArray imagePoints, InputArray rvec, InputArray tvec,
InputArray K, double xi, InputArray D, OutputArray jacobian = noArray());
/** @overload */
CV_EXPORTS void projectPoints(InputArray objectPoints, OutputArray imagePoints, const Affine3d& affine,
InputArray K, double xi, InputArray D, OutputArray jacobian = noArray());
/** @brief Undistort 2D image points for an omnidirectional camera using CMei's model
@param distorted Array of distorted image points, vector of Vec2f
or 1xN/Nx1 2-channel Mat of type CV_32F; 64F depth is also acceptable
@param K Camera matrix \f$K = \vecthreethree{f_x}{s}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$.
@param D Distortion coefficients \f$(k_1, k_2, p_1, p_2)\f$.
@param xi The parameter xi for CMei's model
@param R Rotation transform between the original and object space: 3x3 1-channel, or vector: 3x1/1x3
1-channel or 1x1 3-channel
@param undistorted Array of normalized object points, vector of Vec2f/Vec2d or 1xN/Nx1 2-channel Mat with the same
depth as the distorted points.
*/
CV_EXPORTS_W void undistortPoints(InputArray distorted, OutputArray undistorted, InputArray K, InputArray D, InputArray xi, InputArray R);
/** @brief Computes undistortion and rectification maps for an omnidirectional camera image, transformed by a rotation R.
It outputs two maps that are used for cv::remap(). If D is empty, zero distortion is used;
if R or P is empty, identity matrices are used.
@param K Camera matrix \f$K = \vecthreethree{f_x}{s}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$, with depth CV_32F or CV_64F
@param D Input vector of distortion coefficients \f$(k_1, k_2, p_1, p_2)\f$, with depth CV_32F or CV_64F
@param xi The parameter xi for CMei's model
@param R Rotation transform between the original and object space: 3x3 1-channel, or vector: 3x1/1x3, with depth CV_32F or CV_64F
@param P New camera matrix (3x3) or new projection matrix (3x4)
@param size Undistorted image size.
@param m1type Type of the first output map; can be CV_32FC1 or CV_16SC2. See convertMaps()
for details.
@param map1 The first output map.
@param map2 The second output map.
@param flags Flags indicating the rectification type; RECTIFY_PERSPECTIVE, RECTIFY_CYLINDRICAL, RECTIFY_LONGLATI and RECTIFY_STEREOGRAPHIC
are supported.
*/
CV_EXPORTS_W void initUndistortRectifyMap(InputArray K, InputArray D, InputArray xi, InputArray R, InputArray P, const cv::Size& size,
int m1type, OutputArray map1, OutputArray map2, int flags);
/** @brief Undistort omnidirectional images to perspective images
@param distorted The input omnidirectional image.
@param undistorted The output undistorted image.
@param K Camera matrix \f$K = \vecthreethree{f_x}{s}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$.
@param D Input vector of distortion coefficients \f$(k_1, k_2, p_1, p_2)\f$.
@param xi The parameter xi for CMei's model.
@param flags Flags indicating the rectification type; RECTIFY_PERSPECTIVE, RECTIFY_CYLINDRICAL, RECTIFY_LONGLATI and RECTIFY_STEREOGRAPHIC are supported
@param Knew Camera matrix of the undistorted (output) image. If it is not assigned, K is used.
@param new_size The new image size. By default, it is the size of the distorted image.
@param R Rotation matrix between the input and output images. By default, it is the identity matrix.
*/
CV_EXPORTS_W void undistortImage(InputArray distorted, OutputArray undistorted, InputArray K, InputArray D, InputArray xi, int flags,
InputArray Knew = cv::noArray(), const Size& new_size = Size(), InputArray R = Mat::eye(3, 3, CV_64F));
/** @brief Perform omnidirectional camera calibration; the default depth of the outputs is CV_64F.
@param objectPoints Vector of vectors of Vec3f object points in world (pattern) coordinates.
It can also be a vector of Mat with size 1xN/Nx1 and type CV_32FC3. Data with 64F depth is also acceptable.
@param imagePoints Vector of vectors of Vec2f image points corresponding to objectPoints. It must be the same
size and the same type as objectPoints.
@param size Image size of the calibration images.
@param K Output calibrated camera matrix.
@param xi Output parameter xi for CMei's model
@param D Output distortion parameters \f$(k_1, k_2, p_1, p_2)\f$
@param rvecs Output rotations for each calibration image
@param tvecs Output translations for each calibration image
@param flags The flags that control the calibration
@param criteria Termination criteria for the optimization
@param idx Indices of the images that pass initialization and are actually used in calibration, so the size of
rvecs is idx.total().
*/
CV_EXPORTS_W double calibrate(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints, Size size,
InputOutputArray K, InputOutputArray xi, InputOutputArray D, OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs,
int flags, TermCriteria criteria, OutputArray idx=noArray());
/** @brief Stereo calibration for the omnidirectional camera model. It computes the intrinsic parameters for two
cameras and the extrinsic parameters between the two cameras. The default depth of the outputs is CV_64F.
@param objectPoints Object points in world (pattern) coordinates. Its type is vector<vector<Vec3f> >.
It can also be a vector of Mat with size 1xN/Nx1 and type CV_32FC3. Data with 64F depth is also acceptable.
@param imagePoints1 The corresponding image points of the first camera, with type vector<vector<Vec2f> >.
It must be the same size and the same type as objectPoints.
@param imagePoints2 The corresponding image points of the second camera, with type vector<vector<Vec2f> >.
It must be the same size and the same type as objectPoints.
@param imageSize1 Image size of the calibration images of the first camera.
@param imageSize2 Image size of the calibration images of the second camera.
@param K1 Output camera matrix for the first camera.
@param xi1 Output parameter xi of CMei's model for the first camera
@param D1 Output distortion parameters \f$(k_1, k_2, p_1, p_2)\f$ for the first camera
@param K2 Output camera matrix for the second camera.
@param xi2 Output parameter xi of CMei's model for the second camera
@param D2 Output distortion parameters \f$(k_1, k_2, p_1, p_2)\f$ for the second camera
@param rvec Output rotation between the first and second cameras
@param tvec Output translation between the first and second cameras
@param rvecsL Output rotation for each image of the first camera
@param tvecsL Output translation for each image of the first camera
@param flags The flags that control stereoCalibrate
@param criteria Termination criteria for the optimization
@param idx Indices of the image pairs that pass initialization and are actually used in calibration, so the size of
rvecsL is idx.total().
*/
CV_EXPORTS_W double stereoCalibrate(InputOutputArrayOfArrays objectPoints, InputOutputArrayOfArrays imagePoints1, InputOutputArrayOfArrays imagePoints2,
const Size& imageSize1, const Size& imageSize2, InputOutputArray K1, InputOutputArray xi1, InputOutputArray D1, InputOutputArray K2, InputOutputArray xi2,
InputOutputArray D2, OutputArray rvec, OutputArray tvec, OutputArrayOfArrays rvecsL, OutputArrayOfArrays tvecsL, int flags, TermCriteria criteria, OutputArray idx=noArray());
/** @brief Stereo rectification for omnidirectional camera model. It computes the rectification rotations for two cameras
@param R Rotation between the first and second camera
@param T Translation between the first and second camera
@param R1 Output 3x3 rotation matrix for the first camera
@param R2 Output 3x3 rotation matrix for the second camera
*/
CV_EXPORTS_W void stereoRectify(InputArray R, InputArray T, OutputArray R1, OutputArray R2);
/** @brief Stereo 3D reconstruction from a pair of images
@param image1 The first input image
@param image2 The second input image
@param K1 Input camera matrix of the first camera
@param D1 Input distortion parameters \f$(k_1, k_2, p_1, p_2)\f$ for the first camera
@param xi1 Input parameter xi for the first camera for CMei's model
@param K2 Input camera matrix of the second camera
@param D2 Input distortion parameters \f$(k_1, k_2, p_1, p_2)\f$ for the second camera
@param xi2 Input parameter xi for the second camera for CMei's model
@param R Rotation between the first and second camera
@param T Translation between the first and second camera
@param flag Flag of the rectification type; RECTIFY_PERSPECTIVE or RECTIFY_LONGLATI are supported
@param numDisparities The parameter 'numDisparities' in StereoSGBM, see StereoSGBM for details.
@param SADWindowSize The parameter 'SADWindowSize' in StereoSGBM, see StereoSGBM for details.
@param disparity Disparity map generated by stereo matching
@param image1Rec Rectified image of the first image
@param image2Rec Rectified image of the second image
@param newSize Image size of rectified image, see omnidir::undistortImage
@param Knew New camera matrix of rectified image, see omnidir::undistortImage
@param pointCloud Point cloud of 3D reconstruction, with type CV_64FC3
@param pointType Point cloud type, it can be XYZRGB or XYZ
*/
CV_EXPORTS_W void stereoReconstruct(InputArray image1, InputArray image2, InputArray K1, InputArray D1, InputArray xi1,
InputArray K2, InputArray D2, InputArray xi2, InputArray R, InputArray T, int flag, int numDisparities, int SADWindowSize,
OutputArray disparity, OutputArray image1Rec, OutputArray image2Rec, const Size& newSize = Size(), InputArray Knew = cv::noArray(),
OutputArray pointCloud = cv::noArray(), int pointType = XYZRGB);
namespace internal
{
void initializeCalibration(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints, Size size, OutputArrayOfArrays omAll,
OutputArrayOfArrays tAll, OutputArray K, double& xi, OutputArray idx = noArray());
void initializeStereoCalibration(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints1, InputArrayOfArrays imagePoints2,
const Size& size1, const Size& size2, OutputArray om, OutputArray T, OutputArrayOfArrays omL, OutputArrayOfArrays tL, OutputArray K1, OutputArray D1, OutputArray K2, OutputArray D2,
double &xi1, double &xi2, int flags, OutputArray idx);
void computeJacobian(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints, InputArray parameters, Mat& JTJ_inv, Mat& JTE, int flags,
double epsilon);
void computeJacobianStereo(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints1, InputArrayOfArrays imagePoints2,
InputArray parameters, Mat& JTJ_inv, Mat& JTE, int flags, double epsilon);
void encodeParameters(InputArray K, InputArrayOfArrays omAll, InputArrayOfArrays tAll, InputArray distortion, double xi, OutputArray parameters);
void encodeParametersStereo(InputArray K1, InputArray K2, InputArray om, InputArray T, InputArrayOfArrays omL, InputArrayOfArrays tL,
InputArray D1, InputArray D2, double xi1, double xi2, OutputArray parameters);
void decodeParameters(InputArray parameters, OutputArray K, OutputArrayOfArrays omAll, OutputArrayOfArrays tAll, OutputArray distortion, double& xi);
void decodeParametersStereo(InputArray parameters, OutputArray K1, OutputArray K2, OutputArray om, OutputArray T, OutputArrayOfArrays omL,
OutputArrayOfArrays tL, OutputArray D1, OutputArray D2, double& xi1, double& xi2);
void estimateUncertainties(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints, InputArray parameters, Mat& errors, Vec2d& std_error, double& rms, int flags);
void estimateUncertaintiesStereo(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints1, InputArrayOfArrays imagePoints2, InputArray parameters, Mat& errors,
Vec2d& std_error, double& rms, int flags);
double computeMeanReproErr(InputArrayOfArrays imagePoints, InputArrayOfArrays proImagePoints);
double computeMeanReproErr(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints, InputArray K, InputArray D, double xi, InputArrayOfArrays omAll,
InputArrayOfArrays tAll);
double computeMeanReproErrStereo(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints1, InputArrayOfArrays imagePoints2, InputArray K1, InputArray K2,
InputArray D1, InputArray D2, double xi1, double xi2, InputArray om, InputArray T, InputArrayOfArrays omL, InputArrayOfArrays TL);
void subMatrix(const Mat& src, Mat& dst, const std::vector<int>& cols, const std::vector<int>& rows);
void flags2idx(int flags, std::vector<int>& idx, int n);
void flags2idxStereo(int flags, std::vector<int>& idx, int n);
void fillFixed(Mat& G, int flags, int n);
void fillFixedStereo(Mat& G, int flags, int n);
double findMedian(const Mat& row);
Vec3d findMedian3(InputArray mat);
void getInterset(InputArray idx1, InputArray idx2, OutputArray inter1, OutputArray inter2, OutputArray inter_ori);
void compose_motion(InputArray _om1, InputArray _T1, InputArray _om2, InputArray _T2, Mat& om3, Mat& T3, Mat& dom3dom1,
Mat& dom3dT1, Mat& dom3dom2, Mat& dom3dT2, Mat& dT3dom1, Mat& dT3dT1, Mat& dT3dom2, Mat& dT3dT2);
//void JRodriguesMatlab(const Mat& src, Mat& dst);
//void dAB(InputArray A, InputArray B, OutputArray dABdA, OutputArray dABdB);
} // internal
//! @}
} // omnidir
} //cv
#endif
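
Taken together, the declarations above suggest the following single-camera workflow. A minimal, hypothetical sketch (not part of this commit), assuming the 3D-2D correspondences have already been detected elsewhere; the image size and file name are placeholders.

#include "opencv2/ccalib/omnidir.hpp"
#include "opencv2/imgcodecs.hpp"
#include <vector>

void calibrateAndUndistort()
{
    // 3D-2D correspondences, one entry per calibration view (assumed to be
    // filled in elsewhere, e.g. by a pattern detector).
    std::vector<std::vector<cv::Vec3f> > objectPoints;
    std::vector<std::vector<cv::Vec2f> > imagePoints;
    cv::Size imageSize(1280, 1024); // assumed sensor resolution

    cv::Mat K, D, xi, idx;
    std::vector<cv::Mat> rvecs, tvecs;
    cv::TermCriteria criteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 200, 1e-8);
    // Fix the skew term; all other intrinsics and the extrinsics are estimated.
    double rms = cv::omnidir::calibrate(objectPoints, imagePoints, imageSize,
        K, xi, D, rvecs, tvecs, cv::omnidir::CALIB_FIX_SKEW, criteria, idx);
    (void)rms;

    // Undistort one view to a perspective image using the estimated model.
    cv::Mat distorted = cv::imread("view0.jpg"), undistorted;
    cv::omnidir::undistortImage(distorted, undistorted, K, D, xi,
        cv::omnidir::RECTIFY_PERSPECTIVE);
}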


@@ -0,0 +1,184 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2015, Baisheng Lai (laibaisheng@gmail.com), Zhejiang University,
// all rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_RANDOMPATTERN_HPP__
#define __OPENCV_RANDOMPATTERN_HPP__
#include "opencv2/features2d.hpp"
#include "opencv2/highgui.hpp"
namespace cv { namespace randpattern {
//! @addtogroup ccalib
//! @{
/** @brief Class for finding feature points and the corresponding 3D points, in world coordinates, of
a "random" pattern, which can be used in calibration. It is useful when the pattern is
partly occluded or when only a part of the pattern can be observed in a multiple-camera calibration.
The pattern can be generated by the RandomPatternGenerator class described in this file.
Please refer to the paper
B. Li, L. Heng, K. Kevin and M. Pollefeys, "A Multiple-Camera System
Calibration Toolbox Using A Feature Descriptor-Based Calibration
Pattern", in IROS 2013.
*/
class CV_EXPORTS RandomPatternCornerFinder
{
public:
/* @brief Construct a RandomPatternCornerFinder object
@param patternWidth the physical width of the "random" pattern in a user-defined unit.
@param patternHeight the physical height of the "random" pattern in a user-defined unit.
@param nminiMatch minimal number of matched features; an image with fewer matches is abandoned
@param depth depth of the output objectPoints and imagePoints, either CV_32F or CV_64F.
@param verbose verbosity level for logging output.
@param showExtraction whether to show feature extraction, 0 for no and 1 for yes.
@param detector feature detector to detect feature points in the pattern and images.
@param descriptor feature descriptor.
@param matcher feature matcher.
*/
RandomPatternCornerFinder(float patternWidth, float patternHeight,
int nminiMatch = 20, int depth = CV_32F, int verbose = 0, int showExtraction = 0,
Ptr<FeatureDetector> detector = AKAZE::create(AKAZE::DESCRIPTOR_MLDB, 0, 3, 0.005f),
Ptr<DescriptorExtractor> descriptor = AKAZE::create(AKAZE::DESCRIPTOR_MLDB,0, 3, 0.005f),
Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce-L1"));
/* @brief Load the pattern image and compute features for the pattern
@param patternImage image of the "random" pattern generated by RandomPatternGenerator; run the generator first.
*/
void loadPattern(const cv::Mat &patternImage);
/* @brief Load the pattern and its features
@param patternImage image of the "random" pattern generated by RandomPatternGenerator; run the generator first.
@param patternKeyPoints keypoints created by a FeatureDetector.
@param patternDescriptors descriptors created by a DescriptorExtractor.
*/
void loadPattern(const cv::Mat &patternImage, const std::vector<cv::KeyPoint> &patternKeyPoints, const cv::Mat &patternDescriptors);
/* @brief Compute the matched object points and image points that are used for calibration
The objectPoints (3D) and imagePoints (2D) are stored inside the class; call getObjectPoints()
and getImagePoints() to retrieve them.
@param inputImages vector of 8-bit grayscale images containing the "random" pattern,
used for calibration.
*/
void computeObjectImagePoints(std::vector<cv::Mat> inputImages);
//void computeObjectImagePoints2(std::vector<cv::Mat> inputImages);
/* @brief Compute object and image points for a single image. It returns a vector<Mat> whose
first element stores the imagePoints and whose second element stores the objectPoints.
@param inputImage single input image for calibration
*/
std::vector<cv::Mat> computeObjectImagePointsForSingle(cv::Mat inputImage);
/* @brief Get object (3D) points
*/
const std::vector<cv::Mat> &getObjectPoints();
/* @brief Get image (2D) points
*/
const std::vector<cv::Mat> &getImagePoints();
private:
std::vector<cv::Mat> _objectPonits, _imagePoints;
float _patternWidth, _patternHeight;
cv::Size _patternImageSize;
int _nminiMatch;
int _depth;
int _verbose;
Ptr<FeatureDetector> _detector;
Ptr<DescriptorExtractor> _descriptor;
Ptr<DescriptorMatcher> _matcher;
Mat _descriptorPattern;
std::vector<cv::KeyPoint> _keypointsPattern;
Mat _patternImage;
int _showExtraction;
void keyPoints2MatchedLocation(const std::vector<cv::KeyPoint>& imageKeypoints,
const std::vector<cv::KeyPoint>& patternKeypoints, const std::vector<cv::DMatch> matches,
cv::Mat& matchedImagelocation, cv::Mat& matchedPatternLocation);
void getFilteredLocation(cv::Mat& imageKeypoints, cv::Mat& patternKeypoints, const cv::Mat mask);
void getObjectImagePoints(const cv::Mat& imageKeypoints, const cv::Mat& patternKeypoints);
void crossCheckMatching( cv::Ptr<DescriptorMatcher>& descriptorMatcher,
const Mat& descriptors1, const Mat& descriptors2,
std::vector<DMatch>& filteredMatches12, int knn=1 );
void drawCorrespondence(const Mat& image1, const std::vector<cv::KeyPoint> keypoint1,
const Mat& image2, const std::vector<cv::KeyPoint> keypoint2, const std::vector<cv::DMatch> matches,
const Mat& mask1, const Mat& mask2, const int step);
};
/* @brief Class to generate the "random" pattern images used by RandomPatternCornerFinder
Please refer to the paper
B. Li, L. Heng, K. Kevin and M. Pollefeys, "A Multiple-Camera System
Calibration Toolbox Using A Feature Descriptor-Based Calibration
Pattern", in IROS 2013.
*/
class CV_EXPORTS RandomPatternGenerator
{
public:
/* @brief Construct RandomPatternGenerator
@param imageWidth image width of the generated pattern image
@param imageHeight image height of the generated pattern image
*/
RandomPatternGenerator(int imageWidth, int imageHeight);
/* @brief Generate pattern
*/
void generatePattern();
/* @brief Get pattern
*/
cv::Mat getPattern();
private:
cv::Mat _pattern;
int _imageWidth, _imageHeight;
};
//! @}
}} //namespace randpattern, cv
#endif
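
A minimal end-to-end sketch for the two classes above (not part of this commit): generate a pattern, print it at a known physical size, then recover matched 2D/3D points from photos of it. The pixel and physical sizes are illustrative assumptions.

#include "opencv2/ccalib/randpattern.hpp"
#include "opencv2/imgcodecs.hpp"
#include <vector>

void prepareCalibrationPoints()
{
    // Generate a 600x850-pixel pattern and save it so it can be printed.
    cv::randpattern::RandomPatternGenerator generator(600, 850);
    generator.generatePattern();
    cv::Mat pattern = generator.getPattern();
    cv::imwrite("random_pattern.png", pattern);

    // Physical pattern size in a user-defined unit (e.g. millimeters, assumed).
    cv::randpattern::RandomPatternCornerFinder finder(600.0f, 850.0f);
    finder.loadPattern(pattern);

    std::vector<cv::Mat> views; // 8-bit grayscale photos of the printed pattern
    finder.computeObjectImagePoints(views);
    const std::vector<cv::Mat>& objectPoints = finder.getObjectPoints();
    const std::vector<cv::Mat>& imagePoints  = finder.getImagePoints();
    (void)objectPoints; (void)imagePoints;
}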