Add project files.

This commit is contained in:
CaiXiang
2025-06-09 09:09:25 +08:00
parent 75b909652e
commit 88acb23465
1054 changed files with 615623 additions and 0 deletions

BIN
3rdparty/opencv/dll/opencv_aruco454.dll vendored Normal file

Binary file not shown.

BIN
3rdparty/opencv/dll/opencv_bgsegm454.dll vendored Normal file

Binary file not shown.

BIN
3rdparty/opencv/dll/opencv_ccalib454.dll vendored Normal file

Binary file not shown.

BIN
3rdparty/opencv/dll/opencv_core454.dll vendored Normal file

Binary file not shown.

BIN
3rdparty/opencv/dll/opencv_dnn454.dll vendored Normal file

Binary file not shown.

BIN
3rdparty/opencv/dll/opencv_dpm454.dll vendored Normal file

Binary file not shown.

BIN
3rdparty/opencv/dll/opencv_face454.dll vendored Normal file

Binary file not shown.

BIN
3rdparty/opencv/dll/opencv_flann454.dll vendored Normal file

Binary file not shown.

BIN
3rdparty/opencv/dll/opencv_fuzzy454.dll vendored Normal file

Binary file not shown.

BIN
3rdparty/opencv/dll/opencv_gapi454.dll vendored Normal file

Binary file not shown.

BIN
3rdparty/opencv/dll/opencv_hfs454.dll vendored Normal file

Binary file not shown.

BIN
3rdparty/opencv/dll/opencv_mcc454.dll vendored Normal file

Binary file not shown.

BIN
3rdparty/opencv/dll/opencv_ml454.dll vendored Normal file

Binary file not shown.

BIN
3rdparty/opencv/dll/opencv_photo454.dll vendored Normal file

Binary file not shown.

BIN
3rdparty/opencv/dll/opencv_plot454.dll vendored Normal file

Binary file not shown.

BIN
3rdparty/opencv/dll/opencv_rapid454.dll vendored Normal file

Binary file not shown.

BIN
3rdparty/opencv/dll/opencv_reg454.dll vendored Normal file

Binary file not shown.

BIN
3rdparty/opencv/dll/opencv_rgbd454.dll vendored Normal file

Binary file not shown.

BIN
3rdparty/opencv/dll/opencv_shape454.dll vendored Normal file

Binary file not shown.

BIN
3rdparty/opencv/dll/opencv_stereo454.dll vendored Normal file

Binary file not shown.

BIN
3rdparty/opencv/dll/opencv_text454.dll vendored Normal file

Binary file not shown.

BIN
3rdparty/opencv/dll/opencv_video454.dll vendored Normal file

Binary file not shown.

BIN
3rdparty/opencv/dll/opencv_xphoto454.dll vendored Normal file

Binary file not shown.

616
3rdparty/opencv/inc/opencv2/aruco.hpp vendored Normal file

@@ -0,0 +1,616 @@
/*
By downloading, copying, installing or using the software you agree to this
license. If you do not agree to this license, do not download, install,
copy or use the software.
License Agreement
For Open Source Computer Vision Library
(3-clause BSD License)
Copyright (C) 2013, OpenCV Foundation, all rights reserved.
Third party copyrights are property of their respective owners.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the names of the copyright holders nor the names of the contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
This software is provided by the copyright holders and contributors "as is" and
any express or implied warranties, including, but not limited to, the implied
warranties of merchantability and fitness for a particular purpose are
disclaimed. In no event shall copyright holders or contributors be liable for
any direct, indirect, incidental, special, exemplary, or consequential damages
(including, but not limited to, procurement of substitute goods or services;
loss of use, data, or profits; or business interruption) however caused
and on any theory of liability, whether in contract, strict liability,
or tort (including negligence or otherwise) arising in any way out of
the use of this software, even if advised of the possibility of such damage.
*/
#ifndef __OPENCV_ARUCO_HPP__
#define __OPENCV_ARUCO_HPP__
#include <opencv2/core.hpp>
#include <vector>
#include "opencv2/aruco/dictionary.hpp"
/**
* @defgroup aruco ArUco Marker Detection
* This module is dedicated to square fiducial markers (also known as Augmented Reality Markers).
* These markers are useful for easy, fast and robust camera pose estimation.
*
* The main functionalities are:
* - Detection of markers in an image
* - Pose estimation from a single marker or from a board/set of markers
* - Detection of ChArUco board for high subpixel accuracy
* - Camera calibration from both ArUco boards and ChArUco boards.
* - Detection of ChArUco diamond markers
* The samples directory includes easy examples of how to use the module.
*
* The implementation is based on the ArUco Library by R. Muñoz-Salinas and S. Garrido-Jurado @cite Aruco2014.
*
* Markers can also be detected based on the AprilTag 2 @cite wang2016iros fiducial detection method.
*
* @sa S. Garrido-Jurado, R. Muñoz-Salinas, F. J. Madrid-Cuevas, and M. J. Marín-Jiménez. 2014.
* "Automatic generation and detection of highly reliable fiducial markers under occlusion".
* Pattern Recogn. 47, 6 (June 2014), 2280-2292. DOI=10.1016/j.patcog.2014.01.005
*
* @sa http://www.uco.es/investiga/grupos/ava/node/26
*
* This module has been originally developed by Sergio Garrido-Jurado as a project
* for Google Summer of Code 2015 (GSoC 15).
*
*
*/
namespace cv {
namespace aruco {
//! @addtogroup aruco
//! @{
enum CornerRefineMethod{
CORNER_REFINE_NONE, ///< Tag and corners detection based on the ArUco approach
CORNER_REFINE_SUBPIX, ///< ArUco approach and refine the corners locations using corner subpixel accuracy
CORNER_REFINE_CONTOUR, ///< ArUco approach and refine the corners locations using the contour-points line fitting
CORNER_REFINE_APRILTAG, ///< Tag and corners detection based on the AprilTag 2 approach @cite wang2016iros
};
/**
* @brief Parameters for the detectMarker process:
* - adaptiveThreshWinSizeMin: minimum window size for adaptive thresholding before finding
* contours (default 3).
* - adaptiveThreshWinSizeMax: maximum window size for adaptive thresholding before finding
* contours (default 23).
* - adaptiveThreshWinSizeStep: increments from adaptiveThreshWinSizeMin to adaptiveThreshWinSizeMax
* during the thresholding (default 10).
* - adaptiveThreshConstant: constant for adaptive thresholding before finding contours (default 7)
* - minMarkerPerimeterRate: determines the minimum perimeter for a marker contour to be detected.
* This is defined as a rate with respect to the maximum dimension of the input image (default 0.03).
* - maxMarkerPerimeterRate: determines the maximum perimeter for a marker contour to be detected.
* This is defined as a rate with respect to the maximum dimension of the input image (default 4.0).
* - polygonalApproxAccuracyRate: minimum accuracy during the polygonal approximation process to
* determine which contours are squares. (default 0.03)
* - minCornerDistanceRate: minimum distance between corners for detected markers relative to its
* perimeter (default 0.05)
* - minDistanceToBorder: minimum distance of any corner to the image border for detected markers
* (in pixels) (default 3)
* - minMarkerDistanceRate: minimum mean distance between two marker corners to be considered
* similar, so that the smaller one is removed. The rate is relative to the smaller perimeter
* of the two markers (default 0.05).
* - cornerRefinementMethod: corner refinement method. (CORNER_REFINE_NONE, no refinement.
* CORNER_REFINE_SUBPIX, do subpixel refinement. CORNER_REFINE_CONTOUR use contour-Points,
* CORNER_REFINE_APRILTAG use the AprilTag2 approach). (default CORNER_REFINE_NONE)
* - cornerRefinementWinSize: window size for the corner refinement process (in pixels) (default 5).
* - cornerRefinementMaxIterations: maximum number of iterations for stop criteria of the corner
* refinement process (default 30).
* - cornerRefinementMinAccuracy: minimum error for the stop criteria of the corner refinement
* process (default: 0.1)
* - markerBorderBits: number of bits of the marker border, i.e. marker border width (default 1).
* - perspectiveRemovePixelPerCell: number of bits (per dimension) for each cell of the marker
* when removing the perspective (default 4).
* - perspectiveRemoveIgnoredMarginPerCell: width of the margin of pixels on each cell not
* considered for the determination of the cell bit. Represents the rate with respect to the total
* size of the cell, i.e. perspectiveRemovePixelPerCell (default 0.13)
* - maxErroneousBitsInBorderRate: maximum number of accepted erroneous bits in the border (i.e.
* number of allowed white bits in the border). Represented as a rate with respect to the total
* number of bits per marker (default 0.35).
* - minOtsuStdDev: minimum standard deviation in pixel values during the decoding step to
* apply Otsu thresholding (otherwise, all the bits are set to 0 or 1 depending on whether the
* mean is higher than 128 or not) (default 5.0)
* - errorCorrectionRate: error correction rate with respect to the maximum error correction
* capability of each dictionary (default 0.6).
* - aprilTagMinClusterPixels: reject quads containing too few pixels. (default 5)
* - aprilTagMaxNmaxima: how many corner candidates to consider when segmenting a group of pixels into a quad. (default 10)
* - aprilTagCriticalRad: Reject quads where pairs of edges have angles that are close to straight or close to
* 180 degrees. Zero means that no quads are rejected. (In radians) (default 10*PI/180)
* - aprilTagMaxLineFitMse: When fitting lines to the contours, what is the maximum mean squared error
* allowed? This is useful in rejecting contours that are far from being quad shaped; rejecting
* these quads "early" saves expensive decoding processing. (default 10.0)
* - aprilTagMinWhiteBlackDiff: When we build our model of black & white pixels, we add an extra check that
* the white model must be (overall) brighter than the black model. How much brighter? (in pixel values, [0,255]). (default 5)
* - aprilTagDeglitch: should the thresholded image be deglitched? Only useful for very noisy images. (default 0)
* - aprilTagQuadDecimate: Detection of quads can be done on a lower-resolution image, improving speed at a
* cost of pose accuracy and a slight decrease in detection rate. Decoding the binary payload is still
* done at full resolution. (default 0.0)
* - aprilTagQuadSigma: what Gaussian blur should be applied to the segmented image (used for quad detection)?
* The parameter is the standard deviation in pixels. Very noisy images benefit from non-zero values (e.g. 0.8). (default 0.0)
* - detectInvertedMarker: to check if there is a white marker. In order to generate a "white" marker just
* invert a normal marker by using a tilde, ~markerImage. (default false)
*/
struct CV_EXPORTS_W DetectorParameters {
DetectorParameters();
CV_WRAP static Ptr<DetectorParameters> create();
CV_PROP_RW int adaptiveThreshWinSizeMin;
CV_PROP_RW int adaptiveThreshWinSizeMax;
CV_PROP_RW int adaptiveThreshWinSizeStep;
CV_PROP_RW double adaptiveThreshConstant;
CV_PROP_RW double minMarkerPerimeterRate;
CV_PROP_RW double maxMarkerPerimeterRate;
CV_PROP_RW double polygonalApproxAccuracyRate;
CV_PROP_RW double minCornerDistanceRate;
CV_PROP_RW int minDistanceToBorder;
CV_PROP_RW double minMarkerDistanceRate;
CV_PROP_RW int cornerRefinementMethod;
CV_PROP_RW int cornerRefinementWinSize;
CV_PROP_RW int cornerRefinementMaxIterations;
CV_PROP_RW double cornerRefinementMinAccuracy;
CV_PROP_RW int markerBorderBits;
CV_PROP_RW int perspectiveRemovePixelPerCell;
CV_PROP_RW double perspectiveRemoveIgnoredMarginPerCell;
CV_PROP_RW double maxErroneousBitsInBorderRate;
CV_PROP_RW double minOtsuStdDev;
CV_PROP_RW double errorCorrectionRate;
// April :: User-configurable parameters.
CV_PROP_RW float aprilTagQuadDecimate;
CV_PROP_RW float aprilTagQuadSigma;
// April :: Internal variables
CV_PROP_RW int aprilTagMinClusterPixels;
CV_PROP_RW int aprilTagMaxNmaxima;
CV_PROP_RW float aprilTagCriticalRad;
CV_PROP_RW float aprilTagMaxLineFitMse;
CV_PROP_RW int aprilTagMinWhiteBlackDiff;
CV_PROP_RW int aprilTagDeglitch;
// to detect white (inverted) markers
CV_PROP_RW bool detectInvertedMarker;
};
/**
* @brief Basic marker detection
*
* @param image input image
* @param dictionary indicates the type of markers that will be searched
* @param corners vector of detected marker corners. For each marker, its four corners
* are provided, (e.g std::vector<std::vector<cv::Point2f> > ). For N detected markers,
* the dimensions of this array is Nx4. The order of the corners is clockwise.
* @param ids vector of identifiers of the detected markers. The identifier is of type int
* (e.g. std::vector<int>). For N detected markers, the size of ids is also N.
* The identifiers have the same order as the markers in the imgPoints array.
* @param parameters marker detection parameters
* @param rejectedImgPoints contains the imgPoints of those squares whose inner code does not have a
* correct codification. Useful for debugging purposes.
* @param cameraMatrix optional input 3x3 floating-point camera matrix
* \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$
* @param distCoeff optional vector of distortion coefficients
* \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements
*
* Performs marker detection in the input image. Only markers included in the specific dictionary
* are searched. For each detected marker, it returns the 2D position of its corner in the image
* and its corresponding identifier.
* Note that this function does not perform pose estimation.
* @sa estimatePoseSingleMarkers, estimatePoseBoard
*
*/
CV_EXPORTS_W void detectMarkers(InputArray image, const Ptr<Dictionary> &dictionary, OutputArrayOfArrays corners,
OutputArray ids, const Ptr<DetectorParameters> &parameters = DetectorParameters::create(),
OutputArrayOfArrays rejectedImgPoints = noArray(), InputArray cameraMatrix= noArray(), InputArray distCoeff= noArray());
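/* A minimal usage sketch for detectMarkers() (illustrative only; the file name "frame.png"
 * and the chosen dictionary are assumptions, not part of this header):
 *
 *   #include <opencv2/aruco.hpp>
 *   #include <opencv2/imgcodecs.hpp>
 *
 *   cv::Mat frame = cv::imread("frame.png");
 *   cv::Ptr<cv::aruco::Dictionary> dict =
 *       cv::aruco::getPredefinedDictionary(cv::aruco::DICT_6X6_250);
 *   cv::Ptr<cv::aruco::DetectorParameters> params = cv::aruco::DetectorParameters::create();
 *   std::vector<std::vector<cv::Point2f>> corners, rejected;   // Nx4 corners, clockwise
 *   std::vector<int> ids;                                      // one id per detected marker
 *   cv::aruco::detectMarkers(frame, dict, corners, ids, params, rejected);
 */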
/**
* @brief Pose estimation for single markers
*
* @param corners vector of already detected markers corners. For each marker, its four corners
* are provided, (e.g std::vector<std::vector<cv::Point2f> > ). For N detected markers,
* the dimensions of this array should be Nx4. The order of the corners should be clockwise.
* @sa detectMarkers
* @param markerLength the length of the markers' side. The returned translation vectors will
* be in the same unit. Normally, unit is meters.
* @param cameraMatrix input 3x3 floating-point camera matrix
* \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$
* @param distCoeffs vector of distortion coefficients
* \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements
* @param rvecs array of output rotation vectors (@sa Rodrigues) (e.g. std::vector<cv::Vec3d>).
* Each element in rvecs corresponds to the specific marker in imgPoints.
* @param tvecs array of output translation vectors (e.g. std::vector<cv::Vec3d>).
* Each element in tvecs corresponds to the specific marker in imgPoints.
* @param _objPoints array of object points of all the marker corners
*
* This function receives the detected markers and returns their pose estimation with respect to
* the camera individually. So for each marker, one rotation and one translation vector are returned.
* The returned transformation is the one that transforms points from each marker coordinate system
* to the camera coordinate system.
* The marker coordinate system is centered on the middle of the marker, with the Z axis
* perpendicular to the marker plane.
* The coordinates of the four corners of the marker in its own coordinate system are:
* (-markerLength/2, markerLength/2, 0), (markerLength/2, markerLength/2, 0),
* (markerLength/2, -markerLength/2, 0), (-markerLength/2, -markerLength/2, 0)
*/
CV_EXPORTS_W void estimatePoseSingleMarkers(InputArrayOfArrays corners, float markerLength,
InputArray cameraMatrix, InputArray distCoeffs,
OutputArray rvecs, OutputArray tvecs, OutputArray _objPoints = noArray());
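/* A minimal sketch of single-marker pose estimation, continuing from the detectMarkers()
 * sketch above (the calibration values below are placeholders, not real data):
 *
 *   const float markerLength = 0.05f;   // marker side length in meters (assumed)
 *   cv::Mat cameraMatrix = (cv::Mat_<double>(3, 3) << 800, 0, 320, 0, 800, 240, 0, 0, 1);
 *   cv::Mat distCoeffs = cv::Mat::zeros(5, 1, CV_64F);
 *   std::vector<cv::Vec3d> rvecs, tvecs;          // one rotation/translation per marker
 *   cv::aruco::estimatePoseSingleMarkers(corners, markerLength, cameraMatrix, distCoeffs,
 *                                        rvecs, tvecs);
 */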
/**
* @brief Board of markers
*
* A board is a set of markers in the 3D space with a common coordinate system.
* The common form of a board of markers is a planar (2D) board; however, any 3D layout can be used.
* A Board object is composed of:
* - The object points of the marker corners, i.e. their coordinates with respect to the board system.
* - The dictionary which indicates the type of markers of the board
* - The identifier of all the markers in the board.
*/
class CV_EXPORTS_W Board {
public:
/**
* @brief Provides a way to create a Board by passing the necessary data. Especially needed in Python.
*
* @param objPoints array of object points of all the marker corners in the board
* @param dictionary the dictionary of markers employed for this board
* @param ids vector of the identifiers of the markers in the board
*
*/
CV_WRAP static Ptr<Board> create(InputArrayOfArrays objPoints, const Ptr<Dictionary> &dictionary, InputArray ids);
/**
* @brief Set ids vector
*
* @param ids vector of the identifiers of the markers in the board (should be the same size
* as objPoints)
*
* Recommended way to set ids vector, which will fail if the size of ids does not match size
* of objPoints.
*/
CV_WRAP void setIds(InputArray ids);
/// array of object points of all the marker corners in the board
/// each marker includes its 4 corners in CCW order. For M markers, the size is Mx4.
CV_PROP std::vector< std::vector< Point3f > > objPoints;
/// the dictionary of markers employed for this board
CV_PROP Ptr<Dictionary> dictionary;
/// vector of the identifiers of the markers in the board (same size as objPoints)
/// The identifiers refer to the board dictionary
CV_PROP_RW std::vector< int > ids;
};
/**
* @brief Planar board with grid arrangement of markers
* The most common type of board. All markers are placed in the same plane in a grid arrangement.
* The board can be drawn using the drawPlanarBoard() function (@sa drawPlanarBoard)
*/
class CV_EXPORTS_W GridBoard : public Board {
public:
/**
* @brief Draw a GridBoard
*
* @param outSize size of the output image in pixels.
* @param img output image with the board. The size of this image will be outSize
* and the board will be on the center, keeping the board proportions.
* @param marginSize minimum margins (in pixels) of the board in the output image
* @param borderBits width of the marker borders.
*
* This function returns the image of the GridBoard, ready to be printed.
*/
CV_WRAP void draw(Size outSize, OutputArray img, int marginSize = 0, int borderBits = 1);
/**
* @brief Create a GridBoard object
*
* @param markersX number of markers in X direction
* @param markersY number of markers in Y direction
* @param markerLength marker side length (normally in meters)
* @param markerSeparation separation between two markers (same unit as markerLength)
* @param dictionary dictionary of markers indicating the type of markers
* @param firstMarker id of first marker in dictionary to use on board.
* @return the output GridBoard object
*
* This function creates a GridBoard object given the number of markers in each direction and
* the marker size and marker separation.
*/
CV_WRAP static Ptr<GridBoard> create(int markersX, int markersY, float markerLength,
float markerSeparation, const Ptr<Dictionary> &dictionary, int firstMarker = 0);
/**
*
*/
CV_WRAP Size getGridSize() const { return Size(_markersX, _markersY); }
/**
*
*/
CV_WRAP float getMarkerLength() const { return _markerLength; }
/**
*
*/
CV_WRAP float getMarkerSeparation() const { return _markerSeparation; }
private:
// number of markers in X and Y directions
int _markersX, _markersY;
// marker side length (normally in meters)
float _markerLength;
// separation between markers in the grid
float _markerSeparation;
};
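/* A minimal sketch of creating and rendering a GridBoard (grid dimensions, marker length,
 * separation and the output file name are arbitrary example values; cv::imwrite needs
 * <opencv2/imgcodecs.hpp>):
 *
 *   cv::Ptr<cv::aruco::Dictionary> dict =
 *       cv::aruco::getPredefinedDictionary(cv::aruco::DICT_6X6_250);
 *   cv::Ptr<cv::aruco::GridBoard> board =
 *       cv::aruco::GridBoard::create(5, 7, 0.04f, 0.01f, dict);   // 5x7 markers, 4 cm side, 1 cm gap
 *   cv::Mat boardImage;
 *   board->draw(cv::Size(600, 800), boardImage, 10, 1);           // 10 px margin, 1-bit border
 *   cv::imwrite("gridboard.png", boardImage);
 */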
/**
* @brief Pose estimation for a board of markers
*
* @param corners vector of already detected markers corners. For each marker, its four corners
* are provided, (e.g std::vector<std::vector<cv::Point2f> > ). For N detected markers, the
* dimensions of this array should be Nx4. The order of the corners should be clockwise.
* @param ids list of identifiers for each marker in corners
* @param board layout of markers in the board. The layout is composed by the marker identifiers
* and the positions of each marker corner in the board reference system.
* @param cameraMatrix input 3x3 floating-point camera matrix
* \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$
* @param distCoeffs vector of distortion coefficients
* \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements
* @param rvec Output vector (e.g. cv::Mat) corresponding to the rotation vector of the board
* (see cv::Rodrigues). Used as initial guess if not empty.
* @param tvec Output vector (e.g. cv::Mat) corresponding to the translation vector of the board.
* @param useExtrinsicGuess defines whether the initial guess for \b rvec and \b tvec will be used or not.
*
* This function receives the detected markers and returns the pose of a marker board composed
* by those markers.
* A Board of markers has a single world coordinate system, which is defined by the board layout.
* The returned transformation is the one that transforms points from the board coordinate system
* to the camera coordinate system.
* Input markers that are not included in the board layout are ignored.
* The function returns the number of markers from the input employed for the board pose estimation.
* Note that returning a 0 means the pose has not been estimated.
*/
CV_EXPORTS_W int estimatePoseBoard(InputArrayOfArrays corners, InputArray ids, const Ptr<Board> &board,
InputArray cameraMatrix, InputArray distCoeffs, InputOutputArray rvec,
InputOutputArray tvec, bool useExtrinsicGuess = false);
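/* A minimal sketch of board pose estimation, assuming corners/ids come from detectMarkers()
 * and board, cameraMatrix and distCoeffs are set up as in the sketches above:
 *
 *   cv::Vec3d rvec, tvec;
 *   int used = cv::aruco::estimatePoseBoard(corners, ids, board, cameraMatrix, distCoeffs,
 *                                           rvec, tvec);
 *   if (used == 0) {
 *       // no marker of the board was usable, so the pose was not estimated
 *   }
 */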
/**
* @brief Find the markers that were not detected, based on the already detected markers and the board layout
*
* @param image input image
* @param board layout of markers in the board.
* @param detectedCorners vector of already detected marker corners.
* @param detectedIds vector of already detected marker identifiers.
* @param rejectedCorners vector of rejected candidates during the marker detection process.
* @param cameraMatrix optional input 3x3 floating-point camera matrix
* \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$
* @param distCoeffs optional vector of distortion coefficients
* \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements
* @param minRepDistance minimum distance between the corners of the rejected candidate and the
* reprojected marker in order to consider it as a correspondence.
* @param errorCorrectionRate rate of allowed erroneous bits with respect to the error correction
* capability of the used dictionary. -1 ignores the error correction step.
* @param checkAllOrders Consider the four possible corner orders in the rejectedCorners array.
* If it is set to false, only the provided corner order is considered (default true).
* @param recoveredIdxs Optional array to return the indexes of the recovered candidates in the
* original rejectedCorners array.
* @param parameters marker detection parameters
*
* This function tries to find markers that were not detected in the basic detectMarkers function.
* First, based on the currently detected markers and the board layout, the function interpolates
* the position of the missing markers. Then it tries to find correspondences between the reprojected
* markers and the rejected candidates based on the minRepDistance and errorCorrectionRate
* parameters.
* If camera parameters and distortion coefficients are provided, missing markers are reprojected
* using the projectPoints function. If not, missing marker projections are interpolated using global
* homography, and all the marker corners in the board must have the same Z coordinate.
*/
CV_EXPORTS_W void refineDetectedMarkers(
InputArray image,const Ptr<Board> &board, InputOutputArrayOfArrays detectedCorners,
InputOutputArray detectedIds, InputOutputArrayOfArrays rejectedCorners,
InputArray cameraMatrix = noArray(), InputArray distCoeffs = noArray(),
float minRepDistance = 10.f, float errorCorrectionRate = 3.f, bool checkAllOrders = true,
OutputArray recoveredIdxs = noArray(), const Ptr<DetectorParameters> &parameters = DetectorParameters::create());
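/* A minimal sketch of recovering rejected candidates against a known board layout, reusing
 * the frame, board, corners, ids and rejected variables assumed in the sketches above:
 *
 *   cv::aruco::refineDetectedMarkers(frame, board, corners, ids, rejected,
 *                                    cameraMatrix, distCoeffs);
 *   // corners and ids are updated in place with any recovered markers
 */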
/**
* @brief Draw detected markers in image
*
* @param image input/output image. It must have 1 or 3 channels. The number of channels is not
* altered.
* @param corners positions of marker corners on input image.
* (e.g std::vector<std::vector<cv::Point2f> > ). For N detected markers, the dimensions of
* this array should be Nx4. The order of the corners should be clockwise.
* @param ids vector of identifiers for the markers in corners.
* Optional; if not provided, ids are not painted.
* @param borderColor color of marker borders. The rest of the colors (text color and first corner color)
* are calculated based on this one to improve visualization.
*
* Given an array of detected marker corners and their corresponding ids, this function draws
* the markers in the image. The marker borders are painted, as are the marker identifiers if provided.
* Useful for debugging purposes.
*/
CV_EXPORTS_W void drawDetectedMarkers(InputOutputArray image, InputArrayOfArrays corners,
InputArray ids = noArray(),
Scalar borderColor = Scalar(0, 255, 0));
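/* A minimal visualization sketch, assuming frame, corners and ids from the detection
 * sketch above (the output file name is arbitrary):
 *
 *   cv::Mat vis = frame.clone();
 *   cv::aruco::drawDetectedMarkers(vis, corners, ids);   // green borders by default
 *   cv::imwrite("detected.png", vis);
 */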
/**
* @brief Draw coordinate system axis from pose estimation
*
* @param image input/output image. It must have 1 or 3 channels. The number of channels is not
* altered.
* @param cameraMatrix input 3x3 floating-point camera matrix
* \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$
* @param distCoeffs vector of distortion coefficients
* \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements
* @param rvec rotation vector of the coordinate system that will be drawn. (@sa Rodrigues).
* @param tvec translation vector of the coordinate system that will be drawn.
* @param length length of the painted axis in the same unit as tvec (usually in meters)
*
* Given the pose estimation of a marker or board, this function draws the axis of the world
* coordinate system, i.e. the system centered on the marker/board. Useful for debugging purposes.
*
* @deprecated use cv::drawFrameAxes
*/
CV_EXPORTS_W void drawAxis(InputOutputArray image, InputArray cameraMatrix, InputArray distCoeffs,
InputArray rvec, InputArray tvec, float length);
/**
* @brief Draw a canonical marker image
*
* @param dictionary dictionary of markers indicating the type of markers
* @param id identifier of the marker that will be returned. It has to be a valid id
* in the specified dictionary.
* @param sidePixels size of the image in pixels
* @param img output image with the marker
* @param borderBits width of the marker border.
*
* This function returns a marker image in its canonical form (i.e. ready to be printed)
*/
CV_EXPORTS_W void drawMarker(const Ptr<Dictionary> &dictionary, int id, int sidePixels, OutputArray img,
int borderBits = 1);
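/* A minimal sketch of generating a printable marker image (the marker id, pixel size and
 * output file name are arbitrary example values; dict is assumed as above):
 *
 *   cv::Mat markerImage;
 *   cv::aruco::drawMarker(dict, 23, 200, markerImage, 1);   // id 23, 200x200 px, 1-bit border
 *   cv::imwrite("marker23.png", markerImage);
 */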
/**
* @brief Draw a planar board
* @sa _drawPlanarBoardImpl
*
* @param board layout of the board that will be drawn. The board should be planar;
* the z coordinate is ignored.
* @param outSize size of the output image in pixels.
* @param img output image with the board. The size of this image will be outSize
* and the board will be on the center, keeping the board proportions.
* @param marginSize minimum margins (in pixels) of the board in the output image
* @param borderBits width of the marker borders.
*
* This function returns the image of a planar board, ready to be printed. It assumes
* the Board layout specified is planar by ignoring the z coordinates of the object points.
*/
CV_EXPORTS_W void drawPlanarBoard(const Ptr<Board> &board, Size outSize, OutputArray img,
int marginSize = 0, int borderBits = 1);
/**
* @brief Implementation of drawPlanarBoard that accepts a raw Board pointer.
*/
void _drawPlanarBoardImpl(Board *board, Size outSize, OutputArray img,
int marginSize = 0, int borderBits = 1);
/**
* @brief Calibrate a camera using aruco markers
*
* @param corners vector of detected marker corners in all frames.
* The corners should have the same format returned by detectMarkers (see #detectMarkers).
* @param ids list of identifiers for each marker in corners
* @param counter number of markers in each frame so that corners and ids can be split
* @param board Marker Board layout
* @param imageSize Size of the image used only to initialize the intrinsic camera matrix.
* @param cameraMatrix Output 3x3 floating-point camera matrix
* \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ . If CV\_CALIB\_USE\_INTRINSIC\_GUESS
* and/or CV_CALIB_FIX_ASPECT_RATIO are specified, some or all of fx, fy, cx, cy must be
* initialized before calling the function.
* @param distCoeffs Output vector of distortion coefficients
* \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements
* @param rvecs Output vector of rotation vectors (see Rodrigues ) estimated for each board view
* (e.g. std::vector<cv::Mat>). That is, each k-th rotation vector together with the corresponding
* k-th translation vector (see the next output parameter description) brings the board pattern
* from the model coordinate space (in which object points are specified) to the world coordinate
* space, that is, a real position of the board pattern in the k-th pattern view (k=0.. *M* -1).
* @param tvecs Output vector of translation vectors estimated for each pattern view.
* @param stdDeviationsIntrinsics Output vector of standard deviations estimated for intrinsic parameters.
* Order of deviations values:
* \f$(f_x, f_y, c_x, c_y, k_1, k_2, p_1, p_2, k_3, k_4, k_5, k_6 , s_1, s_2, s_3,
* s_4, \tau_x, \tau_y)\f$ If one of the parameters is not estimated, its deviation is equal to zero.
* @param stdDeviationsExtrinsics Output vector of standard deviations estimated for extrinsic parameters.
* Order of deviations values: \f$(R_1, T_1, \dotsc , R_M, T_M)\f$ where M is number of pattern views,
* \f$R_i, T_i\f$ are concatenated 1x3 vectors.
* @param perViewErrors Output vector of average re-projection errors estimated for each pattern view.
* @param flags Different flags for the calibration process (see #calibrateCamera for details).
* @param criteria Termination criteria for the iterative optimization algorithm.
*
* This function calibrates a camera using an Aruco Board. The function receives a list of
* detected markers from several views of the Board. The process is similar to the chessboard
* calibration in calibrateCamera(). The function returns the final re-projection error.
*/
CV_EXPORTS_AS(calibrateCameraArucoExtended) double calibrateCameraAruco(
InputArrayOfArrays corners, InputArray ids, InputArray counter, const Ptr<Board> &board,
Size imageSize, InputOutputArray cameraMatrix, InputOutputArray distCoeffs,
OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs,
OutputArray stdDeviationsIntrinsics, OutputArray stdDeviationsExtrinsics,
OutputArray perViewErrors, int flags = 0,
TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON));
/** @brief It's the same function as #calibrateCameraAruco but without calibration error estimation.
*/
CV_EXPORTS_W double calibrateCameraAruco(
InputArrayOfArrays corners, InputArray ids, InputArray counter, const Ptr<Board> &board,
Size imageSize, InputOutputArray cameraMatrix, InputOutputArray distCoeffs,
OutputArrayOfArrays rvecs = noArray(), OutputArrayOfArrays tvecs = noArray(), int flags = 0,
TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON));
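/* A minimal calibration sketch, assuming allCorners and allIds hold the detections of several
 * frames concatenated, and markerCounterPerFrame holds how many markers each frame contributed
 * (all names and the image size are assumptions for illustration):
 *
 *   std::vector<std::vector<cv::Point2f>> allCorners;   // all frames, concatenated
 *   std::vector<int> allIds;                            // ids, same order as allCorners
 *   std::vector<int> markerCounterPerFrame;             // markers per frame
 *   cv::Size imgSize(640, 480);
 *   cv::Mat cameraMatrix, distCoeffs;
 *   std::vector<cv::Mat> rvecs, tvecs;
 *   double repError = cv::aruco::calibrateCameraAruco(allCorners, allIds, markerCounterPerFrame,
 *                                                     board, imgSize, cameraMatrix, distCoeffs,
 *                                                     rvecs, tvecs);
 */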
/**
* @brief Given a board configuration and a set of detected markers, returns the corresponding
* image points and object points to call solvePnP
*
* @param board Marker board layout.
* @param detectedCorners List of detected marker corners of the board.
* @param detectedIds List of identifiers for each marker.
* @param objPoints Vector of vectors of board marker points in the board coordinate space.
* @param imgPoints Vector of vectors of the projections of board marker corner points.
*/
CV_EXPORTS_W void getBoardObjectAndImagePoints(const Ptr<Board> &board, InputArrayOfArrays detectedCorners,
InputArray detectedIds, OutputArray objPoints, OutputArray imgPoints);
//! @}
}
}
#endif

353
3rdparty/opencv/inc/opencv2/aruco/charuco.hpp vendored Normal file

@@ -0,0 +1,353 @@
/*
By downloading, copying, installing or using the software you agree to this
license. If you do not agree to this license, do not download, install,
copy or use the software.
License Agreement
For Open Source Computer Vision Library
(3-clause BSD License)
Copyright (C) 2013, OpenCV Foundation, all rights reserved.
Third party copyrights are property of their respective owners.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the names of the copyright holders nor the names of the contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
This software is provided by the copyright holders and contributors "as is" and
any express or implied warranties, including, but not limited to, the implied
warranties of merchantability and fitness for a particular purpose are
disclaimed. In no event shall copyright holders or contributors be liable for
any direct, indirect, incidental, special, exemplary, or consequential damages
(including, but not limited to, procurement of substitute goods or services;
loss of use, data, or profits; or business interruption) however caused
and on any theory of liability, whether in contract, strict liability,
or tort (including negligence or otherwise) arising in any way out of
the use of this software, even if advised of the possibility of such damage.
*/
#ifndef __OPENCV_CHARUCO_HPP__
#define __OPENCV_CHARUCO_HPP__
#include <opencv2/core.hpp>
#include <vector>
#include <opencv2/aruco.hpp>
namespace cv {
namespace aruco {
//! @addtogroup aruco
//! @{
/**
* @brief ChArUco board
* Specific class for ChArUco boards. A ChArUco board is a planar board where the markers are placed
* inside the white squares of a chessboard. The benefit of ChArUco boards is that they provide
* both ArUco marker versatility and chessboard corner precision, which is important for
* calibration and pose estimation.
* This class also allows the easy creation and drawing of ChArUco boards.
*/
class CV_EXPORTS_W CharucoBoard : public Board {
public:
// vector of chessboard 3D corners precalculated
CV_PROP std::vector< Point3f > chessboardCorners;
// for each charuco corner, nearest marker id and nearest marker corner id of each marker
CV_PROP std::vector< std::vector< int > > nearestMarkerIdx;
CV_PROP std::vector< std::vector< int > > nearestMarkerCorners;
/**
* @brief Draw a ChArUco board
*
* @param outSize size of the output image in pixels.
* @param img output image with the board. The size of this image will be outSize
* and the board will be on the center, keeping the board proportions.
* @param marginSize minimum margins (in pixels) of the board in the output image
* @param borderBits width of the marker borders.
*
* This function returns the image of the ChArUco board, ready to be printed.
*/
CV_WRAP void draw(Size outSize, OutputArray img, int marginSize = 0, int borderBits = 1);
/**
* @brief Create a CharucoBoard object
*
* @param squaresX number of chessboard squares in X direction
* @param squaresY number of chessboard squares in Y direction
* @param squareLength chessboard square side length (normally in meters)
* @param markerLength marker side length (same unit as squareLength)
* @param dictionary dictionary of markers indicating the type of markers.
* The first markers in the dictionary are used to fill the white chessboard squares.
* @return the output CharucoBoard object
*
* This function creates a CharucoBoard object given the number of squares in each direction
* and the size of the markers and chessboard squares.
*/
CV_WRAP static Ptr<CharucoBoard> create(int squaresX, int squaresY, float squareLength,
float markerLength, const Ptr<Dictionary> &dictionary);
/**
*
*/
CV_WRAP Size getChessboardSize() const { return Size(_squaresX, _squaresY); }
/**
*
*/
CV_WRAP float getSquareLength() const { return _squareLength; }
/**
*
*/
CV_WRAP float getMarkerLength() const { return _markerLength; }
private:
void _getNearestMarkerCorners();
// number of chessboard squares in X and Y directions
int _squaresX, _squaresY;
// size of chessboard squares side (normally in meters)
float _squareLength;
// marker side length (normally in meters)
float _markerLength;
};
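/* A minimal sketch of creating and rendering a ChArUco board (square counts, lengths and the
 * output file name are arbitrary example values; cv::imwrite needs <opencv2/imgcodecs.hpp>):
 *
 *   cv::Ptr<cv::aruco::Dictionary> dict =
 *       cv::aruco::getPredefinedDictionary(cv::aruco::DICT_6X6_250);
 *   cv::Ptr<cv::aruco::CharucoBoard> charucoBoard =
 *       cv::aruco::CharucoBoard::create(5, 7, 0.04f, 0.02f, dict);   // 4 cm squares, 2 cm markers
 *   cv::Mat boardImage;
 *   charucoBoard->draw(cv::Size(600, 800), boardImage, 10, 1);
 *   cv::imwrite("charuco_board.png", boardImage);
 */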
/**
* @brief Interpolate position of ChArUco board corners
* @param markerCorners vector of already detected markers corners. For each marker, its four
* corners are provided, (e.g std::vector<std::vector<cv::Point2f> > ). For N detected markers, the
* dimensions of this array should be Nx4. The order of the corners should be clockwise.
* @param markerIds list of identifiers for each marker in corners
* @param image input image necessary for corner refinement. Note that markers are not detected and
* should be sent in the corners and ids parameters.
* @param board layout of ChArUco board.
* @param charucoCorners interpolated chessboard corners
* @param charucoIds interpolated chessboard corners identifiers
* @param cameraMatrix optional 3x3 floating-point camera matrix
* \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$
* @param distCoeffs optional vector of distortion coefficients
* \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements
* @param minMarkers number of adjacent markers that must be detected to return a charuco corner
*
* This function receives the detected markers and returns the 2D position of the chessboard corners
* from a ChArUco board using the detected Aruco markers. If camera parameters are provided,
* the process is based on an approximated pose estimation; otherwise it is based on local homography.
* Only visible corners are returned. For each corner, its corresponding identifier is
* also returned in charucoIds.
* The function returns the number of interpolated corners.
*/
CV_EXPORTS_W int interpolateCornersCharuco(InputArrayOfArrays markerCorners, InputArray markerIds,
InputArray image, const Ptr<CharucoBoard> &board,
OutputArray charucoCorners, OutputArray charucoIds,
InputArray cameraMatrix = noArray(),
InputArray distCoeffs = noArray(), int minMarkers = 2);
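/* A minimal sketch of ChArUco corner interpolation, assuming markerCorners/markerIds come from
 * detectMarkers() on an input image named image, and charucoBoard, cameraMatrix and distCoeffs
 * are set up as in the sketches above:
 *
 *   std::vector<cv::Point2f> charucoCorners;
 *   std::vector<int> charucoIds;
 *   int n = cv::aruco::interpolateCornersCharuco(markerCorners, markerIds, image, charucoBoard,
 *                                                charucoCorners, charucoIds,
 *                                                cameraMatrix, distCoeffs);
 *   // n is the number of interpolated chessboard corners
 */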
/**
* @brief Pose estimation for a ChArUco board given some of their corners
* @param charucoCorners vector of detected charuco corners
* @param charucoIds list of identifiers for each corner in charucoCorners
* @param board layout of ChArUco board.
* @param cameraMatrix input 3x3 floating-point camera matrix
* \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$
* @param distCoeffs vector of distortion coefficients
* \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements
* @param rvec Output vector (e.g. cv::Mat) corresponding to the rotation vector of the board
* (see cv::Rodrigues).
* @param tvec Output vector (e.g. cv::Mat) corresponding to the translation vector of the board.
* @param useExtrinsicGuess defines whether initial guess for \b rvec and \b tvec will be used or not.
*
* This function estimates a Charuco board pose from some detected corners.
* The function checks if the input corners are enough and valid to perform pose estimation.
* If pose estimation is valid, returns true, else returns false.
*/
CV_EXPORTS_W bool estimatePoseCharucoBoard(InputArray charucoCorners, InputArray charucoIds,
const Ptr<CharucoBoard> &board, InputArray cameraMatrix,
InputArray distCoeffs, InputOutputArray rvec,
InputOutputArray tvec, bool useExtrinsicGuess = false);
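/* A minimal pose-estimation sketch, continuing from the interpolation sketch above:
 *
 *   cv::Vec3d rvec, tvec;
 *   bool valid = cv::aruco::estimatePoseCharucoBoard(charucoCorners, charucoIds, charucoBoard,
 *                                                    cameraMatrix, distCoeffs, rvec, tvec);
 *   // valid is false when there are not enough corners for a reliable pose
 */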
/**
* @brief Draws a set of Charuco corners
* @param image input/output image. It must have 1 or 3 channels. The number of channels is not
* altered.
* @param charucoCorners vector of detected charuco corners
* @param charucoIds list of identifiers for each corner in charucoCorners
* @param cornerColor color of the square surrounding each corner
*
* This function draws a set of detected Charuco corners. If identifiers vector is provided, it also
* draws the id of each corner.
*/
CV_EXPORTS_W void drawDetectedCornersCharuco(InputOutputArray image, InputArray charucoCorners,
InputArray charucoIds = noArray(),
Scalar cornerColor = Scalar(255, 0, 0));
/**
* @brief Calibrate a camera using Charuco corners
*
* @param charucoCorners vector of detected charuco corners per frame
* @param charucoIds list of identifiers for each corner in charucoCorners per frame
* @param board Marker Board layout
* @param imageSize input image size
* @param cameraMatrix Output 3x3 floating-point camera matrix
* \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ . If CV\_CALIB\_USE\_INTRINSIC\_GUESS
* and/or CV_CALIB_FIX_ASPECT_RATIO are specified, some or all of fx, fy, cx, cy must be
* initialized before calling the function.
* @param distCoeffs Output vector of distortion coefficients
* \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements
* @param rvecs Output vector of rotation vectors (see Rodrigues ) estimated for each board view
* (e.g. std::vector<cv::Mat>). That is, each k-th rotation vector together with the corresponding
* k-th translation vector (see the next output parameter description) brings the board pattern
* from the model coordinate space (in which object points are specified) to the world coordinate
* space, that is, a real position of the board pattern in the k-th pattern view (k=0.. *M* -1).
* @param tvecs Output vector of translation vectors estimated for each pattern view.
* @param stdDeviationsIntrinsics Output vector of standard deviations estimated for intrinsic parameters.
* Order of deviations values:
* \f$(f_x, f_y, c_x, c_y, k_1, k_2, p_1, p_2, k_3, k_4, k_5, k_6 , s_1, s_2, s_3,
* s_4, \tau_x, \tau_y)\f$ If one of the parameters is not estimated, its deviation is equal to zero.
* @param stdDeviationsExtrinsics Output vector of standard deviations estimated for extrinsic parameters.
* Order of deviations values: \f$(R_1, T_1, \dotsc , R_M, T_M)\f$ where M is number of pattern views,
* \f$R_i, T_i\f$ are concatenated 1x3 vectors.
* @param perViewErrors Output vector of average re-projection errors estimated for each pattern view.
* @param flags Different flags for the calibration process (see #calibrateCamera for details).
* @param criteria Termination criteria for the iterative optimization algorithm.
*
* This function calibrates a camera using a set of corners of a Charuco Board. The function
* receives a list of detected corners and its identifiers from several views of the Board.
* The function returns the final re-projection error.
*/
CV_EXPORTS_AS(calibrateCameraCharucoExtended) double calibrateCameraCharuco(
InputArrayOfArrays charucoCorners, InputArrayOfArrays charucoIds, const Ptr<CharucoBoard> &board,
Size imageSize, InputOutputArray cameraMatrix, InputOutputArray distCoeffs,
OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs,
OutputArray stdDeviationsIntrinsics, OutputArray stdDeviationsExtrinsics,
OutputArray perViewErrors, int flags = 0,
TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON));
/** @brief It's the same function as #calibrateCameraCharuco but without calibration error estimation.
*/
CV_EXPORTS_W double calibrateCameraCharuco(
InputArrayOfArrays charucoCorners, InputArrayOfArrays charucoIds, const Ptr<CharucoBoard> &board,
Size imageSize, InputOutputArray cameraMatrix, InputOutputArray distCoeffs,
OutputArrayOfArrays rvecs = noArray(), OutputArrayOfArrays tvecs = noArray(), int flags = 0,
TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON));
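/* A minimal ChArUco calibration sketch, assuming allCharucoCorners and allCharucoIds collect the
 * interpolateCornersCharuco() output of several views (the names and image size are assumptions):
 *
 *   std::vector<std::vector<cv::Point2f>> allCharucoCorners;
 *   std::vector<std::vector<int>> allCharucoIds;
 *   cv::Mat cameraMatrix, distCoeffs;
 *   std::vector<cv::Mat> rvecs, tvecs;
 *   double repError = cv::aruco::calibrateCameraCharuco(allCharucoCorners, allCharucoIds,
 *                                                       charucoBoard, cv::Size(640, 480),
 *                                                       cameraMatrix, distCoeffs, rvecs, tvecs);
 */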
/**
* @brief Detect ChArUco Diamond markers
*
* @param image input image necessary for corner subpixel.
* @param markerCorners list of detected marker corners from detectMarkers function.
* @param markerIds list of marker ids in markerCorners.
* @param squareMarkerLengthRate rate between square and marker length:
* squareMarkerLengthRate = squareLength/markerLength. The real units are not necessary.
* @param diamondCorners output list of detected diamond corners (4 corners per diamond). The order
* is the same as in marker corners: top left, top right, bottom right and bottom left. Similar
* format to the corners returned by detectMarkers (e.g std::vector<std::vector<cv::Point2f> > ).
* @param diamondIds ids of the diamonds in diamondCorners. The id of each diamond is in fact of
* type Vec4i, so each diamond has 4 ids, which are the ids of the aruco markers composing the
* diamond.
* @param cameraMatrix Optional camera calibration matrix.
* @param distCoeffs Optional camera distortion coefficients.
*
* This function detects Diamond markers from the previous detected ArUco markers. The diamonds
* are returned in the diamondCorners and diamondIds parameters. If camera calibration parameters
* are provided, the diamond search is based on reprojection. If not, diamond search is based on
* homography. Homography is faster than reprojection but can slightly reduce the detection rate.
*/
CV_EXPORTS_W void detectCharucoDiamond(InputArray image, InputArrayOfArrays markerCorners,
InputArray markerIds, float squareMarkerLengthRate,
OutputArrayOfArrays diamondCorners, OutputArray diamondIds,
InputArray cameraMatrix = noArray(),
InputArray distCoeffs = noArray());
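/* A minimal diamond-detection sketch, assuming markerCorners/markerIds come from detectMarkers()
 * on an input image named image; the square and marker lengths are example values:
 *
 *   float squareLength = 0.04f, markerLength = 0.025f;
 *   std::vector<std::vector<cv::Point2f>> diamondCorners;
 *   std::vector<cv::Vec4i> diamondIds;
 *   cv::aruco::detectCharucoDiamond(image, markerCorners, markerIds,
 *                                   squareLength / markerLength,
 *                                   diamondCorners, diamondIds);
 */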
/**
* @brief Draw a set of detected ChArUco Diamond markers
*
* @param image input/output image. It must have 1 or 3 channels. The number of channels is not
* altered.
* @param diamondCorners positions of diamond corners in the same format returned by
* detectCharucoDiamond(). (e.g std::vector<std::vector<cv::Point2f> > ). For N detected markers,
* the dimensions of this array should be Nx4. The order of the corners should be clockwise.
* @param diamondIds vector of identifiers for diamonds in diamondCorners, in the same format
* returned by detectCharucoDiamond() (e.g. std::vector<Vec4i>).
* Optional, if not provided, ids are not painted.
* @param borderColor color of marker borders. Rest of colors (text color and first corner color)
* are calculated based on this one.
*
* Given an array of detected diamonds, this function draws them in the image. The marker borders
* are painted, as are the marker identifiers if provided.
* Useful for debugging purposes.
*/
CV_EXPORTS_W void drawDetectedDiamonds(InputOutputArray image, InputArrayOfArrays diamondCorners,
InputArray diamondIds = noArray(),
Scalar borderColor = Scalar(0, 0, 255));
/**
* @brief Draw a ChArUco Diamond marker
*
* @param dictionary dictionary of markers indicating the type of markers.
* @param ids list of 4 ids for each ArUco marker in the ChArUco marker.
* @param squareLength size of the chessboard squares in pixels.
* @param markerLength size of the markers in pixels.
* @param img output image with the marker. The size of this image will be
* 3*squareLength + 2*marginSize.
* @param marginSize minimum margins (in pixels) of the marker in the output image
* @param borderBits width of the marker borders.
*
* This function returns the image of a ChArUco marker, ready to be printed.
*/
CV_EXPORTS_W void drawCharucoDiamond(const Ptr<Dictionary> &dictionary, Vec4i ids, int squareLength,
int markerLength, OutputArray img, int marginSize = 0,
int borderBits = 1);
/**
* @brief test whether the ChArUco markers are collinear
*
* @param _board layout of ChArUco board.
* @param _charucoIds list of identifiers for each corner in charucoCorners per frame.
* @return true if the detected corners form a line, false if they do not.
* solvePnP and the calibration functions will fail if the corners are collinear (true).
*
* The number of ids in charucoIds should be <= the number of chessboard corners in the board.
* This function checks whether the ChArUco corners lie on a straight line (returns true if so,
* false otherwise). Axis-parallel, as well as diagonal and other straight lines, are detected.
* Degenerate case: for a number of charucoIds <= 2, the function returns true.
*/
CV_EXPORTS_W bool testCharucoCornersCollinear(const Ptr<CharucoBoard> &_board,
InputArray _charucoIds);
//! @}
}
}
#endif

212
3rdparty/opencv/inc/opencv2/aruco/dictionary.hpp vendored Normal file

@@ -0,0 +1,212 @@
/*
By downloading, copying, installing or using the software you agree to this
license. If you do not agree to this license, do not download, install,
copy or use the software.
License Agreement
For Open Source Computer Vision Library
(3-clause BSD License)
Copyright (C) 2013, OpenCV Foundation, all rights reserved.
Third party copyrights are property of their respective owners.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the names of the copyright holders nor the names of the contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
This software is provided by the copyright holders and contributors "as is" and
any express or implied warranties, including, but not limited to, the implied
warranties of merchantability and fitness for a particular purpose are
disclaimed. In no event shall copyright holders or contributors be liable for
any direct, indirect, incidental, special, exemplary, or consequential damages
(including, but not limited to, procurement of substitute goods or services;
loss of use, data, or profits; or business interruption) however caused
and on any theory of liability, whether in contract, strict liability,
or tort (including negligence or otherwise) arising in any way out of
the use of this software, even if advised of the possibility of such damage.
*/
#ifndef __OPENCV_DICTIONARY_HPP__
#define __OPENCV_DICTIONARY_HPP__
#include <opencv2/core.hpp>
namespace cv {
namespace aruco {
//! @addtogroup aruco
//! @{
/**
* @brief Dictionary/Set of markers. It contains the inner codification
*
* bytesList contains the marker codewords where
* - bytesList.rows is the dictionary size
* - each marker is encoded using `nbytes = ceil(markerSize*markerSize/8.)`
* - each row contains all 4 rotations of the marker, so its length is `4*nbytes`
*
* `bytesList.ptr(i)[k*nbytes + j]` is then the j-th byte of i-th marker, in its k-th rotation.
*/
class CV_EXPORTS_W Dictionary {
public:
CV_PROP_RW Mat bytesList; // marker code information
CV_PROP_RW int markerSize; // number of bits per dimension
CV_PROP_RW int maxCorrectionBits; // maximum number of bits that can be corrected
/**
*/
Dictionary(const Mat &_bytesList = Mat(), int _markerSize = 0, int _maxcorr = 0);
/**
Dictionary(const Dictionary &_dictionary);
*/
/**
*/
Dictionary(const Ptr<Dictionary> &_dictionary);
/**
* @see generateCustomDictionary
*/
CV_WRAP_AS(create) static Ptr<Dictionary> create(int nMarkers, int markerSize, int randomSeed=0);
/**
* @see generateCustomDictionary
*/
CV_WRAP_AS(create_from) static Ptr<Dictionary> create(int nMarkers, int markerSize,
const Ptr<Dictionary> &baseDictionary, int randomSeed=0);
/**
* @see getPredefinedDictionary
*/
CV_WRAP static Ptr<Dictionary> get(int dict);
/**
* @brief Given a matrix of bits, returns whether the marker is identified or not.
* It returns by reference the correct id (if any) and the correct rotation.
*/
bool identify(const Mat &onlyBits, int &idx, int &rotation, double maxCorrectionRate) const;
/**
* @brief Returns the distance of the input bits to the specific id. If allRotations is true,
* the four possible bit rotations are considered.
*/
int getDistanceToId(InputArray bits, int id, bool allRotations = true) const;
/**
* @brief Draw a canonical marker image
*/
CV_WRAP void drawMarker(int id, int sidePixels, OutputArray _img, int borderBits = 1) const;
/**
* @brief Transform matrix of bits to list of bytes in the 4 rotations
*/
CV_WRAP static Mat getByteListFromBits(const Mat &bits);
/**
* @brief Transform list of bytes to matrix of bits
*/
CV_WRAP static Mat getBitsFromByteList(const Mat &byteList, int markerSize);
};
/**
* @brief Predefined markers dictionaries/sets
* Each dictionary indicates the number of bits and the number of markers contained
* - DICT_ARUCO_ORIGINAL: standard ArUco Library Markers. 1024 markers, 5x5 bits, 0 minimum
distance
*/
enum PREDEFINED_DICTIONARY_NAME {
DICT_4X4_50 = 0,
DICT_4X4_100,
DICT_4X4_250,
DICT_4X4_1000,
DICT_5X5_50,
DICT_5X5_100,
DICT_5X5_250,
DICT_5X5_1000,
DICT_6X6_50,
DICT_6X6_100,
DICT_6X6_250,
DICT_6X6_1000,
DICT_7X7_50,
DICT_7X7_100,
DICT_7X7_250,
DICT_7X7_1000,
DICT_ARUCO_ORIGINAL,
DICT_APRILTAG_16h5, ///< 4x4 bits, minimum hamming distance between any two codes = 5, 30 codes
DICT_APRILTAG_25h9, ///< 5x5 bits, minimum hamming distance between any two codes = 9, 35 codes
DICT_APRILTAG_36h10, ///< 6x6 bits, minimum hamming distance between any two codes = 10, 2320 codes
DICT_APRILTAG_36h11 ///< 6x6 bits, minimum hamming distance between any two codes = 11, 587 codes
};
/**
* @brief Returns one of the predefined dictionaries defined in PREDEFINED_DICTIONARY_NAME
*/
CV_EXPORTS Ptr<Dictionary> getPredefinedDictionary(PREDEFINED_DICTIONARY_NAME name);
/**
* @brief Returns one of the predefined dictionaries referenced by DICT_*.
*/
CV_EXPORTS_W Ptr<Dictionary> getPredefinedDictionary(int dict);
/**
* @see generateCustomDictionary
*/
CV_EXPORTS_AS(custom_dictionary) Ptr<Dictionary> generateCustomDictionary(
int nMarkers,
int markerSize,
int randomSeed=0);
/**
* @brief Generates a new customizable marker dictionary
*
* @param nMarkers number of markers in the dictionary
* @param markerSize number of bits per dimension of each marker
* @param baseDictionary Include the markers in this dictionary at the beginning (optional)
* @param randomSeed a user supplied seed for theRNG()
*
* This function creates a new dictionary composed of nMarkers markers, each composed
* of markerSize x markerSize bits. If baseDictionary is provided, its markers are directly
* included and the rest are generated based on them. If the size of baseDictionary is higher
* than nMarkers, only the first nMarkers in baseDictionary are taken and no new marker is added.
*/
CV_EXPORTS_AS(custom_dictionary_from) Ptr<Dictionary> generateCustomDictionary(
int nMarkers,
int markerSize,
const Ptr<Dictionary> &baseDictionary,
int randomSeed=0);
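/* A minimal sketch of obtaining dictionaries (the marker counts and sizes are example values):
 *
 *   // a predefined dictionary: 250 markers of 6x6 bits
 *   cv::Ptr<cv::aruco::Dictionary> predefined =
 *       cv::aruco::getPredefinedDictionary(cv::aruco::DICT_6X6_250);
 *
 *   // a custom dictionary: 36 markers of 5x5 bits, generated from scratch
 *   cv::Ptr<cv::aruco::Dictionary> custom =
 *       cv::aruco::generateCustomDictionary(36, 5);
 */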
//! @}
}
}
#endif

101
3rdparty/opencv/inc/opencv2/barcode.hpp vendored Normal file

@@ -0,0 +1,101 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
// Copyright (c) 2020-2021 darkliang wangberlinT Certseeds
#ifndef __OPENCV_BARCODE_HPP__
#define __OPENCV_BARCODE_HPP__
#include <opencv2/core.hpp>
#include <ostream>
/** @defgroup barcode Barcode detecting and decoding methods
*/
namespace cv {
namespace barcode {
//! @addtogroup barcode
//! @{
enum BarcodeType
{
NONE, EAN_8, EAN_13, UPC_A, UPC_E, UPC_EAN_EXTENSION
};
static inline std::ostream &operator<<(std::ostream &out, const BarcodeType &barcode_type)
{
switch (barcode_type)
{
case BarcodeType::EAN_8:
out << "EAN_8";
break;
case BarcodeType::EAN_13:
out << "EAN_13";
break;
case BarcodeType::UPC_E:
out << "UPC_E";
break;
case BarcodeType::UPC_A:
out << "UPC_A";
break;
case BarcodeType::UPC_EAN_EXTENSION:
out << "UPC_EAN_EXTENSION";
break;
default:
out << "NONE";
}
return out;
}
class CV_EXPORTS_W BarcodeDetector
{
public:
/**
* @brief Initialize the BarcodeDetector.
* @param prototxt_path prototxt file path for the super resolution model
* @param model_path model file path for the super resolution model
*/
CV_WRAP BarcodeDetector(const std::string &prototxt_path = "", const std::string &model_path = "");
~BarcodeDetector();
/** @brief Detects barcode(s) in the image and returns the rectangle(s) containing the code(s).
*
* @param img grayscale or color (BGR) image that may or may not contain a barcode.
* @param points Output vector of vectors of vertices of the minimum-area rotated rectangles containing the codes.
* For N detected barcodes, the dimensions of this array should be [N][4].
* Order of the four points in vector<Point2f> is bottomLeft, topLeft, topRight, bottomRight.
*/
CV_WRAP bool detect(InputArray img, OutputArray points) const;
/** @brief Decodes barcode in image once it's found by the detect() method.
*
* @param img grayscale or color (BGR) image containing bar code.
* @param points vector of rotated rectangle vertices found by detect() method (or some other algorithm).
* For N detected barcodes, the dimensions of this array should be [N][4].
* Order of four points in vector<Point2f> is bottomLeft, topLeft, topRight, bottomRight.
* @param decoded_info UTF8-encoded output vector of string or empty vector of string if the codes cannot be decoded.
* @param decoded_type vector of BarcodeType, specifies the type of these barcodes
*/
CV_WRAP bool decode(InputArray img, InputArray points, CV_OUT std::vector<std::string> &decoded_info, CV_OUT
std::vector<BarcodeType> &decoded_type) const;
/** @brief Both detects and decodes barcode
* @param img grayscale or color (BGR) image containing barcode.
* @param decoded_info UTF8-encoded output vector of string(s) or empty vector of string if the codes cannot be decoded.
* @param decoded_type vector of BarcodeType, specifies the type of these barcodes
* @param points optional output vector of vertices of the found barcode rectangle. Will be empty if not found.
*/
CV_WRAP bool detectAndDecode(InputArray img, CV_OUT std::vector<std::string> &decoded_info, CV_OUT
std::vector<BarcodeType> &decoded_type, OutputArray points = noArray()) const;
protected:
struct Impl;
Ptr<Impl> p;
};
//! @}
}
} // cv::barcode::
#endif //__OPENCV_BARCODE_HPP__
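As a hedged usage sketch of the BarcodeDetector declared above (the input file name is arbitrary and the optional super-resolution model paths are omitted):

#include <opencv2/barcode.hpp>
#include <opencv2/imgcodecs.hpp>
#include <iostream>
#include <string>
#include <vector>

int main()
{
    cv::Mat img = cv::imread("barcode.jpg");   // hypothetical input image
    if (img.empty()) return 1;

    // Default construction: no super-resolution model is loaded.
    cv::barcode::BarcodeDetector detector;

    std::vector<std::string> decoded_info;
    std::vector<cv::barcode::BarcodeType> decoded_type;
    std::vector<cv::Point2f> corners;          // 4 vertices per detected barcode

    if (detector.detectAndDecode(img, decoded_info, decoded_type, corners))
    {
        for (std::size_t i = 0; i < decoded_info.size(); ++i)
            std::cout << decoded_type[i] << ": " << decoded_info[i] << std::endl;
    }
    return 0;
}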

3rdparty/opencv/inc/opencv2/bgsegm.hpp vendored Normal file

@@ -0,0 +1,380 @@
/*
By downloading, copying, installing or using the software you agree to this
license. If you do not agree to this license, do not download, install,
copy or use the software.
License Agreement
For Open Source Computer Vision Library
(3-clause BSD License)
Copyright (C) 2013, OpenCV Foundation, all rights reserved.
Third party copyrights are property of their respective owners.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the names of the copyright holders nor the names of the contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
This software is provided by the copyright holders and contributors "as is" and
any express or implied warranties, including, but not limited to, the implied
warranties of merchantability and fitness for a particular purpose are
disclaimed. In no event shall copyright holders or contributors be liable for
any direct, indirect, incidental, special, exemplary, or consequential damages
(including, but not limited to, procurement of substitute goods or services;
loss of use, data, or profits; or business interruption) however caused
and on any theory of liability, whether in contract, strict liability,
or tort (including negligence or otherwise) arising in any way out of
the use of this software, even if advised of the possibility of such damage.
*/
#ifndef __OPENCV_BGSEGM_HPP__
#define __OPENCV_BGSEGM_HPP__
#include "opencv2/video.hpp"
#ifdef __cplusplus
/** @defgroup bgsegm Improved Background-Foreground Segmentation Methods
*/
namespace cv
{
namespace bgsegm
{
//! @addtogroup bgsegm
//! @{
/** @brief Gaussian Mixture-based Background/Foreground Segmentation Algorithm.
The class implements the algorithm described in @cite KB2001 .
*/
class CV_EXPORTS_W BackgroundSubtractorMOG : public BackgroundSubtractor
{
public:
CV_WRAP virtual int getHistory() const = 0;
CV_WRAP virtual void setHistory(int nframes) = 0;
CV_WRAP virtual int getNMixtures() const = 0;
CV_WRAP virtual void setNMixtures(int nmix) = 0;
CV_WRAP virtual double getBackgroundRatio() const = 0;
CV_WRAP virtual void setBackgroundRatio(double backgroundRatio) = 0;
CV_WRAP virtual double getNoiseSigma() const = 0;
CV_WRAP virtual void setNoiseSigma(double noiseSigma) = 0;
};
/** @brief Creates mixture-of-gaussian background subtractor
@param history Length of the history.
@param nmixtures Number of Gaussian mixtures.
@param backgroundRatio Background ratio.
@param noiseSigma Noise strength (standard deviation of the brightness of each color channel). 0
means an automatic value.
*/
CV_EXPORTS_W Ptr<BackgroundSubtractorMOG>
createBackgroundSubtractorMOG(int history=200, int nmixtures=5,
double backgroundRatio=0.7, double noiseSigma=0);
/** @brief Background Subtractor module based on the algorithm given in @cite Gold2012 .
Takes a series of images and returns a sequence of mask (8UC1)
images of the same size, where 255 indicates Foreground and 0 represents Background.
This class implements an algorithm described in "Visual Tracking of Human Visitors under
Variable-Lighting Conditions for a Responsive Audio Art Installation," A. Godbehere,
A. Matsukawa, K. Goldberg, American Control Conference, Montreal, June 2012.
*/
class CV_EXPORTS_W BackgroundSubtractorGMG : public BackgroundSubtractor
{
public:
/** @brief Returns total number of distinct colors to maintain in histogram.
*/
CV_WRAP virtual int getMaxFeatures() const = 0;
/** @brief Sets total number of distinct colors to maintain in histogram.
*/
CV_WRAP virtual void setMaxFeatures(int maxFeatures) = 0;
/** @brief Returns the learning rate of the algorithm.
It lies between 0.0 and 1.0. It determines how quickly features are "forgotten" from
histograms.
*/
CV_WRAP virtual double getDefaultLearningRate() const = 0;
/** @brief Sets the learning rate of the algorithm.
*/
CV_WRAP virtual void setDefaultLearningRate(double lr) = 0;
/** @brief Returns the number of frames used to initialize background model.
*/
CV_WRAP virtual int getNumFrames() const = 0;
/** @brief Sets the number of frames used to initialize background model.
*/
CV_WRAP virtual void setNumFrames(int nframes) = 0;
/** @brief Returns the parameter used for quantization of color-space.
It is the number of discrete levels in each channel to be used in histograms.
*/
CV_WRAP virtual int getQuantizationLevels() const = 0;
/** @brief Sets the parameter used for quantization of color-space
*/
CV_WRAP virtual void setQuantizationLevels(int nlevels) = 0;
/** @brief Returns the prior probability that each individual pixel is a background pixel.
*/
CV_WRAP virtual double getBackgroundPrior() const = 0;
/** @brief Sets the prior probability that each individual pixel is a background pixel.
*/
CV_WRAP virtual void setBackgroundPrior(double bgprior) = 0;
/** @brief Returns the kernel radius used for morphological operations
*/
CV_WRAP virtual int getSmoothingRadius() const = 0;
/** @brief Sets the kernel radius used for morphological operations
*/
CV_WRAP virtual void setSmoothingRadius(int radius) = 0;
/** @brief Returns the value of decision threshold.
Decision value is the value above which a pixel is classified as foreground (FG).
*/
CV_WRAP virtual double getDecisionThreshold() const = 0;
/** @brief Sets the value of decision threshold.
*/
CV_WRAP virtual void setDecisionThreshold(double thresh) = 0;
/** @brief Returns the status of background model update
*/
CV_WRAP virtual bool getUpdateBackgroundModel() const = 0;
/** @brief Sets the status of background model update
*/
CV_WRAP virtual void setUpdateBackgroundModel(bool update) = 0;
/** @brief Returns the minimum value taken on by pixels in image sequence. Usually 0.
*/
CV_WRAP virtual double getMinVal() const = 0;
/** @brief Sets the minimum value taken on by pixels in image sequence.
*/
CV_WRAP virtual void setMinVal(double val) = 0;
/** @brief Returns the maximum value taken on by pixels in image sequence. e.g. 1.0 or 255.
*/
CV_WRAP virtual double getMaxVal() const = 0;
/** @brief Sets the maximum value taken on by pixels in image sequence.
*/
CV_WRAP virtual void setMaxVal(double val) = 0;
};
/** @brief Creates a GMG Background Subtractor
@param initializationFrames number of frames used to initialize the background models.
@param decisionThreshold Threshold value, above which it is marked foreground, else background.
*/
CV_EXPORTS_W Ptr<BackgroundSubtractorGMG> createBackgroundSubtractorGMG(int initializationFrames=120,
double decisionThreshold=0.8);
/** @brief Background subtraction based on counting.
About as fast as MOG2 on a high-end system.
More than twice as fast as MOG2 on cheap hardware (benchmarked on a Raspberry Pi 3).
%Algorithm by Sagi Zeevi ( https://github.com/sagi-z/BackgroundSubtractorCNT )
*/
class CV_EXPORTS_W BackgroundSubtractorCNT : public BackgroundSubtractor
{
public:
// BackgroundSubtractor interface
CV_WRAP virtual void apply(InputArray image, OutputArray fgmask, double learningRate=-1) CV_OVERRIDE = 0;
CV_WRAP virtual void getBackgroundImage(OutputArray backgroundImage) const CV_OVERRIDE = 0;
/** @brief Returns number of frames with same pixel color to consider stable.
*/
CV_WRAP virtual int getMinPixelStability() const = 0;
/** @brief Sets the number of frames with same pixel color to consider stable.
*/
CV_WRAP virtual void setMinPixelStability(int value) = 0;
/** @brief Returns maximum allowed credit for a pixel in history.
*/
CV_WRAP virtual int getMaxPixelStability() const = 0;
/** @brief Sets the maximum allowed credit for a pixel in history.
*/
CV_WRAP virtual void setMaxPixelStability(int value) = 0;
/** @brief Returns if we're giving a pixel credit for being stable for a long time.
*/
CV_WRAP virtual bool getUseHistory() const = 0;
/** @brief Sets if we're giving a pixel credit for being stable for a long time.
*/
CV_WRAP virtual void setUseHistory(bool value) = 0;
/** @brief Returns if we're parallelizing the algorithm.
*/
CV_WRAP virtual bool getIsParallel() const = 0;
/** @brief Sets if we're parallelizing the algorithm.
*/
CV_WRAP virtual void setIsParallel(bool value) = 0;
};
/** @brief Creates a CNT Background Subtractor
@param minPixelStability number of frames with same pixel color to consider stable
@param useHistory determines if we're giving a pixel credit for being stable for a long time
@param maxPixelStability maximum allowed credit for a pixel in history
@param isParallel determines if we're parallelizing the algorithm
*/
CV_EXPORTS_W Ptr<BackgroundSubtractorCNT>
createBackgroundSubtractorCNT(int minPixelStability = 15,
bool useHistory = true,
int maxPixelStability = 15*60,
bool isParallel = true);
enum LSBPCameraMotionCompensation {
LSBP_CAMERA_MOTION_COMPENSATION_NONE = 0,
LSBP_CAMERA_MOTION_COMPENSATION_LK
};
/** @brief Implementation of the GSOC algorithm, named after the Google Summer of Code during which it was implemented; it does not originate from any paper.
This algorithm demonstrates better performance on the CDNET 2014 dataset than the other background subtraction algorithms in OpenCV.
*/
class CV_EXPORTS_W BackgroundSubtractorGSOC : public BackgroundSubtractor
{
public:
// BackgroundSubtractor interface
CV_WRAP virtual void apply(InputArray image, OutputArray fgmask, double learningRate=-1) CV_OVERRIDE = 0;
CV_WRAP virtual void getBackgroundImage(OutputArray backgroundImage) const CV_OVERRIDE = 0;
};
/** @brief Background Subtraction using Local SVD Binary Pattern. More details about the algorithm can be found at @cite LGuo2016
*/
class CV_EXPORTS_W BackgroundSubtractorLSBP : public BackgroundSubtractor
{
public:
// BackgroundSubtractor interface
CV_WRAP virtual void apply(InputArray image, OutputArray fgmask, double learningRate=-1) CV_OVERRIDE = 0;
CV_WRAP virtual void getBackgroundImage(OutputArray backgroundImage) const CV_OVERRIDE = 0;
};
/** @brief This is for calculation of the LSBP descriptors.
*/
class CV_EXPORTS_W BackgroundSubtractorLSBPDesc
{
public:
static void calcLocalSVDValues(OutputArray localSVDValues, const Mat& frame);
static void computeFromLocalSVDValues(OutputArray desc, const Mat& localSVDValues, const Point2i* LSBPSamplePoints);
static void compute(OutputArray desc, const Mat& frame, const Point2i* LSBPSamplePoints);
};
/** @brief Creates an instance of BackgroundSubtractorGSOC algorithm.
Implementation of the GSOC algorithm, named after the Google Summer of Code during which it was implemented; it does not originate from any paper.
@param mc Whether to use camera motion compensation.
@param nSamples Number of samples to maintain at each point of the frame.
@param replaceRate Probability of replacing the old sample - how fast the model will update itself.
@param propagationRate Probability of propagating to neighbors.
@param hitsThreshold How many positives the sample must get before it will be considered as a possible replacement.
@param alpha Scale coefficient for threshold.
@param beta Bias coefficient for threshold.
@param blinkingSupressionDecay Blinking suppression decay factor.
@param blinkingSupressionMultiplier Blinking suppression multiplier.
@param noiseRemovalThresholdFacBG Strength of the noise removal for background points.
@param noiseRemovalThresholdFacFG Strength of the noise removal for foreground points.
*/
CV_EXPORTS_W Ptr<BackgroundSubtractorGSOC> createBackgroundSubtractorGSOC(int mc = LSBP_CAMERA_MOTION_COMPENSATION_NONE, int nSamples = 20, float replaceRate = 0.003f, float propagationRate = 0.01f, int hitsThreshold = 32, float alpha = 0.01f, float beta = 0.0022f, float blinkingSupressionDecay = 0.1f, float blinkingSupressionMultiplier = 0.1f, float noiseRemovalThresholdFacBG = 0.0004f, float noiseRemovalThresholdFacFG = 0.0008f);
/** @brief Creates an instance of BackgroundSubtractorLSBP algorithm.
Background Subtraction using Local SVD Binary Pattern. More details about the algorithm can be found at @cite LGuo2016
@param mc Whether to use camera motion compensation.
@param nSamples Number of samples to maintain at each point of the frame.
@param LSBPRadius LSBP descriptor radius.
@param Tlower Lower bound for T-values. See @cite LGuo2016 for details.
@param Tupper Upper bound for T-values. See @cite LGuo2016 for details.
@param Tinc Increase step for T-values. See @cite LGuo2016 for details.
@param Tdec Decrease step for T-values. See @cite LGuo2016 for details.
@param Rscale Scale coefficient for threshold values.
@param Rincdec Increase/Decrease step for threshold values.
@param noiseRemovalThresholdFacBG Strength of the noise removal for background points.
@param noiseRemovalThresholdFacFG Strength of the noise removal for foreground points.
@param LSBPthreshold Threshold for LSBP binary string.
@param minCount Minimal number of matches for sample to be considered as foreground.
*/
CV_EXPORTS_W Ptr<BackgroundSubtractorLSBP> createBackgroundSubtractorLSBP(int mc = LSBP_CAMERA_MOTION_COMPENSATION_NONE, int nSamples = 20, int LSBPRadius = 16, float Tlower = 2.0f, float Tupper = 32.0f, float Tinc = 1.0f, float Tdec = 0.05f, float Rscale = 10.0f, float Rincdec = 0.005f, float noiseRemovalThresholdFacBG = 0.0004f, float noiseRemovalThresholdFacFG = 0.0008f, int LSBPthreshold = 8, int minCount = 2);
/** @brief Synthetic frame sequence generator for testing background subtraction algorithms.
It will generate the moving object on top of the background.
It will apply some distortion to the background to make the test more complex.
*/
class CV_EXPORTS_W SyntheticSequenceGenerator : public Algorithm
{
private:
const double amplitude;
const double wavelength;
const double wavespeed;
const double objspeed;
unsigned timeStep;
Point2d pos;
Point2d dir;
Mat background;
Mat object;
RNG rng;
public:
/** @brief Creates an instance of SyntheticSequenceGenerator.
@param background Background image for object.
@param object Object image which will move slowly over the background.
@param amplitude Amplitude of wave distortion applied to background.
@param wavelength Length of waves in distortion applied to background.
@param wavespeed How fast waves will move.
@param objspeed How fast object will fly over background.
*/
CV_WRAP SyntheticSequenceGenerator(InputArray background, InputArray object, double amplitude, double wavelength, double wavespeed, double objspeed);
/** @brief Obtain the next frame in the sequence.
@param frame Output frame.
@param gtMask Output ground-truth (reference) segmentation mask object/background.
*/
CV_WRAP void getNextFrame(OutputArray frame, OutputArray gtMask);
};
/** @brief Creates an instance of SyntheticSequenceGenerator.
@param background Background image for object.
@param object Object image which will move slowly over the background.
@param amplitude Amplitude of wave distortion applied to background.
@param wavelength Length of waves in distortion applied to background.
@param wavespeed How fast waves will move.
@param objspeed How fast object will fly over background.
*/
CV_EXPORTS_W Ptr<SyntheticSequenceGenerator> createSyntheticSequenceGenerator(InputArray background, InputArray object, double amplitude = 2.0, double wavelength = 20.0, double wavespeed = 0.2, double objspeed = 6.0);
//! @}
}
}
#endif
#endif
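To show how the factory functions above plug into the common BackgroundSubtractor interface, here is a small illustrative sketch (camera index and window name are arbitrary; any of the other create* functions in this header could be substituted for the CNT one):

#include <opencv2/bgsegm.hpp>
#include <opencv2/videoio.hpp>
#include <opencv2/highgui.hpp>

int main()
{
    cv::VideoCapture cap(0);                   // default camera (arbitrary source)
    if (!cap.isOpened()) return 1;

    // Counting-based subtractor with its documented default parameters.
    cv::Ptr<cv::bgsegm::BackgroundSubtractorCNT> subtractor =
        cv::bgsegm::createBackgroundSubtractorCNT(15, true, 15 * 60, true);

    cv::Mat frame, fgMask;
    while (cap.read(frame))
    {
        subtractor->apply(frame, fgMask);      // default learning rate (-1)
        cv::imshow("foreground mask", fgMask);
        if (cv::waitKey(30) >= 0) break;
    }
    return 0;
}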


@@ -0,0 +1,60 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_BIOINSPIRED_HPP__
#define __OPENCV_BIOINSPIRED_HPP__
#include "opencv2/core.hpp"
#include "opencv2/bioinspired/retina.hpp"
#include "opencv2/bioinspired/retinafasttonemapping.hpp"
#include "opencv2/bioinspired/transientareassegmentationmodule.hpp"
/** @defgroup bioinspired Biologically inspired vision models and derived tools
The module provides biological visual system models (human visual system and others). It also
provides derived objects that take advantage of those bio-inspired models.
@ref bioinspired_retina
*/
#endif


@@ -0,0 +1,48 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifdef __OPENCV_BUILD
#error this is a compatibility header which should not be used inside the OpenCV library
#endif
#include "opencv2/bioinspired.hpp"


@@ -0,0 +1,454 @@
/*#******************************************************************************
** IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
**
** By downloading, copying, installing or using the software you agree to this license.
** If you do not agree to this license, do not download, install,
** copy or use the software.
**
**
** bioinspired : interfaces allowing OpenCV users to integrate Human Vision System models. Presented models originate from Jeanny Herault's original research and have been reused and adapted by the author & collaborators for computer vision applications since his thesis with Alice Caplier at Gipsa-Lab.
** Use: extract features from still images & image sequences, from contour details to spatio-temporal motion features, etc., for high level visual scene analysis. Also contributes to image enhancement/compression such as tone mapping.
**
** Maintainers : Listic lab (code author current affiliation & applications) and Gipsa Lab (original research origins & applications)
**
** Creation - enhancement process 2007-2015
** Author: Alexandre Benoit (benoit.alexandre.vision@gmail.com), LISTIC lab, Annecy le vieux, France
**
** These algorithms have been developed by Alexandre BENOIT since his thesis with Alice Caplier at Gipsa-Lab (www.gipsa-lab.inpg.fr) and the research he pursues at LISTIC Lab (www.listic.univ-savoie.fr).
** Refer to the following research paper for more information:
** Benoit A., Caplier A., Durette B., Herault, J., "USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011
** This work has been carried out thanks to Jeanny Herault, whose research and great discussions are the basis of all this work; please take a look at his book:
** Vision: Images, Signals and Neural Networks: Models of Neural Processing in Visual Perception (Progress in Neural Processing),By: Jeanny Herault, ISBN: 9814273686. WAPI (Tower ID): 113266891.
**
** The retina filter includes the research contributions of PhD/research colleagues whose code has been adapted by the author :
** _take a look at the retinacolor.hpp module to discover Brice Chaix de Lavarene color mosaicing/demosaicing and the reference paper:
** ====> B. Chaix de Lavarene, D. Alleysson, B. Durette, J. Herault (2007). "Efficient demosaicing through recursive filtering", IEEE International Conference on Image Processing ICIP 2007
** _take a look at imagelogpolprojection.hpp to discover retina spatial log sampling which originates from Barthelemy Durette phd with Jeanny Herault. A Retina / V1 cortex projection is also proposed and originates from Jeanny's discussions.
** ====> more information in the above-cited book by Jeanny Herault.
**
** License Agreement
** For Open Source Computer Vision Library
**
** Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
** Copyright (C) 2008-2011, Willow Garage Inc., all rights reserved.
**
** For Human Visual System tools (bioinspired)
** Copyright (C) 2007-2015, LISTIC Lab, Annecy le Vieux and GIPSA Lab, Grenoble, France, all rights reserved.
**
** Third party copyrights are property of their respective owners.
**
** Redistribution and use in source and binary forms, with or without modification,
** are permitted provided that the following conditions are met:
**
** * Redistributions of source code must retain the above copyright notice,
** this list of conditions and the following disclaimer.
**
** * Redistributions in binary form must reproduce the above copyright notice,
** this list of conditions and the following disclaimer in the documentation
** and/or other materials provided with the distribution.
**
** * The name of the copyright holders may not be used to endorse or promote products
** derived from this software without specific prior written permission.
**
** This software is provided by the copyright holders and contributors "as is" and
** any express or implied warranties, including, but not limited to, the implied
** warranties of merchantability and fitness for a particular purpose are disclaimed.
** In no event shall the Intel Corporation or contributors be liable for any direct,
** indirect, incidental, special, exemplary, or consequential damages
** (including, but not limited to, procurement of substitute goods or services;
** loss of use, data, or profits; or business interruption) however caused
** and on any theory of liability, whether in contract, strict liability,
** or tort (including negligence or otherwise) arising in any way out of
** the use of this software, even if advised of the possibility of such damage.
*******************************************************************************/
#ifndef __OPENCV_BIOINSPIRED_RETINA_HPP__
#define __OPENCV_BIOINSPIRED_RETINA_HPP__
/**
@file
@date Jul 19, 2011
@author Alexandre Benoit
*/
#include "opencv2/core.hpp" // for all OpenCV core functionalities access, including cv::Exception support
namespace cv{
namespace bioinspired{
//! @addtogroup bioinspired
//! @{
enum {
RETINA_COLOR_RANDOM, //!< each pixel position is either R, G or B in a random choice
RETINA_COLOR_DIAGONAL,//!< color sampling is RGBRGBRGB..., line 2 BRGBRGBRG..., line 3, GBRGBRGBR...
RETINA_COLOR_BAYER//!< standard bayer sampling
};
/** @brief retina model parameters structure
For better clarity, check the explanations in the comments of the methods setupOPLandIPLParvoChannel and setupIPLMagnoChannel
Here is the default configuration file of the retina module. It gives results such as the first
retina output shown on the top of this page.
@code{xml}
<?xml version="1.0"?>
<opencv_storage>
<OPLandIPLparvo>
<colorMode>1</colorMode>
<normaliseOutput>1</normaliseOutput>
<photoreceptorsLocalAdaptationSensitivity>7.5e-01</photoreceptorsLocalAdaptationSensitivity>
<photoreceptorsTemporalConstant>9.0e-01</photoreceptorsTemporalConstant>
<photoreceptorsSpatialConstant>5.3e-01</photoreceptorsSpatialConstant>
<horizontalCellsGain>0.01</horizontalCellsGain>
<hcellsTemporalConstant>0.5</hcellsTemporalConstant>
<hcellsSpatialConstant>7.</hcellsSpatialConstant>
<ganglionCellsSensitivity>7.5e-01</ganglionCellsSensitivity></OPLandIPLparvo>
<IPLmagno>
<normaliseOutput>1</normaliseOutput>
<parasolCells_beta>0.</parasolCells_beta>
<parasolCells_tau>0.</parasolCells_tau>
<parasolCells_k>7.</parasolCells_k>
<amacrinCellsTemporalCutFrequency>2.0e+00</amacrinCellsTemporalCutFrequency>
<V0CompressionParameter>9.5e-01</V0CompressionParameter>
<localAdaptintegration_tau>0.</localAdaptintegration_tau>
<localAdaptintegration_k>7.</localAdaptintegration_k></IPLmagno>
</opencv_storage>
@endcode
Here is the 'realistic' setup used to obtain the second retina output shown on the top of this page.
@code{xml}
<?xml version="1.0"?>
<opencv_storage>
<OPLandIPLparvo>
<colorMode>1</colorMode>
<normaliseOutput>1</normaliseOutput>
<photoreceptorsLocalAdaptationSensitivity>8.9e-01</photoreceptorsLocalAdaptationSensitivity>
<photoreceptorsTemporalConstant>9.0e-01</photoreceptorsTemporalConstant>
<photoreceptorsSpatialConstant>5.3e-01</photoreceptorsSpatialConstant>
<horizontalCellsGain>0.3</horizontalCellsGain>
<hcellsTemporalConstant>0.5</hcellsTemporalConstant>
<hcellsSpatialConstant>7.</hcellsSpatialConstant>
<ganglionCellsSensitivity>8.9e-01</ganglionCellsSensitivity></OPLandIPLparvo>
<IPLmagno>
<normaliseOutput>1</normaliseOutput>
<parasolCells_beta>0.</parasolCells_beta>
<parasolCells_tau>0.</parasolCells_tau>
<parasolCells_k>7.</parasolCells_k>
<amacrinCellsTemporalCutFrequency>2.0e+00</amacrinCellsTemporalCutFrequency>
<V0CompressionParameter>9.5e-01</V0CompressionParameter>
<localAdaptintegration_tau>0.</localAdaptintegration_tau>
<localAdaptintegration_k>7.</localAdaptintegration_k></IPLmagno>
</opencv_storage>
@endcode
*/
struct RetinaParameters{
//! Outer Plexiform Layer (OPL) and Inner Plexiform Layer Parvocellular (IplParvo) parameters
struct OPLandIplParvoParameters{
OPLandIplParvoParameters():colorMode(true),
normaliseOutput(true),
photoreceptorsLocalAdaptationSensitivity(0.75f),
photoreceptorsTemporalConstant(0.9f),
photoreceptorsSpatialConstant(0.53f),
horizontalCellsGain(0.01f),
hcellsTemporalConstant(0.5f),
hcellsSpatialConstant(7.f),
ganglionCellsSensitivity(0.75f) { } // default setup
bool colorMode, normaliseOutput;
float photoreceptorsLocalAdaptationSensitivity, photoreceptorsTemporalConstant, photoreceptorsSpatialConstant, horizontalCellsGain, hcellsTemporalConstant, hcellsSpatialConstant, ganglionCellsSensitivity;
};
//! Inner Plexiform Layer Magnocellular channel (IplMagno)
struct IplMagnoParameters{
IplMagnoParameters():
normaliseOutput(true),
parasolCells_beta(0.f),
parasolCells_tau(0.f),
parasolCells_k(7.f),
amacrinCellsTemporalCutFrequency(2.0f),
V0CompressionParameter(0.95f),
localAdaptintegration_tau(0.f),
localAdaptintegration_k(7.f) { } // default setup
bool normaliseOutput;
float parasolCells_beta, parasolCells_tau, parasolCells_k, amacrinCellsTemporalCutFrequency, V0CompressionParameter, localAdaptintegration_tau, localAdaptintegration_k;
};
OPLandIplParvoParameters OPLandIplParvo;
IplMagnoParameters IplMagno;
};
/** @brief class which allows the Gipsa/Listic Labs model to be used with OpenCV.
This retina model allows spatio-temporal image processing (applied on still images, video sequences).
As a summary, these are the retina model properties:
- It applies a spectral whitening (mid-frequency details enhancement)
- high frequency spatio-temporal noise reduction
- low frequency luminance reduction (luminance range compression)
- local logarithmic luminance compression allows details to be enhanced in low light conditions
USE : this model can be used basically for spatio-temporal video effects but also for :
_using the getParvo method output matrix : texture analysis with enhanced signal to noise ratio and enhanced details, robust against input image luminance ranges
_using the getMagno method output matrix : motion analysis, also with the previously cited properties
for more information, refer to the following papers :
Benoit A., Caplier A., Durette B., Herault, J., "USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011
Vision: Images, Signals and Neural Networks: Models of Neural Processing in Visual Perception (Progress in Neural Processing),By: Jeanny Herault, ISBN: 9814273686. WAPI (Tower ID): 113266891.
The retina filter includes the research contributions of PhD/research colleagues whose code has been adapted by the author :
take a look at the retinacolor.hpp module to discover Brice Chaix de Lavarene color mosaicing/demosaicing and the reference paper:
B. Chaix de Lavarene, D. Alleysson, B. Durette, J. Herault (2007). "Efficient demosaicing through recursive filtering", IEEE International Conference on Image Processing ICIP 2007
take a look at imagelogpolprojection.hpp to discover retina spatial log sampling which originates from Barthelemy Durette phd with Jeanny Herault. A Retina / V1 cortex projection is also proposed and originates from Jeanny's discussions.
more information in the above-cited book by Jeanny Herault.
*/
class CV_EXPORTS_W Retina : public Algorithm {
public:
/** @brief Retrieve retina input buffer size
@return the retina input buffer size
*/
CV_WRAP virtual Size getInputSize()=0;
/** @brief Retrieve retina output buffer size that can be different from the input if a spatial log
transformation is applied
@return the retina output buffer size
*/
CV_WRAP virtual Size getOutputSize()=0;
/** @brief Try to open an XML retina parameters file to adjust current retina instance setup
- if the xml file does not exist, then default setup is applied
- warning, an exception is thrown if the parsed XML file is not valid
@param retinaParameterFile the parameters filename
@param applyDefaultSetupOnFailure set to true to apply the default setup if loading the file fails
You can retrieve the current parameters structure using the method Retina::getParameters and update
it before running method Retina::setup.
*/
CV_WRAP virtual void setup(String retinaParameterFile="", const bool applyDefaultSetupOnFailure=true)=0;
/** @overload
@param fs the open Filestorage which contains retina parameters
@param applyDefaultSetupOnFailure set to true to apply the default setup if loading the parameters fails
*/
virtual void setup(cv::FileStorage &fs, const bool applyDefaultSetupOnFailure=true)=0;
/** @overload
@param newParameters a parameters structures updated with the new target configuration.
*/
virtual void setup(RetinaParameters newParameters)=0;
/**
@return the current parameters setup
*/
virtual RetinaParameters getParameters()=0;
/** @brief Outputs a string showing the used parameters setup
@return a string which contains formatted parameters information
*/
CV_WRAP virtual const String printSetup()=0;
/** @brief Write xml/yml formatted parameters information
@param fs the filename of the xml file that will be opened and written with formatted parameters
information
*/
CV_WRAP virtual void write( String fs ) const=0;
/** @overload */
virtual void write( FileStorage& fs ) const CV_OVERRIDE = 0;
/** @brief Setup the OPL and IPL parvo channels (see biological model)
OPL is referred to as the Outer Plexiform Layer of the retina; it allows the spatio-temporal filtering
which whitens the spectrum and reduces spatio-temporal noise while attenuating global luminance
(low frequency energy). IPL parvo is the OPL next processing stage; it refers to a part of the
Inner Plexiform layer of the retina and allows high contour sensitivity in foveal vision. See
reference papers for more information.
for more information, please have a look at the paper Benoit A., Caplier A., Durette B., Herault, J., "USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011
@param colorMode specifies if color is processed (true) or not (false); in the latter case gray
level images are processed
@param normaliseOutput specifies if the output is rescaled between 0 and 255 (true) or not (false)
@param photoreceptorsLocalAdaptationSensitivity the photoreceptors sensitivity range is 0-1
(more log compression effect when the value increases)
@param photoreceptorsTemporalConstant the time constant of the first order low pass filter of
the photoreceptors, use it to cut high temporal frequencies (noise or fast motion), unit is
frames, typical value is 1 frame
@param photoreceptorsSpatialConstant the spatial constant of the first order low pass filter of
the photoreceptors, use it to cut high spatial frequencies (noise or thick contours), unit is
pixels, typical value is 1 pixel
@param horizontalCellsGain gain of the horizontal cells network; if 0, then the mean value of
the output is zero; if the parameter is near 1, the luminance is not filtered and is
still reachable at the output; typical value is 0
@param HcellsTemporalConstant the time constant of the first order low pass filter of the
horizontal cells, use it to cut low temporal frequencies (local luminance variations), unit is
frames, typical value is 1 frame, as the photoreceptors
@param HcellsSpatialConstant the spatial constant of the first order low pass filter of the
horizontal cells, use it to cut low spatial frequencies (local luminance), unit is pixels,
typical value is 5 pixel, this value is also used for local contrast computing when computing
the local contrast adaptation at the ganglion cells level (Inner Plexiform Layer parvocellular
channel model)
@param ganglionCellsSensitivity the compression strength of the ganglion cells local adaptation
output; set a value between 0.6 and 1 for best results; a higher value increases the low
value sensitivity... and the output saturates faster, recommended value: 0.7
*/
CV_WRAP virtual void setupOPLandIPLParvoChannel(const bool colorMode=true, const bool normaliseOutput = true, const float photoreceptorsLocalAdaptationSensitivity=0.7f, const float photoreceptorsTemporalConstant=0.5f, const float photoreceptorsSpatialConstant=0.53f, const float horizontalCellsGain=0.f, const float HcellsTemporalConstant=1.f, const float HcellsSpatialConstant=7.f, const float ganglionCellsSensitivity=0.7f)=0;
/** @brief Set parameters values for the Inner Plexiform Layer (IPL) magnocellular channel
this channel processes signals output from OPL processing stage in peripheral vision, it allows
motion information enhancement. It is decorrelated from the details channel. See reference
papers for more details.
@param normaliseOutput specifies if the output is rescaled between 0 and 255 (true) or not (false)
@param parasolCells_beta the low pass filter gain used for local contrast adaptation at the
IPL level of the retina (for ganglion cells local adaptation), typical value is 0
@param parasolCells_tau the low pass filter time constant used for local contrast adaptation
at the IPL level of the retina (for ganglion cells local adaptation), unit is frame, typical
value is 0 (immediate response)
@param parasolCells_k the low pass filter spatial constant used for local contrast adaptation
at the IPL level of the retina (for ganglion cells local adaptation), unit is pixels, typical
value is 5
@param amacrinCellsTemporalCutFrequency the time constant of the first order high pass filter of
the magnocellular way (motion information channel), unit is frames, typical value is 1.2
@param V0CompressionParameter the compression strength of the ganglion cells local adaptation
output; set a value between 0.6 and 1 for best results; a higher value increases the low
value sensitivity... and the output saturates faster, recommended value: 0.95
@param localAdaptintegration_tau specifies the temporal constant of the low pass filter
involved in the computation of the local "motion mean" for the local adaptation computation
@param localAdaptintegration_k specifies the spatial constant of the low pass filter involved
in the computation of the local "motion mean" for the local adaptation computation
*/
CV_WRAP virtual void setupIPLMagnoChannel(const bool normaliseOutput = true, const float parasolCells_beta=0.f, const float parasolCells_tau=0.f, const float parasolCells_k=7.f, const float amacrinCellsTemporalCutFrequency=1.2f, const float V0CompressionParameter=0.95f, const float localAdaptintegration_tau=0.f, const float localAdaptintegration_k=7.f)=0;
/** @brief Method which allows the retina to be applied to an input image;
after run, the encapsulated retina module is ready to deliver its outputs using dedicated
accessors, see the getParvo and getMagno methods
@param inputImage the input Mat image to be processed, can be gray level or BGR coded in any
format (from 8bit to 16bits)
*/
CV_WRAP virtual void run(InputArray inputImage)=0;
/** @brief Method which processes an image with the aim of correcting its luminance: correct
backlight problems, enhance details in shadows.
This method is designed to perform High Dynamic Range image tone mapping (compress \>8bit/pixel
images to 8bit/pixel). This is a simplified version of the Retina Parvocellular model
(simplified version of the run/getParvo methods call) since it does not include the
spatio-temporal filter modelling the Outer Plexiform Layer of the retina that performs spectral
whitening and much more. However, it works great for tone mapping and in a faster way.
Check the demos and experiments section to see examples and the way to perform tone mapping
using the original retina model and the method.
@param inputImage the input image to process (should be coded in float format : CV_32F,
CV_32FC1, CV_32FC3, CV_32FC4; the 4th channel won't be considered).
@param outputToneMappedImage the output 8bit/channel tone mapped image (CV_8U or CV_8UC3 format).
*/
CV_WRAP virtual void applyFastToneMapping(InputArray inputImage, OutputArray outputToneMappedImage)=0;
/** @brief Accessor of the details channel of the retina (models foveal vision).
Warning, getParvoRAW methods return buffers that are not rescaled within range [0;255] while
the non RAW method allows a normalized matrix to be retrieved.
@param retinaOutput_parvo the output buffer (reallocated if necessary), format can be :
- a Mat, this output is rescaled for standard 8bits image processing use in OpenCV
- RAW methods actually return a 1D matrix (encoding is R1, R2, ... Rn, G1, G2, ..., Gn, B1,
B2, ...Bn), this output is the original retina filter model output, without any
quantification or rescaling.
@see getParvoRAW
*/
CV_WRAP virtual void getParvo(OutputArray retinaOutput_parvo)=0;
/** @brief Accessor of the details channel of the retina (models foveal vision).
@see getParvo
*/
CV_WRAP virtual void getParvoRAW(OutputArray retinaOutput_parvo)=0;
/** @brief Accessor of the motion channel of the retina (models peripheral vision).
Warning, getMagnoRAW methods return buffers that are not rescaled within range [0;255] while
the non RAW method allows a normalized matrix to be retrieved.
@param retinaOutput_magno the output buffer (reallocated if necessary), format can be :
- a Mat, this output is rescaled for standard 8bits image processing use in OpenCV
- RAW methods actually return a 1D matrix (encoding is M1, M2,... Mn), this output is the
original retina filter model output, without any quantification or rescaling.
@see getMagnoRAW
*/
CV_WRAP virtual void getMagno(OutputArray retinaOutput_magno)=0;
/** @brief Accessor of the motion channel of the retina (models peripheral vision).
@see getMagno
*/
CV_WRAP virtual void getMagnoRAW(OutputArray retinaOutput_magno)=0;
/** @overload */
CV_WRAP virtual const Mat getMagnoRAW() const=0;
/** @overload */
CV_WRAP virtual const Mat getParvoRAW() const=0;
/** @brief Activate color saturation as the final step of the color demultiplexing process -\> this
saturation is a sigmoid function applied to each channel of the demultiplexed image.
@param saturateColors boolean that activates color saturation (if true) or deactivates it (if false)
@param colorSaturationValue the saturation factor : a simple factor applied on the chrominance
buffers
*/
CV_WRAP virtual void setColorSaturation(const bool saturateColors=true, const float colorSaturationValue=4.0f)=0;
/** @brief Clears all retina buffers
(equivalent to opening the eyes after a long period of eyes closed ;o) watch out for the temporal
transition occurring just after this method call.
*/
CV_WRAP virtual void clearBuffers()=0;
/** @brief Activate/deactivate the Magnocellular pathway processing (motion information extraction), by
default, it is activated
@param activate true if Magnocellular output should be activated, false if not... if activated,
the Magnocellular output can be retrieved using the **getMagno** methods
*/
CV_WRAP virtual void activateMovingContoursProcessing(const bool activate)=0;
/** @brief Activate/deactivate the Parvocellular pathway processing (contours information extraction), by
default, it is activated
@param activate true if Parvocellular (contours information extraction) output should be
activated, false if not... if activated, the Parvocellular output can be retrieved using the
Retina::getParvo methods
*/
CV_WRAP virtual void activateContoursProcessing(const bool activate)=0;
/** @overload */
CV_WRAP static Ptr<Retina> create(Size inputSize);
/** @brief Constructors from standardized interfaces : retrieve a smart pointer to a Retina instance
@param inputSize the input frame size
@param colorMode the chosen processing mode : with or without color processing
@param colorSamplingMethod specifies which kind of color sampling will be used :
- cv::bioinspired::RETINA_COLOR_RANDOM: each pixel position is either R, G or B in a random choice
- cv::bioinspired::RETINA_COLOR_DIAGONAL: color sampling is RGBRGBRGB..., line 2 BRGBRGBRG..., line 3, GBRGBRGBR...
- cv::bioinspired::RETINA_COLOR_BAYER: standard bayer sampling
@param useRetinaLogSampling activate retina log sampling, if true, the 2 following parameters can
be used
@param reductionFactor only useful if param useRetinaLogSampling=true, specifies the reduction
factor of the output frame (as the center (fovea) is high resolution and corners can be
downscaled, a reduction of the output is allowed without precision loss)
@param samplingStrength only useful if param useRetinaLogSampling=true, specifies the strength of
the log scale that is applied
*/
CV_WRAP static Ptr<Retina> create(Size inputSize, const bool colorMode,
int colorSamplingMethod=RETINA_COLOR_BAYER,
const bool useRetinaLogSampling=false,
const float reductionFactor=1.0f, const float samplingStrength=10.0f);
};
//! @}
}
}
#endif /* __OPENCV_BIOINSPIRED_RETINA_HPP__ */
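Tying the Retina interface together, a minimal hedged sketch of the usual create/run/getParvo/getMagno cycle (video source and window names are arbitrary; default retina parameters are used):

#include <opencv2/bioinspired.hpp>
#include <opencv2/videoio.hpp>
#include <opencv2/highgui.hpp>

int main()
{
    cv::VideoCapture cap(0);                   // arbitrary video source
    cv::Mat frame;
    if (!cap.isOpened() || !cap.read(frame)) return 1;

    // Allocate a color retina matching the input frame size (default parameters).
    cv::Ptr<cv::bioinspired::Retina> retina =
        cv::bioinspired::Retina::create(frame.size());
    // retina->setup("retinaParams.xml");      // optionally load a parameters file (see RetinaParameters)

    cv::Mat parvo, magno;
    do
    {
        retina->run(frame);                    // feed the current frame
        retina->getParvo(parvo);               // details channel (foveal vision)
        retina->getMagno(magno);               // motion channel (peripheral vision)
        cv::imshow("parvo", parvo);
        cv::imshow("magno", magno);
    } while (cap.read(frame) && cv::waitKey(5) < 0);

    return 0;
}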


@@ -0,0 +1,138 @@
/*#******************************************************************************
** IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
**
** By downloading, copying, installing or using the software you agree to this license.
** If you do not agree to this license, do not download, install,
** copy or use the software.
**
**
** bioinspired : interfaces allowing OpenCV users to integrate Human Vision System models. Presented models originate from Jeanny Herault's original research and have been reused and adapted by the author & collaborators for computer vision applications since his thesis with Alice Caplier at Gipsa-Lab.
**
** Maintainers : Listic lab (code author current affiliation & applications) and Gipsa Lab (original research origins & applications)
**
** Creation - enhancement process 2007-2013
** Author: Alexandre Benoit (benoit.alexandre.vision@gmail.com), LISTIC lab, Annecy le vieux, France
**
** These algorithms have been developed by Alexandre BENOIT since his thesis with Alice Caplier at Gipsa-Lab (www.gipsa-lab.inpg.fr) and the research he pursues at LISTIC Lab (www.listic.univ-savoie.fr).
** Refer to the following research paper for more information:
** Benoit A., Caplier A., Durette B., Herault, J., "USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011
** This work has been carried out thanks to Jeanny Herault, whose research and great discussions are the basis of all this work; please take a look at his book:
** Vision: Images, Signals and Neural Networks: Models of Neural Processing in Visual Perception (Progress in Neural Processing),By: Jeanny Herault, ISBN: 9814273686. WAPI (Tower ID): 113266891.
**
**
**
**
**
** This class is based on image processing tools of the author and is already used within the Retina class (this is the same code as the method retina::applyFastToneMapping, but in an independent class; it is light from a memory requirement point of view). It implements an adaptation of the efficient tone mapping algorithm proposed in the work of David Alleysson, Sabine Susstrunk and Laurence Meylan, please cite:
** -> Meylan L., Alleysson D., and Susstrunk S., A Model of Retinal Local Adaptation for the Tone Mapping of Color Filter Array Images, Journal of Optical Society of America, A, Vol. 24, N 9, September, 1st, 2007, pp. 2807-2816
**
**
** License Agreement
** For Open Source Computer Vision Library
**
** Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
** Copyright (C) 2008-2011, Willow Garage Inc., all rights reserved.
**
** For Human Visual System tools (bioinspired)
** Copyright (C) 2007-2011, LISTIC Lab, Annecy le Vieux and GIPSA Lab, Grenoble, France, all rights reserved.
**
** Third party copyrights are property of their respective owners.
**
** Redistribution and use in source and binary forms, with or without modification,
** are permitted provided that the following conditions are met:
**
** * Redistributions of source code must retain the above copyright notice,
** this list of conditions and the following disclaimer.
**
** * Redistributions in binary form must reproduce the above copyright notice,
** this list of conditions and the following disclaimer in the documentation
** and/or other materials provided with the distribution.
**
** * The name of the copyright holders may not be used to endorse or promote products
** derived from this software without specific prior written permission.
**
** This software is provided by the copyright holders and contributors "as is" and
** any express or implied warranties, including, but not limited to, the implied
** warranties of merchantability and fitness for a particular purpose are disclaimed.
** In no event shall the Intel Corporation or contributors be liable for any direct,
** indirect, incidental, special, exemplary, or consequential damages
** (including, but not limited to, procurement of substitute goods or services;
** loss of use, data, or profits; or business interruption) however caused
** and on any theory of liability, whether in contract, strict liability,
** or tort (including negligence or otherwise) arising in any way out of
** the use of this software, even if advised of the possibility of such damage.
*******************************************************************************/
#ifndef __OPENCV_BIOINSPIRED_RETINAFASTTONEMAPPING_HPP__
#define __OPENCV_BIOINSPIRED_RETINAFASTTONEMAPPING_HPP__
/**
@file
@date May 26, 2013
@author Alexandre Benoit
*/
#include "opencv2/core.hpp" // for all OpenCV core functionalities access, including cv::Exception support
namespace cv{
namespace bioinspired{
//! @addtogroup bioinspired
//! @{
/** @brief a wrapper class which allows the tone mapping algorithm of Meylan&al(2007) to be used with OpenCV.
This algorithm is already implemented in the Retina class (retina::applyFastToneMapping) but using it this way does not require the entire retina model to be allocated. This allows a light memory footprint for low memory devices (smartphones, etc.).
As a summary, these are the model properties:
- 2 stages of local luminance adaptation with a different local neighborhood for each.
- first stage models the retina photoreceptors local luminance adaptation
- second stage models the ganglion cells local information adaptation
- compared to the initial publication, this class uses spatio-temporal low pass filters instead of spatial only filters.
this can help noise robustness and temporal stability for video sequence use cases.
for more information, refer to the following papers :
Meylan L., Alleysson D., and Susstrunk S., A Model of Retinal Local Adaptation for the Tone Mapping of Color Filter Array Images, Journal of Optical Society of America, A, Vol. 24, N 9, September, 1st, 2007, pp. 2807-2816, and Benoit A., Caplier A., Durette B., Herault, J., "USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011
regarding spatio-temporal filter and the bigger retina model :
Vision: Images, Signals and Neural Networks: Models of Neural Processing in Visual Perception (Progress in Neural Processing),By: Jeanny Herault, ISBN: 9814273686. WAPI (Tower ID): 113266891.
*/
class CV_EXPORTS_W RetinaFastToneMapping : public Algorithm
{
public:
/** @brief applies a luminance correction (initially High Dynamic Range (HDR) tone mapping)
using only the 2 local adaptation stages of the retina parvocellular channel : photoreceptors
level and ganglion cells level. Spatio-temporal filtering is applied but limited to temporal
smoothing and possibly high frequency attenuation. This is a lighter method than the one
available using the regular retina::run method. It is then faster but it does not include
complete temporal filtering nor retina spectral whitening. Then, it can have a more limited
effect on images with a very high dynamic range. This is an adaptation of the original still
image HDR tone mapping algorithm of David Alleyson, Sabine Susstruck and Laurence Meylan's
work, please cite: -> Meylan L., Alleysson D., and Susstrunk S., A Model of Retinal Local
Adaptation for the Tone Mapping of Color Filter Array Images, Journal of Optical Society of
America, A, Vol. 24, N 9, September, 1st, 2007, pp. 2807-2816
@param inputImage the input image to process RGB or gray levels
@param outputToneMappedImage the output tone mapped image
*/
CV_WRAP virtual void applyFastToneMapping(InputArray inputImage, OutputArray outputToneMappedImage)=0;
/** @brief updates tone mapping behaviors by adjusting the local luminance computation area
@param photoreceptorsNeighborhoodRadius the first stage local adaptation area
@param ganglioncellsNeighborhoodRadius the second stage local adaptation area
@param meanLuminanceModulatorK the factor applied to modulate the meanLuminance information
(default is 1, see reference paper)
*/
CV_WRAP virtual void setup(const float photoreceptorsNeighborhoodRadius=3.f, const float ganglioncellsNeighborhoodRadius=1.f, const float meanLuminanceModulatorK=1.f)=0;
CV_WRAP static Ptr<RetinaFastToneMapping> create(Size inputSize);
};
//! @}
}
}
#endif /* __OPENCV_BIOINSPIRED_RETINAFASTTONEMAPPING_HPP__ */
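A hedged sketch of the tone mapping wrapper declared above (file names are arbitrary; an 8-bit image is converted to float purely for illustration, a real HDR input would already be floating point):

#include <opencv2/bioinspired.hpp>
#include <opencv2/imgcodecs.hpp>

int main()
{
    cv::Mat img = cv::imread("input.png");     // hypothetical input image
    if (img.empty()) return 1;

    cv::Mat img32f;
    img.convertTo(img32f, CV_32F);             // the tone mapper works on floating point data

    cv::Ptr<cv::bioinspired::RetinaFastToneMapping> toneMapper =
        cv::bioinspired::RetinaFastToneMapping::create(img32f.size());
    toneMapper->setup(3.f, 1.f, 1.f);          // documented default adaptation radii / modulation

    cv::Mat toneMapped, out8u;
    toneMapper->applyFastToneMapping(img32f, toneMapped);

    // Rescale to 8 bits for writing, whatever the output range is.
    cv::normalize(toneMapped, out8u, 0, 255, cv::NORM_MINMAX, CV_8U);
    return cv::imwrite("tonemapped.png", out8u) ? 0 : 1;
}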


@@ -0,0 +1,204 @@
/*#******************************************************************************
** IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
**
** By downloading, copying, installing or using the software you agree to this license.
** If you do not agree to this license, do not download, install,
** copy or use the software.
**
**
** bioinspired : interfaces allowing OpenCV users to integrate Human Vision System models.
** TransientAreasSegmentationModule Use: extract areas that present spatio-temporal changes.
** => It should be applied to the cv::bioinspired::Retina::getMagnoRAW() output, which enhances spatio-temporal changes
**
** Maintainers : Listic lab (code author current affiliation & applications)
**
** Creation - enhancement process 2007-2015
** Author: Alexandre Benoit (benoit.alexandre.vision@gmail.com), LISTIC lab, Annecy le vieux, France
**
** These algorithms have been developed by Alexandre BENOIT since his thesis with Alice Caplier at Gipsa-Lab (www.gipsa-lab.inpg.fr) and the research he pursues at LISTIC Lab (www.listic.univ-savoie.fr).
** Refer to the following research paper for more information:
** Strat, S.T.; Benoit, A.; Lambert, P., "Retina enhanced bag of words descriptors for video classification," Signal Processing Conference (EUSIPCO), 2014 Proceedings of the 22nd European , vol., no., pp.1307,1311, 1-5 Sept. 2014 (http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6952461&isnumber=6951911)
** Benoit A., Caplier A., Durette B., Herault, J., "USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011
** This work has been carried out thanks to Jeanny Herault, whose research and great discussions are the basis of all this work; please take a look at his book:
** Vision: Images, Signals and Neural Networks: Models of Neural Processing in Visual Perception (Progress in Neural Processing),By: Jeanny Herault, ISBN: 9814273686. WAPI (Tower ID): 113266891.
**
**
** License Agreement
** For Open Source Computer Vision Library
**
** Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
** Copyright (C) 2008-2011, Willow Garage Inc., all rights reserved.
**
** For Human Visual System tools (bioinspired)
** Copyright (C) 2007-2015, LISTIC Lab, Annecy le Vieux and GIPSA Lab, Grenoble, France, all rights reserved.
**
** Third party copyrights are property of their respective owners.
**
** Redistribution and use in source and binary forms, with or without modification,
** are permitted provided that the following conditions are met:
**
** * Redistributions of source code must retain the above copyright notice,
** this list of conditions and the following disclaimer.
**
** * Redistributions in binary form must reproduce the above copyright notice,
** this list of conditions and the following disclaimer in the documentation
** and/or other materials provided with the distribution.
**
** * The name of the copyright holders may not be used to endorse or promote products
** derived from this software without specific prior written permission.
**
** This software is provided by the copyright holders and contributors "as is" and
** any express or implied warranties, including, but not limited to, the implied
** warranties of merchantability and fitness for a particular purpose are disclaimed.
** In no event shall the Intel Corporation or contributors be liable for any direct,
** indirect, incidental, special, exemplary, or consequential damages
** (including, but not limited to, procurement of substitute goods or services;
** loss of use, data, or profits; or business interruption) however caused
** and on any theory of liability, whether in contract, strict liability,
** or tort (including negligence or otherwise) arising in any way out of
** the use of this software, even if advised of the possibility of such damage.
*******************************************************************************/
#ifndef SEGMENTATIONMODULE_HPP_
#define SEGMENTATIONMODULE_HPP_
/**
@file
@date 2007-2013
@author Alexandre BENOIT, benoit.alexandre.vision@gmail.com
*/
#include "opencv2/core.hpp" // for all OpenCV core functionalities access, including cv::Exception support
namespace cv
{
namespace bioinspired
{
//! @addtogroup bioinspired
//! @{
/** @brief parameter structure that stores the transient events detector setup parameters
*/
struct SegmentationParameters{ // CV_EXPORTS_W_MAP to export to python native dictionaries
// default structure instance construction with default values
SegmentationParameters():
thresholdON(100),
thresholdOFF(100),
localEnergy_temporalConstant(0.5),
localEnergy_spatialConstant(5),
neighborhoodEnergy_temporalConstant(1),
neighborhoodEnergy_spatialConstant(15),
contextEnergy_temporalConstant(1),
contextEnergy_spatialConstant(75){};
// all properties list
float thresholdON;
float thresholdOFF;
//! the time constant of the first order low pass filter, use it to cut high temporal frequencies (noise or fast motion), unit is frames, typical value is 0.5 frame
float localEnergy_temporalConstant;
//! the spatial constant of the first order low pass filter, use it to cut high spatial frequencies (noise or thick contours), unit is pixels, typical value is 5 pixel
float localEnergy_spatialConstant;
//! local neighborhood energy filtering parameters : the aim is to get information about the energy neighborhood to perform a center surround energy analysis
float neighborhoodEnergy_temporalConstant;
float neighborhoodEnergy_spatialConstant;
//! context neighborhood energy filtering parameters : the aim is to get information about the energy on a wide neighborhood area to filter out local effects
float contextEnergy_temporalConstant;
float contextEnergy_spatialConstant;
};
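/* Usage sketch (not part of the original header): building a custom parameter set. The values
below are illustrative only; the defaults listed above are the reference ones.
@code
cv::bioinspired::SegmentationParameters params;   // starts with the default values listed above
params.thresholdON  = 80;                         // make ON events easier to trigger
params.thresholdOFF = 80;
params.localEnergy_spatialConstant = 3;           // smaller local smoothing area
// the structure is then passed to TransientAreasSegmentationModule::setup(params)
@endcode
*/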
/** @brief class which provides a transient/moving areas segmentation module
perform a locally adapted segmentation using the retina magno input data, based on Alexandre
BENOIT's thesis: "Le système visuel humain au secours de la vision par ordinateur"
3 spatio-temporal filters are used:
- a first one which filters the noise and local variations of the input motion energy
- a second one (a more powerful low pass spatial filter) which gives the neighborhood motion energy;
the segmentation consists in the comparison of these two outputs: if the local motion energy is higher
than the neighborhood motion energy, then the area is considered as moving and is segmented
- a stronger third low pass filter helps the decision by providing smooth information about the
"motion context" in a wider area
*/
class CV_EXPORTS_W TransientAreasSegmentationModule: public Algorithm
{
public:
/** @brief return the size of the managed input and output images
*/
CV_WRAP virtual Size getSize()=0;
/** @brief try to open an XML segmentation parameters file to adjust current segmentation instance setup
- if the xml file does not exist, then the default setup is applied
- warning: exceptions are thrown if the read XML file is not valid
@param segmentationParameterFile : the parameters filename
@param applyDefaultSetupOnFailure : if true, the default setup is applied when loading fails instead of throwing an exception
*/
CV_WRAP virtual void setup(String segmentationParameterFile="", const bool applyDefaultSetupOnFailure=true)=0;
/** @brief try to open an XML segmentation parameters file to adjust current segmentation instance setup
- if the xml file does not exist, then the default setup is applied
- warning: exceptions are thrown if the read XML file is not valid
@param fs : the open FileStorage which contains segmentation parameters
@param applyDefaultSetupOnFailure : if true, the default setup is applied when loading fails instead of throwing an exception
*/
virtual void setup(cv::FileStorage &fs, const bool applyDefaultSetupOnFailure=true)=0;
/** @brief adjust the current segmentation instance setup from a parameters structure
@param newParameters : a parameters structure updated with the new target configuration
*/
virtual void setup(SegmentationParameters newParameters)=0;
/** @brief return the current parameters setup
*/
virtual SegmentationParameters getParameters()=0;
/** @brief parameters setup display method
@return a string which contains formatted parameters information
*/
CV_WRAP virtual const String printSetup()=0;
/** @brief write xml/yml formatted parameters information
@param fs : the filename of the xml file that will be opened and written with formatted parameters information
*/
CV_WRAP virtual void write( String fs ) const=0;
/** @brief write xml/yml formatted parameters information
@param fs : a cv::FileStorage object ready to be filled
*/
virtual void write( cv::FileStorage& fs ) const CV_OVERRIDE = 0;
/** @brief main processing method, get result using methods getSegmentationPicture()
@param inputToSegment : the image to process, it must match the instance buffer size !
@param channelIndex : the channel to process in case of multichannel images
*/
CV_WRAP virtual void run(InputArray inputToSegment, const int channelIndex=0)=0;
/** @brief access function
return the last segmentation result: a boolean picture which is rescaled between 0 and 255 for display purposes
*/
CV_WRAP virtual void getSegmentationPicture(OutputArray transientAreas)=0;
/** @brief cleans all the buffers of the instance
*/
CV_WRAP virtual void clearAllBuffers()=0;
/** @brief allocator
@param inputSize : size of the images input to segment (output will be the same size)
*/
CV_WRAP static Ptr<TransientAreasSegmentationModule> create(Size inputSize);
};
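/* Usage sketch (not part of the original header): feeding the segmentation module with the
retina magnocellular output. It assumes a cv::bioinspired::Retina instance named `retina`
already running on frames of size `inputSize`, and a current image named `frame`.
@code
cv::Ptr<cv::bioinspired::TransientAreasSegmentationModule> segmenter =
    cv::bioinspired::TransientAreasSegmentationModule::create(inputSize);
cv::Mat magno, movingAreas;
retina->run(frame);                              // process the current frame
retina->getMagnoRAW(magno);                      // raw spatio-temporal change energy
segmenter->run(magno);                           // segment moving areas
segmenter->getSegmentationPicture(movingAreas);  // 0/255 mask usable for display
@endcode
*/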
//! @}
}} // namespaces end : cv and bioinspired
#endif

4253
3rdparty/opencv/inc/opencv2/calib3d.hpp vendored Normal file

File diff suppressed because it is too large

View File

@@ -0,0 +1,48 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifdef __OPENCV_BUILD
#error this is a compatibility header which should not be used inside the OpenCV library
#endif
#include "opencv2/calib3d.hpp"

View File

@@ -0,0 +1,150 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef OPENCV_CALIB3D_C_H
#define OPENCV_CALIB3D_C_H
#include "opencv2/core/types_c.h"
#ifdef __cplusplus
extern "C" {
#endif
/* Calculates fundamental matrix given a set of corresponding points */
#define CV_FM_7POINT 1
#define CV_FM_8POINT 2
#define CV_LMEDS 4
#define CV_RANSAC 8
#define CV_FM_LMEDS_ONLY CV_LMEDS
#define CV_FM_RANSAC_ONLY CV_RANSAC
#define CV_FM_LMEDS CV_LMEDS
#define CV_FM_RANSAC CV_RANSAC
enum
{
CV_ITERATIVE = 0,
CV_EPNP = 1, // F.Moreno-Noguer, V.Lepetit and P.Fua "EPnP: Efficient Perspective-n-Point Camera Pose Estimation"
CV_P3P = 2, // X.S. Gao, X.-R. Hou, J. Tang, H.-F. Chang; "Complete Solution Classification for the Perspective-Three-Point Problem"
CV_DLS = 3 // Joel A. Hesch and Stergios I. Roumeliotis. "A Direct Least-Squares (DLS) Method for PnP"
};
#define CV_CALIB_CB_ADAPTIVE_THRESH 1
#define CV_CALIB_CB_NORMALIZE_IMAGE 2
#define CV_CALIB_CB_FILTER_QUADS 4
#define CV_CALIB_CB_FAST_CHECK 8
#define CV_CALIB_USE_INTRINSIC_GUESS 1
#define CV_CALIB_FIX_ASPECT_RATIO 2
#define CV_CALIB_FIX_PRINCIPAL_POINT 4
#define CV_CALIB_ZERO_TANGENT_DIST 8
#define CV_CALIB_FIX_FOCAL_LENGTH 16
#define CV_CALIB_FIX_K1 32
#define CV_CALIB_FIX_K2 64
#define CV_CALIB_FIX_K3 128
#define CV_CALIB_FIX_K4 2048
#define CV_CALIB_FIX_K5 4096
#define CV_CALIB_FIX_K6 8192
#define CV_CALIB_RATIONAL_MODEL 16384
#define CV_CALIB_THIN_PRISM_MODEL 32768
#define CV_CALIB_FIX_S1_S2_S3_S4 65536
#define CV_CALIB_TILTED_MODEL 262144
#define CV_CALIB_FIX_TAUX_TAUY 524288
#define CV_CALIB_FIX_TANGENT_DIST 2097152
#define CV_CALIB_NINTRINSIC 18
#define CV_CALIB_FIX_INTRINSIC 256
#define CV_CALIB_SAME_FOCAL_LENGTH 512
#define CV_CALIB_ZERO_DISPARITY 1024
/* stereo correspondence parameters and functions */
#define CV_STEREO_BM_NORMALIZED_RESPONSE 0
#define CV_STEREO_BM_XSOBEL 1
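/* Usage sketch (not part of the original header): these legacy constants mirror the C++ flags
declared in opencv2/calib3d.hpp. A minimal modern equivalent, assuming two matched point sets
`points1` and `points2` (std::vector<cv::Point2f> of equal size):
@code
cv::Mat mask;
cv::Mat F = cv::findFundamentalMat(points1, points2, cv::FM_RANSAC, 3.0, 0.99, mask);
// cv::FM_RANSAC plays the role of the CV_FM_RANSAC define above
@endcode
*/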
#ifdef __cplusplus
} // extern "C"
//////////////////////////////////////////////////////////////////////////////////////////
class CV_EXPORTS CvLevMarq
{
public:
CvLevMarq();
CvLevMarq( int nparams, int nerrs, CvTermCriteria criteria=
cvTermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER,30,DBL_EPSILON),
bool completeSymmFlag=false );
~CvLevMarq();
void init( int nparams, int nerrs, CvTermCriteria criteria=
cvTermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER,30,DBL_EPSILON),
bool completeSymmFlag=false );
bool update( const CvMat*& param, CvMat*& J, CvMat*& err );
bool updateAlt( const CvMat*& param, CvMat*& JtJ, CvMat*& JtErr, double*& errNorm );
void clear();
void step();
enum { DONE=0, STARTED=1, CALC_J=2, CHECK_ERR=3 };
cv::Ptr<CvMat> mask;
cv::Ptr<CvMat> prevParam;
cv::Ptr<CvMat> param;
cv::Ptr<CvMat> J;
cv::Ptr<CvMat> err;
cv::Ptr<CvMat> JtJ;
cv::Ptr<CvMat> JtJN;
cv::Ptr<CvMat> JtErr;
cv::Ptr<CvMat> JtJV;
cv::Ptr<CvMat> JtJW;
double prevErrNorm, errNorm;
int lambdaLg10;
CvTermCriteria criteria;
int state;
int iters;
bool completeSymmFlag;
int solveMethod;
};
#endif
#endif /* OPENCV_CALIB3D_C_H */

157
3rdparty/opencv/inc/opencv2/ccalib.hpp vendored Normal file
View File

@@ -0,0 +1,157 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2014, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_CCALIB_HPP__
#define __OPENCV_CCALIB_HPP__
#include <opencv2/core.hpp>
#include <opencv2/features2d.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/calib3d.hpp>
#include <vector>
/** @defgroup ccalib Custom Calibration Pattern for 3D reconstruction
*/
namespace cv{ namespace ccalib{
//! @addtogroup ccalib
//! @{
class CV_EXPORTS CustomPattern : public Algorithm
{
public:
CustomPattern();
virtual ~CustomPattern();
bool create(InputArray pattern, const Size2f boardSize, OutputArray output = noArray());
bool findPattern(InputArray image, OutputArray matched_features, OutputArray pattern_points, const double ratio = 0.7,
const double proj_error = 8.0, const bool refine_position = false, OutputArray out = noArray(),
OutputArray H = noArray(), OutputArray pattern_corners = noArray());
bool isInitialized();
void getPatternPoints(std::vector<KeyPoint>& original_points);
/**<
Returns the original pattern points as a vector<KeyPoint>.
*/
double getPixelSize();
/**<
Get the pixel size of the pattern
*/
bool setFeatureDetector(Ptr<FeatureDetector> featureDetector);
bool setDescriptorExtractor(Ptr<DescriptorExtractor> extractor);
bool setDescriptorMatcher(Ptr<DescriptorMatcher> matcher);
Ptr<FeatureDetector> getFeatureDetector();
Ptr<DescriptorExtractor> getDescriptorExtractor();
Ptr<DescriptorMatcher> getDescriptorMatcher();
double calibrate(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints,
Size imageSize, InputOutputArray cameraMatrix, InputOutputArray distCoeffs,
OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs, int flags = 0,
TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON));
/**<
Calls the calibrateCamera function with the same inputs.
*/
bool findRt(InputArray objectPoints, InputArray imagePoints, InputArray cameraMatrix, InputArray distCoeffs,
InputOutputArray rvec, InputOutputArray tvec, bool useExtrinsicGuess = false, int flags = SOLVEPNP_ITERATIVE);
bool findRt(InputArray image, InputArray cameraMatrix, InputArray distCoeffs,
InputOutputArray rvec, InputOutputArray tvec, bool useExtrinsicGuess = false, int flags = SOLVEPNP_ITERATIVE);
/**<
Uses solvePnP to find the rotation and translation of the pattern
with respect to the camera frame.
*/
bool findRtRANSAC(InputArray objectPoints, InputArray imagePoints, InputArray cameraMatrix, InputArray distCoeffs,
InputOutputArray rvec, InputOutputArray tvec, bool useExtrinsicGuess = false, int iterationsCount = 100,
float reprojectionError = 8.0, int minInliersCount = 100, OutputArray inliers = noArray(), int flags = SOLVEPNP_ITERATIVE);
bool findRtRANSAC(InputArray image, InputArray cameraMatrix, InputArray distCoeffs,
InputOutputArray rvec, InputOutputArray tvec, bool useExtrinsicGuess = false, int iterationsCount = 100,
float reprojectionError = 8.0, int minInliersCount = 100, OutputArray inliers = noArray(), int flags = SOLVEPNP_ITERATIVE);
/**<
Uses solvePnPRansac()
*/
void drawOrientation(InputOutputArray image, InputArray tvec, InputArray rvec, InputArray cameraMatrix,
InputArray distCoeffs, double axis_length = 3, int axis_width = 2);
/**<
Draws the axes of the pattern's coordinate frame over the image, using the estimated pose (rvec, tvec).
*/
private:
Mat img_roi;
std::vector<Point2f> obj_corners;
double pxSize;
bool initialized;
Ptr<FeatureDetector> detector;
Ptr<DescriptorExtractor> descriptorExtractor;
Ptr<DescriptorMatcher> descriptorMatcher;
std::vector<KeyPoint> keypoints;
std::vector<Point3f> points3d;
Mat descriptor;
bool init(Mat& image, const float pixel_size, OutputArray output = noArray());
bool findPatternPass(const Mat& image, std::vector<Point2f>& matched_features, std::vector<Point3f>& pattern_points,
Mat& H, std::vector<Point2f>& scene_corners, const double pratio, const double proj_error,
const bool refine_position = false, const Mat& mask = Mat(), OutputArray output = noArray());
void scaleFoundPoints(const double squareSize, const std::vector<KeyPoint>& corners, std::vector<Point3f>& pts3d);
void check_matches(std::vector<Point2f>& matched, const std::vector<Point2f>& pattern, std::vector<DMatch>& good, std::vector<Point3f>& pattern_3d, const Mat& H);
void keypoints2points(const std::vector<KeyPoint>& in, std::vector<Point2f>& out);
void updateKeypointsPos(std::vector<KeyPoint>& in, const std::vector<Point2f>& new_pos);
void refinePointsPos(const Mat& img, std::vector<Point2f>& p);
void refineKeypointsPos(const Mat& img, std::vector<KeyPoint>& kp);
};
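/* Usage sketch (not part of the original header): locating a custom printed pattern and
estimating its pose. It assumes `patternImage` (the reference picture of the pattern),
`frame` (a camera image) and previously calibrated `cameraMatrix` / `distCoeffs`.
@code
cv::ccalib::CustomPattern pattern;
pattern.create(patternImage, cv::Size2f(21.0f, 29.7f));   // physical size in user units (e.g. cm)
std::vector<cv::Point2f> imagePoints;
std::vector<cv::Point3f> objectPoints;
if (pattern.findPattern(frame, imagePoints, objectPoints))
{
    cv::Mat rvec, tvec;
    pattern.findRt(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec);
    pattern.drawOrientation(frame, tvec, rvec, cameraMatrix, distCoeffs);
}
@endcode
*/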
//! @}
}} // namespace ccalib, cv
#endif

View File

@@ -0,0 +1,212 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2015, Baisheng Lai (laibaisheng@gmail.com), Zhejiang University,
// all rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_MULTICAMERACALIBRATION_HPP__
#define __OPENCV_MULTICAMERACALIBRATION_HPP__
#include "opencv2/ccalib/randpattern.hpp"
#include "opencv2/ccalib/omnidir.hpp"
#include <string>
#include <iostream>
namespace cv { namespace multicalib {
//! @addtogroup ccalib
//! @{
#define HEAD -1
#define INVALID -2
/** @brief Class for multiple camera calibration that supports pinhole and omnidirectional camera models.
For the omnidirectional camera model, please refer to omnidir.hpp in the ccalib module.
It first calibrates each camera individually, then a bundle-adjustment-like optimization is applied to
refine the extrinsic parameters. So far, it only supports the "random" pattern for calibration,
see randomPattern.hpp in the ccalib module for details.
Images that are used should be named "cameraIdx-timestamp.*"; several images with the same timestamp
mean that the same pattern was photographed at that instant. cameraIdx should start from 0.
For more details, please refer to the paper
B. Li, L. Heng, K. Kevin and M. Pollefeys, "A Multiple-Camera System
Calibration Toolbox Using A Feature Descriptor-Based Calibration
Pattern", in IROS 2013.
*/
class CV_EXPORTS MultiCameraCalibration
{
public:
enum {
PINHOLE,
OMNIDIRECTIONAL
//FISHEYE
};
// an edge connects a camera and pattern
struct edge
{
int cameraVertex; // vertex index for camera in this edge
int photoVertex; // vertex index for pattern in this edge
int photoIndex; // photo index among photos for this camera
Mat transform; // transform from pattern to camera
edge(int cv, int pv, int pi, Mat trans)
{
cameraVertex = cv;
photoVertex = pv;
photoIndex = pi;
transform = trans;
}
};
struct vertex
{
Mat pose; // relative pose to the first camera. For camera vertex, it is the
// transform from the first camera to this camera, for pattern vertex,
// it is the transform from pattern to the first camera
int timestamp; // timestamp of photo, only available for photo vertex
vertex(Mat po, int ts)
{
pose = po;
timestamp = ts;
}
vertex()
{
pose = Mat::eye(4, 4, CV_32F);
timestamp = -1;
}
};
/* @brief Constructor
@param cameraType camera type, PINHOLE or OMNIDIRECTIONAL
@param nCameras number of cameras
@param fileName filename of the string list that is used for calibration; the file is generated
by imagelist_creator from the OpenCV samples. The first entry in the list is the pattern filename.
@param patternWidth the physical width of the pattern, in a user defined unit.
@param patternHeight the physical height of the pattern, in a user defined unit.
@param showExtration whether to show extracted features and feature filtering.
@param nMiniMatches minimal number of matched features for a frame.
@param flags calibration flags
@param criteria optimization stopping criteria.
@param detector feature detector that detects feature points in the pattern and images.
@param descriptor feature descriptor.
@param matcher feature matcher.
*/
MultiCameraCalibration(int cameraType, int nCameras, const std::string& fileName, float patternWidth,
float patternHeight, int verbose = 0, int showExtration = 0, int nMiniMatches = 20, int flags = 0,
TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 200, 1e-7),
Ptr<FeatureDetector> detector = AKAZE::create(AKAZE::DESCRIPTOR_MLDB, 0, 3, 0.006f),
Ptr<DescriptorExtractor> descriptor = AKAZE::create(AKAZE::DESCRIPTOR_MLDB,0, 3, 0.006f),
Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce-L1"));
/* @brief load images
*/
void loadImages();
/* @brief initialize multiple camera calibration. It calibrates each camera individually.
*/
void initialize();
/* @brief optimize extrinsic parameters
*/
double optimizeExtrinsics();
/* @brief run multi-camera calibration; it runs loadImages(), initialize() and optimizeExtrinsics()
*/
double run();
/* @brief write camera parameters to file.
*/
void writeParameters(const std::string& filename);
private:
std::vector<std::string> readStringList();
int getPhotoVertex(int timestamp);
void graphTraverse(const Mat& G, int begin, std::vector<int>& order, std::vector<int>& pre);
void findRowNonZero(const Mat& row, Mat& idx);
void computeJacobianExtrinsic(const Mat& extrinsicParams, Mat& JTJ_inv, Mat& JTE);
void computePhotoCameraJacobian(const Mat& rvecPhoto, const Mat& tvecPhoto, const Mat& rvecCamera,
const Mat& tvecCamera, Mat& rvecTran, Mat& tvecTran, const Mat& objectPoints, const Mat& imagePoints, const Mat& K,
const Mat& distort, const Mat& xi, Mat& jacobianPhoto, Mat& jacobianCamera, Mat& E);
void compose_motion(InputArray _om1, InputArray _T1, InputArray _om2, InputArray _T2, Mat& om3, Mat& T3, Mat& dom3dom1,
Mat& dom3dT1, Mat& dom3dom2, Mat& dom3dT2, Mat& dT3dom1, Mat& dT3dT1, Mat& dT3dom2, Mat& dT3dT2);
void JRodriguesMatlab(const Mat& src, Mat& dst);
void dAB(InputArray A, InputArray B, OutputArray dABdA, OutputArray dABdB);
double computeProjectError(Mat& parameters);
void vector2parameters(const Mat& parameters, std::vector<Vec3f>& rvecVertex, std::vector<Vec3f>& tvecVertexs);
void parameters2vector(const std::vector<Vec3f>& rvecVertex, const std::vector<Vec3f>& tvecVertex, Mat& parameters);
int _camType; //PINHOLE, FISHEYE or OMNIDIRECTIONAL
int _nCamera;
int _nMiniMatches;
int _flags;
int _verbose;
double _error;
float _patternWidth, _patternHeight;
TermCriteria _criteria;
std::string _filename;
int _showExtraction;
Ptr<FeatureDetector> _detector;
Ptr<DescriptorExtractor> _descriptor;
Ptr<DescriptorMatcher> _matcher;
std::vector<edge> _edgeList;
std::vector<vertex> _vertexList;
std::vector<std::vector<cv::Mat> > _objectPointsForEachCamera;
std::vector<std::vector<cv::Mat> > _imagePointsForEachCamera;
std::vector<cv::Mat> _cameraMatrix;
std::vector<cv::Mat> _distortCoeffs;
std::vector<cv::Mat> _xi;
std::vector<std::vector<Mat> > _omEachCamera, _tEachCamera;
};
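/* Usage sketch (not part of the original header): calibrating a small pinhole camera rig. The
file name "image_list.xml" and the rig/pattern sizes below are illustrative placeholders; the
list is assumed to be produced by the imagelist_creator sample, with the pattern image first
and the "cameraIdx-timestamp" named frames after it.
@code
cv::multicalib::MultiCameraCalibration multiCalib(
    cv::multicalib::MultiCameraCalibration::PINHOLE,   // camera model
    4,                                                  // number of cameras in the rig
    "image_list.xml",                                   // hypothetical image list file
    800.0f, 600.0f);                                    // physical pattern width / height
double rms = multiCalib.run();           // loadImages() + initialize() + optimizeExtrinsics()
multiCalib.writeParameters("multi_camera_parameters.xml");
@endcode
*/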
//! @}
}} // namespace multicalib, cv
#endif

View File

@@ -0,0 +1,315 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2015, Baisheng Lai (laibaisheng@gmail.com), Zhejiang University,
// all rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_OMNIDIR_HPP__
#define __OPENCV_OMNIDIR_HPP__
#include "opencv2/core.hpp"
#include "opencv2/core/affine.hpp"
#include <vector>
namespace cv
{
namespace omnidir
{
//! @addtogroup ccalib
//! @{
enum {
CALIB_USE_GUESS = 1,
CALIB_FIX_SKEW = 2,
CALIB_FIX_K1 = 4,
CALIB_FIX_K2 = 8,
CALIB_FIX_P1 = 16,
CALIB_FIX_P2 = 32,
CALIB_FIX_XI = 64,
CALIB_FIX_GAMMA = 128,
CALIB_FIX_CENTER = 256
};
enum{
RECTIFY_PERSPECTIVE = 1,
RECTIFY_CYLINDRICAL = 2,
RECTIFY_LONGLATI = 3,
RECTIFY_STEREOGRAPHIC = 4
};
enum{
XYZRGB = 1,
XYZ = 2
};
/**
* This module was accepted as a GSoC 2015 project for OpenCV, authored by
* Baisheng Lai, mentored by Bo Li.
*/
/** @brief Projects points for omnidirectional camera using CMei's model
@param objectPoints Object points in world coordinate, vector of vector of Vec3f or Mat of
1xN/Nx1 3-channel of type CV_32F and N is the number of points. 64F is also acceptable.
@param imagePoints Output array of image points, vector of vector of Vec2f or
1xN/Nx1 2-channel of type CV_32F. 64F is also acceptable.
@param rvec vector of rotation between world coordinate and camera coordinate, i.e., om
@param tvec vector of translation between pattern coordinate and camera coordinate
@param K Camera matrix \f$K = \vecthreethree{f_x}{s}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$.
@param D Input vector of distortion coefficients \f$(k_1, k_2, p_1, p_2)\f$.
@param xi The parameter xi for CMei's model
@param jacobian Optional output 2Nx16 of type CV_64F jacobian matrix, contains the derivatives of
image pixel points wrt parameters including \f$om, T, f_x, f_y, s, c_x, c_y, xi, k_1, k_2, p_1, p_2\f$.
This matrix will be used in calibration by optimization.
The function projects 3D object points from the world coordinate frame to image pixels, parameterized by the intrinsic
and extrinsic parameters. Optionally, it also computes a by-product: the Jacobian matrix, which
contains the derivatives of the image pixel points wrt the intrinsic and extrinsic parameters.
*/
CV_EXPORTS_W void projectPoints(InputArray objectPoints, OutputArray imagePoints, InputArray rvec, InputArray tvec,
InputArray K, double xi, InputArray D, OutputArray jacobian = noArray());
/** @overload */
CV_EXPORTS void projectPoints(InputArray objectPoints, OutputArray imagePoints, const Affine3d& affine,
InputArray K, double xi, InputArray D, OutputArray jacobian = noArray());
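/* Usage sketch (not part of the original header): projecting a few 3D points with CMei's model.
All numeric values below are illustrative placeholders, not calibrated data.
@code
std::vector<cv::Vec3d> objectPoints = { {0, 0, 1}, {0.1, 0, 1}, {0, 0.1, 1} };
std::vector<cv::Vec2d> imagePoints;
cv::Vec3d rvec(0, 0, 0), tvec(0, 0, 0);
cv::Matx33d K(350,   0, 320,
                0, 350, 240,
                0,   0,   1);
cv::Vec4d D(0.1, -0.05, 0.001, 0.001);    // k1, k2, p1, p2
double xi = 0.9;                          // CMei's model parameter
cv::omnidir::projectPoints(objectPoints, imagePoints, rvec, tvec, K, xi, D);
@endcode
*/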
/** @brief Undistort 2D image points for omnidirectional camera using CMei's model
@param distorted Array of distorted image points, vector of Vec2f
or 1xN/Nx1 2-channel Mat of type CV_32F, 64F depth is also acceptable
@param K Camera matrix \f$K = \vecthreethree{f_x}{s}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$.
@param D Distortion coefficients \f$(k_1, k_2, p_1, p_2)\f$.
@param xi The parameter xi for CMei's model
@param R Rotation transform between the original and object space : 3x3 1-channel, or vector: 3x1/1x3
1-channel or 1x1 3-channel
@param undistorted array of normalized object points, vector of Vec2f/Vec2d or 1xN/Nx1 2-channel Mat with the same
depth of distorted points.
*/
CV_EXPORTS_W void undistortPoints(InputArray distorted, OutputArray undistorted, InputArray K, InputArray D, InputArray xi, InputArray R);
/** @brief Computes undistortion and rectification maps for an omnidirectional camera image transformed by a rotation R.
It outputs two maps that are used for cv::remap(). If D is empty then zero distortion is used,
if R or P is empty then identity matrices are used.
@param K Camera matrix \f$K = \vecthreethree{f_x}{s}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$, with depth CV_32F or CV_64F
@param D Input vector of distortion coefficients \f$(k_1, k_2, p_1, p_2)\f$, with depth CV_32F or CV_64F
@param xi The parameter xi for CMei's model
@param R Rotation transform between the original and object space : 3x3 1-channel, or vector: 3x1/1x3, with depth CV_32F or CV_64F
@param P New camera matrix (3x3) or new projection matrix (3x4)
@param size Undistorted image size.
@param m1type Type of the first output map that can be CV_32FC1 or CV_16SC2 . See convertMaps()
for details.
@param map1 The first output map.
@param map2 The second output map.
@param flags Flags indicates the rectification type, RECTIFY_PERSPECTIVE, RECTIFY_CYLINDRICAL, RECTIFY_LONGLATI and RECTIFY_STEREOGRAPHIC
are supported.
*/
CV_EXPORTS_W void initUndistortRectifyMap(InputArray K, InputArray D, InputArray xi, InputArray R, InputArray P, const cv::Size& size,
int m1type, OutputArray map1, OutputArray map2, int flags);
/** @brief Undistort omnidirectional images to perspective images
@param distorted The input omnidirectional image.
@param undistorted The output undistorted image.
@param K Camera matrix \f$K = \vecthreethree{f_x}{s}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$.
@param D Input vector of distortion coefficients \f$(k_1, k_2, p_1, p_2)\f$.
@param xi The parameter xi for CMei's model.
@param flags Flags indicates the rectification type, RECTIFY_PERSPECTIVE, RECTIFY_CYLINDRICAL, RECTIFY_LONGLATI and RECTIFY_STEREOGRAPHIC
@param Knew Camera matrix of the distorted image. If it is not assigned, it is just K.
@param new_size The new image size. By default, it is the size of distorted.
@param R Rotation matrix between the input and output images. By default, it is identity matrix.
*/
CV_EXPORTS_W void undistortImage(InputArray distorted, OutputArray undistorted, InputArray K, InputArray D, InputArray xi, int flags,
InputArray Knew = cv::noArray(), const Size& new_size = Size(), InputArray R = Mat::eye(3, 3, CV_64F));
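/* Usage sketch (not part of the original header): undistorting an omnidirectional image to a
perspective view. It assumes `K`, `D` and `xi` were obtained from omnidir::calibrate and that
`fisheye` is the input image; the Knew values below are illustrative.
@code
cv::Mat perspective;
cv::Matx33d Knew(fisheye.cols / 4.0, 0, fisheye.cols / 2.0,
                 0, fisheye.rows / 4.0, fisheye.rows / 2.0,
                 0, 0, 1);
cv::omnidir::undistortImage(fisheye, perspective, K, D, xi,
                            cv::omnidir::RECTIFY_PERSPECTIVE, Knew, fisheye.size());
@endcode
*/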
/** @brief Perform omnidirectional camera calibration, the default depth of outputs is CV_64F.
@param objectPoints Vector of vector of Vec3f object points in world (pattern) coordinate.
It can also be a vector of Mat with size 1xN/Nx1 and type CV_32FC3. Data with depth CV_64F is also acceptable.
@param imagePoints Vector of vector of Vec2f corresponding image points of objectPoints. It must be the same
size and the same type with objectPoints.
@param size Image size of calibration images.
@param K Output calibrated camera matrix.
@param xi Output parameter xi for CMei's model
@param D Output distortion parameters \f$(k_1, k_2, p_1, p_2)\f$
@param rvecs Output rotations for each calibration images
@param tvecs Output translation for each calibration images
@param flags The flags that control calibrate
@param criteria Termination criteria for optimization
@param idx Indices of images that pass initialization, which are really used in calibration. So the size of rvecs is the
same as idx.total().
*/
CV_EXPORTS_W double calibrate(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints, Size size,
InputOutputArray K, InputOutputArray xi, InputOutputArray D, OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs,
int flags, TermCriteria criteria, OutputArray idx=noArray());
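/* Usage sketch (not part of the original header): a minimal calibration call. It assumes
`objectPoints` / `imagePoints` were collected per view (e.g. from chessboard detection) and
that `imageSize` holds the size of the calibration images.
@code
cv::Mat K, xi, D, idx;
std::vector<cv::Mat> rvecs, tvecs;
cv::TermCriteria criteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 200, 1e-8);
double rms = cv::omnidir::calibrate(objectPoints, imagePoints, imageSize,
                                    K, xi, D, rvecs, tvecs,
                                    0,          // calibration flags
                                    criteria, idx);
@endcode
*/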
/** @brief Stereo calibration for omnidirectional camera model. It computes the intrinsic parameters for two
cameras and the extrinsic parameters between two cameras. The default depth of outputs is CV_64F.
@param objectPoints Object points in world (pattern) coordinate. Its type is vector<vector<Vec3f> >.
It can also be a vector of Mat with size 1xN/Nx1 and type CV_32FC3. Data with depth CV_64F is also acceptable.
@param imagePoints1 The corresponding image points of the first camera, with type vector<vector<Vec2f> >.
It must be the same size and the same type as objectPoints.
@param imagePoints2 The corresponding image points of the second camera, with type vector<vector<Vec2f> >.
It must be the same size and the same type as objectPoints.
@param imageSize1 Image size of calibration images of the first camera.
@param imageSize2 Image size of calibration images of the second camera.
@param K1 Output camera matrix for the first camera.
@param xi1 Output parameter xi of Mei's model for the first camera
@param D1 Output distortion parameters \f$(k_1, k_2, p_1, p_2)\f$ for the first camera
@param K2 Output camera matrix for the second camera.
@param xi2 Output parameter xi of CMei's model for the second camera
@param D2 Output distortion parameters \f$(k_1, k_2, p_1, p_2)\f$ for the second camera
@param rvec Output rotation between the first and second camera
@param tvec Output translation between the first and second camera
@param rvecsL Output rotation for each image of the first camera
@param tvecsL Output translation for each image of the first camera
@param flags The flags that control stereoCalibrate
@param criteria Termination criteria for optimization
@param idx Indices of image pairs that pass initialization, which are really used in calibration. So the size of rvecs is the
same as idx.total().
*/
CV_EXPORTS_W double stereoCalibrate(InputOutputArrayOfArrays objectPoints, InputOutputArrayOfArrays imagePoints1, InputOutputArrayOfArrays imagePoints2,
const Size& imageSize1, const Size& imageSize2, InputOutputArray K1, InputOutputArray xi1, InputOutputArray D1, InputOutputArray K2, InputOutputArray xi2,
InputOutputArray D2, OutputArray rvec, OutputArray tvec, OutputArrayOfArrays rvecsL, OutputArrayOfArrays tvecsL, int flags, TermCriteria criteria, OutputArray idx=noArray());
/** @brief Stereo rectification for omnidirectional camera model. It computes the rectification rotations for two cameras
@param R Rotation between the first and second camera
@param T Translation between the first and second camera
@param R1 Output 3x3 rotation matrix for the first camera
@param R2 Output 3x3 rotation matrix for the second camera
*/
CV_EXPORTS_W void stereoRectify(InputArray R, InputArray T, OutputArray R1, OutputArray R2);
/** @brief Stereo 3D reconstruction from a pair of images
@param image1 The first input image
@param image2 The second input image
@param K1 Input camera matrix of the first camera
@param D1 Input distortion parameters \f$(k_1, k_2, p_1, p_2)\f$ for the first camera
@param xi1 Input parameter xi for the first camera for CMei's model
@param K2 Input camera matrix of the second camera
@param D2 Input distortion parameters \f$(k_1, k_2, p_1, p_2)\f$ for the second camera
@param xi2 Input parameter xi for the second camera for CMei's model
@param R Rotation between the first and second camera
@param T Translation between the first and second camera
@param flag Flag of rectification type, RECTIFY_PERSPECTIVE or RECTIFY_LONGLATI
@param numDisparities The parameter 'numDisparities' in StereoSGBM, see StereoSGBM for details.
@param SADWindowSize The parameter 'SADWindowSize' in StereoSGBM, see StereoSGBM for details.
@param disparity Disparity map generated by stereo matching
@param image1Rec Rectified image of the first image
@param image2Rec rectified image of the second image
@param newSize Image size of rectified image, see omnidir::undistortImage
@param Knew New camera matrix of rectified image, see omnidir::undistortImage
@param pointCloud Point cloud of 3D reconstruction, with type CV_64FC3
@param pointType Point cloud type, it can be XYZRGB or XYZ
*/
CV_EXPORTS_W void stereoReconstruct(InputArray image1, InputArray image2, InputArray K1, InputArray D1, InputArray xi1,
InputArray K2, InputArray D2, InputArray xi2, InputArray R, InputArray T, int flag, int numDisparities, int SADWindowSize,
OutputArray disparity, OutputArray image1Rec, OutputArray image2Rec, const Size& newSize = Size(), InputArray Knew = cv::noArray(),
OutputArray pointCloud = cv::noArray(), int pointType = XYZRGB);
namespace internal
{
void initializeCalibration(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints, Size size, OutputArrayOfArrays omAll,
OutputArrayOfArrays tAll, OutputArray K, double& xi, OutputArray idx = noArray());
void initializeStereoCalibration(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints1, InputArrayOfArrays imagePoints2,
const Size& size1, const Size& size2, OutputArray om, OutputArray T, OutputArrayOfArrays omL, OutputArrayOfArrays tL, OutputArray K1, OutputArray D1, OutputArray K2, OutputArray D2,
double &xi1, double &xi2, int flags, OutputArray idx);
void computeJacobian(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints, InputArray parameters, Mat& JTJ_inv, Mat& JTE, int flags,
double epsilon);
void computeJacobianStereo(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints1, InputArrayOfArrays imagePoints2,
InputArray parameters, Mat& JTJ_inv, Mat& JTE, int flags, double epsilon);
void encodeParameters(InputArray K, InputArrayOfArrays omAll, InputArrayOfArrays tAll, InputArray distoaration, double xi, OutputArray parameters);
void encodeParametersStereo(InputArray K1, InputArray K2, InputArray om, InputArray T, InputArrayOfArrays omL, InputArrayOfArrays tL,
InputArray D1, InputArray D2, double xi1, double xi2, OutputArray parameters);
void decodeParameters(InputArray paramsters, OutputArray K, OutputArrayOfArrays omAll, OutputArrayOfArrays tAll, OutputArray distoration, double& xi);
void decodeParametersStereo(InputArray parameters, OutputArray K1, OutputArray K2, OutputArray om, OutputArray T, OutputArrayOfArrays omL,
OutputArrayOfArrays tL, OutputArray D1, OutputArray D2, double& xi1, double& xi2);
void estimateUncertainties(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints, InputArray parameters, Mat& errors, Vec2d& std_error, double& rms, int flags);
void estimateUncertaintiesStereo(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints1, InputArrayOfArrays imagePoints2, InputArray parameters, Mat& errors,
Vec2d& std_error, double& rms, int flags);
double computeMeanReproErr(InputArrayOfArrays imagePoints, InputArrayOfArrays proImagePoints);
double computeMeanReproErr(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints, InputArray K, InputArray D, double xi, InputArrayOfArrays omAll,
InputArrayOfArrays tAll);
double computeMeanReproErrStereo(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints1, InputArrayOfArrays imagePoints2, InputArray K1, InputArray K2,
InputArray D1, InputArray D2, double xi1, double xi2, InputArray om, InputArray T, InputArrayOfArrays omL, InputArrayOfArrays TL);
void subMatrix(const Mat& src, Mat& dst, const std::vector<int>& cols, const std::vector<int>& rows);
void flags2idx(int flags, std::vector<int>& idx, int n);
void flags2idxStereo(int flags, std::vector<int>& idx, int n);
void fillFixed(Mat&G, int flags, int n);
void fillFixedStereo(Mat& G, int flags, int n);
double findMedian(const Mat& row);
Vec3d findMedian3(InputArray mat);
void getInterset(InputArray idx1, InputArray idx2, OutputArray inter1, OutputArray inter2, OutputArray inter_ori);
void compose_motion(InputArray _om1, InputArray _T1, InputArray _om2, InputArray _T2, Mat& om3, Mat& T3, Mat& dom3dom1,
Mat& dom3dT1, Mat& dom3dom2, Mat& dom3dT2, Mat& dT3dom1, Mat& dT3dT1, Mat& dT3dom2, Mat& dT3dT2);
//void JRodriguesMatlab(const Mat& src, Mat& dst);
//void dAB(InputArray A, InputArray B, OutputArray dABdA, OutputArray dABdB);
} // internal
//! @}
} // omnidir
} //cv
#endif

View File

@@ -0,0 +1,184 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2015, Baisheng Lai (laibaisheng@gmail.com), Zhejiang University,
// all rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_RANDOMPATTERN_HPP__
#define __OPENCV_RANDOMPATTERN_HPP__
#include "opencv2/features2d.hpp"
#include "opencv2/highgui.hpp"
namespace cv { namespace randpattern {
//! @addtogroup ccalib
//! @{
/** @brief Class for finding feature points and the corresponding 3D points in world coordinates of
a "random" pattern, which can be used in calibration. It is useful when the pattern is
partly occluded or only a part of the pattern can be observed in a multiple camera calibration.
The pattern can be generated by RandomPatternGenerator class described in this file.
Please refer to paper
B. Li, L. Heng, K. Kevin and M. Pollefeys, "A Multiple-Camera System
Calibration Toolbox Using A Feature Descriptor-Based Calibration
Pattern", in IROS 2013.
*/
class CV_EXPORTS RandomPatternCornerFinder
{
public:
/* @brief Construct RandomPatternCornerFinder object
@param patternWidth the real width of the "random" pattern in a user defined unit.
@param patternHeight the real height of the "random" pattern in a user defined unit.
@param nminiMatch minimal number of matches; images with fewer matches are abandoned
@param depth depth of the output objectPoints and imagePoints, set it to CV_32F or CV_64F.
@param showExtraction whether to show feature extraction, 0 for no and 1 for yes.
@param detector feature detector to detect feature points in the pattern and images.
@param descriptor feature descriptor.
@param matcher feature matcher.
*/
RandomPatternCornerFinder(float patternWidth, float patternHeight,
int nminiMatch = 20, int depth = CV_32F, int verbose = 0, int showExtraction = 0,
Ptr<FeatureDetector> detector = AKAZE::create(AKAZE::DESCRIPTOR_MLDB, 0, 3, 0.005f),
Ptr<DescriptorExtractor> descriptor = AKAZE::create(AKAZE::DESCRIPTOR_MLDB,0, 3, 0.005f),
Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce-L1"));
/* @brief Load pattern image and compute features for pattern
@param patternImage image for "random" pattern generated by RandomPatternGenerator, run it first.
*/
void loadPattern(const cv::Mat &patternImage);
/* @brief Load pattern and features
@param patternImage image for "random" pattern generated by RandomPatternGenerator, run it first.
@param patternKeyPoints keyPoints created from a FeatureDetector.
@param patternDescriptors descriptors created from a DescriptorExtractor.
*/
void loadPattern(const cv::Mat &patternImage, const std::vector<cv::KeyPoint> &patternKeyPoints, const cv::Mat &patternDescriptors);
/* @brief Compute matched object points and image points which are used for calibration
The objectPoints (3D) and imagePoints (2D) are stored inside the class. Run getObjectPoints()
and getImagePoints() to get them.
@param inputImages vector of 8-bit grayscale images containing "random" pattern
that are used for calibration.
*/
void computeObjectImagePoints(std::vector<cv::Mat> inputImages);
//void computeObjectImagePoints2(std::vector<cv::Mat> inputImages);
/* @brief Compute object and image points for a single image. It returns a vector<Mat> that
the first element stores the imagePoints and the second one stores the objectPoints.
@param inputImage single input image for calibration
*/
std::vector<cv::Mat> computeObjectImagePointsForSingle(cv::Mat inputImage);
/* @brief Get object(3D) points
*/
const std::vector<cv::Mat> &getObjectPoints();
/* @brief Get image (2D) points
*/
const std::vector<cv::Mat> &getImagePoints();
private:
std::vector<cv::Mat> _objectPonits, _imagePoints;
float _patternWidth, _patternHeight;
cv::Size _patternImageSize;
int _nminiMatch;
int _depth;
int _verbose;
Ptr<FeatureDetector> _detector;
Ptr<DescriptorExtractor> _descriptor;
Ptr<DescriptorMatcher> _matcher;
Mat _descriptorPattern;
std::vector<cv::KeyPoint> _keypointsPattern;
Mat _patternImage;
int _showExtraction;
void keyPoints2MatchedLocation(const std::vector<cv::KeyPoint>& imageKeypoints,
const std::vector<cv::KeyPoint>& patternKeypoints, const std::vector<cv::DMatch> matchces,
cv::Mat& matchedImagelocation, cv::Mat& matchedPatternLocation);
void getFilteredLocation(cv::Mat& imageKeypoints, cv::Mat& patternKeypoints, const cv::Mat mask);
void getObjectImagePoints(const cv::Mat& imageKeypoints, const cv::Mat& patternKeypoints);
void crossCheckMatching( cv::Ptr<DescriptorMatcher>& descriptorMatcher,
const Mat& descriptors1, const Mat& descriptors2,
std::vector<DMatch>& filteredMatches12, int knn=1 );
void drawCorrespondence(const Mat& image1, const std::vector<cv::KeyPoint> keypoint1,
const Mat& image2, const std::vector<cv::KeyPoint> keypoint2, const std::vector<cv::DMatch> matchces,
const Mat& mask1, const Mat& mask2, const int step);
};
/* @brief Class to generate a "random" pattern image that is used by RandomPatternCornerFinder
Please refer to paper
B. Li, L. Heng, K. Kevin and M. Pollefeys, "A Multiple-Camera System
Calibration Toolbox Using A Feature Descriptor-Based Calibration
Pattern", in IROS 2013.
*/
class CV_EXPORTS RandomPatternGenerator
{
public:
/* @brief Construct RandomPatternGenerator
@param imageWidth image width of the generated pattern image
@param imageHeight image height of the generated pattern image
*/
RandomPatternGenerator(int imageWidth, int imageHeight);
/* @brief Generate pattern
*/
void generatePattern();
/* @brief Get pattern
*/
cv::Mat getPattern();
private:
cv::Mat _pattern;
int _imageWidth, _imageHeight;
};
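/* Usage sketch (not part of the original header): generating a random pattern and extracting
calibration points from it. It assumes `calibrationImages` is a std::vector<cv::Mat> of 8-bit
grayscale views of the printed pattern; all sizes below are illustrative.
@code
cv::randpattern::RandomPatternGenerator generator(800, 600);         // pattern image size in pixels
generator.generatePattern();
cv::Mat pattern = generator.getPattern();

cv::randpattern::RandomPatternCornerFinder finder(80.f, 60.f, 20);   // physical size + min matches
finder.loadPattern(pattern);
finder.computeObjectImagePoints(calibrationImages);
const std::vector<cv::Mat>& objectPoints = finder.getObjectPoints();
const std::vector<cv::Mat>& imagePoints  = finder.getImagePoints();
// objectPoints / imagePoints can then be fed to cv::calibrateCamera or cv::omnidir::calibrate
@endcode
*/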
//! @}
}} //namespace randpattern, cv
#endif

3344
3rdparty/opencv/inc/opencv2/core.hpp vendored Normal file

File diff suppressed because it is too large

View File

@@ -0,0 +1,678 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef OPENCV_CORE_AFFINE3_HPP
#define OPENCV_CORE_AFFINE3_HPP
#ifdef __cplusplus
#include <opencv2/core.hpp>
namespace cv
{
//! @addtogroup core
//! @{
/** @brief Affine transform
*
* It represents a 4x4 homogeneous transformation matrix \f$T\f$
*
* \f[T =
* \begin{bmatrix}
* R & t\\
* 0 & 1\\
* \end{bmatrix}
* \f]
*
* where \f$R\f$ is a 3x3 rotation matrix and \f$t\f$ is a 3x1 translation vector.
*
* You can specify \f$R\f$ either by a 3x3 rotation matrix or by a 3x1 rotation vector,
* which is converted to a 3x3 rotation matrix by the Rodrigues formula.
*
* To construct a matrix \f$T\f$ representing first rotation around the axis \f$r\f$ with rotation
* angle \f$|r|\f$ in radian (right hand rule) and then translation by the vector \f$t\f$, you can use
*
* @code
* cv::Vec3f r, t;
* cv::Affine3f T(r, t);
* @endcode
*
* If you already have the rotation matrix \f$R\f$, then you can use
*
* @code
* cv::Matx33f R;
* cv::Affine3f T(R, t);
* @endcode
*
* To extract the rotation matrix \f$R\f$ from \f$T\f$, use
*
* @code
* cv::Matx33f R = T.rotation();
* @endcode
*
* To extract the translation vector \f$t\f$ from \f$T\f$, use
*
* @code
* cv::Vec3f t = T.translation();
* @endcode
*
* To extract the rotation vector \f$r\f$ from \f$T\f$, use
*
* @code
* cv::Vec3f r = T.rvec();
* @endcode
*
* Note that since the mapping from rotation vectors to rotation matrices
 * is many to one, the returned rotation vector is not necessarily the one
* you used before to set the matrix.
*
* If you have two transformations \f$T = T_1 * T_2\f$, use
*
* @code
* cv::Affine3f T, T1, T2;
* T = T2.concatenate(T1);
* @endcode
*
* To get the inverse transform of \f$T\f$, use
*
* @code
* cv::Affine3f T, T_inv;
* T_inv = T.inv();
* @endcode
*
*/
template<typename T>
class Affine3
{
public:
typedef T float_type;
typedef Matx<float_type, 3, 3> Mat3;
typedef Matx<float_type, 4, 4> Mat4;
typedef Vec<float_type, 3> Vec3;
//! Default constructor. It represents a 4x4 identity matrix.
Affine3();
//! Augmented affine matrix
Affine3(const Mat4& affine);
/**
* The resulting 4x4 matrix is
*
* \f[
* \begin{bmatrix}
* R & t\\
* 0 & 1\\
* \end{bmatrix}
* \f]
*
* @param R 3x3 rotation matrix.
* @param t 3x1 translation vector.
*/
Affine3(const Mat3& R, const Vec3& t = Vec3::all(0));
/**
* Rodrigues vector.
*
* The last row of the current matrix is set to [0,0,0,1].
*
* @param rvec 3x1 rotation vector. Its direction indicates the rotation axis and its length
* indicates the rotation angle in radian (using right hand rule).
* @param t 3x1 translation vector.
*/
Affine3(const Vec3& rvec, const Vec3& t = Vec3::all(0));
/**
* Combines all constructors above. Supports 4x4, 3x4, 3x3, 1x3, 3x1 sizes of data matrix.
*
* The last row of the current matrix is set to [0,0,0,1] when data is not 4x4.
*
* @param data 1-channel matrix.
* when it is 4x4, it is copied to the current matrix and t is not used.
* When it is 3x4, it is copied to the upper part 3x4 of the current matrix and t is not used.
* When it is 3x3, it is copied to the upper left 3x3 part of the current matrix.
* When it is 3x1 or 1x3, it is treated as a rotation vector and the Rodrigues formula is used
* to compute a 3x3 rotation matrix.
* @param t 3x1 translation vector. It is used only when data is neither 4x4 nor 3x4.
*/
explicit Affine3(const Mat& data, const Vec3& t = Vec3::all(0));
//! From 16-element array
explicit Affine3(const float_type* vals);
//! Create a 4x4 identity transform
static Affine3 Identity();
/**
* Rotation matrix.
*
* Copy the rotation matrix to the upper left 3x3 part of the current matrix.
* The remaining elements of the current matrix are not changed.
*
* @param R 3x3 rotation matrix.
*
*/
void rotation(const Mat3& R);
/**
* Rodrigues vector.
*
* It sets the upper left 3x3 part of the matrix. The remaining part is unaffected.
*
* @param rvec 3x1 rotation vector. The direction indicates the rotation axis and
* its length indicates the rotation angle in radian (using the right thumb convention).
*/
void rotation(const Vec3& rvec);
/**
* Combines rotation methods above. Supports 3x3, 1x3, 3x1 sizes of data matrix.
*
* It sets the upper left 3x3 part of the matrix. The remaining part is unaffected.
*
* @param data 1-channel matrix.
* When it is a 3x3 matrix, it sets the upper left 3x3 part of the current matrix.
* When it is a 1x3 or 3x1 matrix, it is used as a rotation vector. The Rodrigues formula
* is used to compute the rotation matrix and sets the upper left 3x3 part of the current matrix.
*/
void rotation(const Mat& data);
/**
* Copy the 3x3 matrix L to the upper left part of the current matrix
*
* It sets the upper left 3x3 part of the matrix. The remaining part is unaffected.
*
* @param L 3x3 matrix.
*/
void linear(const Mat3& L);
/**
* Copy t to the first three elements of the last column of the current matrix
*
* It sets the upper right 3x1 part of the matrix. The remaining part is unaffected.
*
* @param t 3x1 translation vector.
*/
void translation(const Vec3& t);
//! @return the upper left 3x3 part
Mat3 rotation() const;
//! @return the upper left 3x3 part
Mat3 linear() const;
//! @return the upper right 3x1 part
Vec3 translation() const;
//! Rodrigues vector.
//! @return a vector representing the upper left 3x3 rotation matrix of the current matrix.
//! @warning Since the mapping between rotation vectors and rotation matrices is many to one,
//! this function returns only one rotation vector that represents the current rotation matrix,
//! which is not necessarily the same one set by `rotation(const Vec3& rvec)`.
Vec3 rvec() const;
//! @return the inverse of the current matrix.
Affine3 inv(int method = cv::DECOMP_SVD) const;
//! a.rotate(R) is equivalent to Affine(R, 0) * a;
Affine3 rotate(const Mat3& R) const;
//! a.rotate(rvec) is equivalent to Affine(rvec, 0) * a;
Affine3 rotate(const Vec3& rvec) const;
//! a.translate(t) is equivalent to Affine(E, t) * a, where E is an identity matrix
Affine3 translate(const Vec3& t) const;
//! a.concatenate(affine) is equivalent to affine * a;
Affine3 concatenate(const Affine3& affine) const;
template <typename Y> operator Affine3<Y>() const;
template <typename Y> Affine3<Y> cast() const;
Mat4 matrix;
#if defined EIGEN_WORLD_VERSION && defined EIGEN_GEOMETRY_MODULE_H
Affine3(const Eigen::Transform<T, 3, Eigen::Affine, (Eigen::RowMajor)>& affine);
Affine3(const Eigen::Transform<T, 3, Eigen::Affine>& affine);
operator Eigen::Transform<T, 3, Eigen::Affine, (Eigen::RowMajor)>() const;
operator Eigen::Transform<T, 3, Eigen::Affine>() const;
#endif
};
template<typename T> static
Affine3<T> operator*(const Affine3<T>& affine1, const Affine3<T>& affine2);
//! V is a 3-element vector with member fields x, y and z
template<typename T, typename V> static
V operator*(const Affine3<T>& affine, const V& vector);
typedef Affine3<float> Affine3f;
typedef Affine3<double> Affine3d;
static Vec3f operator*(const Affine3f& affine, const Vec3f& vector);
static Vec3d operator*(const Affine3d& affine, const Vec3d& vector);
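/* A minimal usage sketch of the operators declared above: compose two transforms
   and apply the result to a 3D point.
@code
cv::Affine3f T1(cv::Vec3f(0.f, 0.f, (float)(CV_PI/2)));       // rotate 90 degrees around Z
cv::Affine3f T2(cv::Matx33f::eye(), cv::Vec3f(1.f, 0.f, 0.f)); // translate along X
cv::Affine3f T = T2 * T1;          // T1 is applied first, then T2
cv::Vec3f p(1.f, 0.f, 0.f);
cv::Vec3f q = T * p;               // transformed point, approximately (1, 1, 0)
@endcode
*/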
template<typename _Tp> class DataType< Affine3<_Tp> >
{
public:
typedef Affine3<_Tp> value_type;
typedef Affine3<typename DataType<_Tp>::work_type> work_type;
typedef _Tp channel_type;
enum { generic_type = 0,
channels = 16,
fmt = traits::SafeFmt<channel_type>::fmt + ((channels - 1) << 8)
#ifdef OPENCV_TRAITS_ENABLE_DEPRECATED
,depth = DataType<channel_type>::depth
,type = CV_MAKETYPE(depth, channels)
#endif
};
typedef Vec<channel_type, channels> vec_type;
};
namespace traits {
template<typename _Tp>
struct Depth< Affine3<_Tp> > { enum { value = Depth<_Tp>::value }; };
template<typename _Tp>
struct Type< Affine3<_Tp> > { enum { value = CV_MAKETYPE(Depth<_Tp>::value, 16) }; };
} // namespace
//! @} core
}
//! @cond IGNORED
///////////////////////////////////////////////////////////////////////////////////
// Implementation
template<typename T> inline
cv::Affine3<T>::Affine3()
: matrix(Mat4::eye())
{}
template<typename T> inline
cv::Affine3<T>::Affine3(const Mat4& affine)
: matrix(affine)
{}
template<typename T> inline
cv::Affine3<T>::Affine3(const Mat3& R, const Vec3& t)
{
rotation(R);
translation(t);
matrix.val[12] = matrix.val[13] = matrix.val[14] = 0;
matrix.val[15] = 1;
}
template<typename T> inline
cv::Affine3<T>::Affine3(const Vec3& _rvec, const Vec3& t)
{
rotation(_rvec);
translation(t);
matrix.val[12] = matrix.val[13] = matrix.val[14] = 0;
matrix.val[15] = 1;
}
template<typename T> inline
cv::Affine3<T>::Affine3(const cv::Mat& data, const Vec3& t)
{
CV_Assert(data.type() == cv::traits::Type<T>::value);
CV_Assert(data.channels() == 1);
if (data.cols == 4 && data.rows == 4)
{
data.copyTo(matrix);
return;
}
else if (data.cols == 4 && data.rows == 3)
{
rotation(data(Rect(0, 0, 3, 3)));
translation(data(Rect(3, 0, 1, 3)));
}
else
{
rotation(data);
translation(t);
}
matrix.val[12] = matrix.val[13] = matrix.val[14] = 0;
matrix.val[15] = 1;
}
template<typename T> inline
cv::Affine3<T>::Affine3(const float_type* vals) : matrix(vals)
{}
template<typename T> inline
cv::Affine3<T> cv::Affine3<T>::Identity()
{
return Affine3<T>(cv::Affine3<T>::Mat4::eye());
}
template<typename T> inline
void cv::Affine3<T>::rotation(const Mat3& R)
{
linear(R);
}
template<typename T> inline
void cv::Affine3<T>::rotation(const Vec3& _rvec)
{
double theta = norm(_rvec);
if (theta < DBL_EPSILON)
rotation(Mat3::eye());
else
{
double c = std::cos(theta);
double s = std::sin(theta);
double c1 = 1. - c;
double itheta = (theta != 0) ? 1./theta : 0.;
Point3_<T> r = _rvec*itheta;
Mat3 rrt( r.x*r.x, r.x*r.y, r.x*r.z, r.x*r.y, r.y*r.y, r.y*r.z, r.x*r.z, r.y*r.z, r.z*r.z );
Mat3 r_x( 0, -r.z, r.y, r.z, 0, -r.x, -r.y, r.x, 0 );
// R = cos(theta)*I + (1 - cos(theta))*r*rT + sin(theta)*[r_x]
// where [r_x] is [0 -rz ry; rz 0 -rx; -ry rx 0]
Mat3 R = c*Mat3::eye() + c1*rrt + s*r_x;
rotation(R);
}
}
// Combines the rotation methods above. Supports 3x3, 1x3 and 3x1 sizes of the data matrix.
template<typename T> inline
void cv::Affine3<T>::rotation(const cv::Mat& data)
{
CV_Assert(data.type() == cv::traits::Type<T>::value);
CV_Assert(data.channels() == 1);
if (data.cols == 3 && data.rows == 3)
{
Mat3 R;
data.copyTo(R);
rotation(R);
}
else if ((data.cols == 3 && data.rows == 1) || (data.cols == 1 && data.rows == 3))
{
Vec3 _rvec;
data.reshape(1, 3).copyTo(_rvec);
rotation(_rvec);
}
else
CV_Error(Error::StsError, "Input matrix can only be 3x3, 1x3 or 3x1");
}
template<typename T> inline
void cv::Affine3<T>::linear(const Mat3& L)
{
matrix.val[0] = L.val[0]; matrix.val[1] = L.val[1]; matrix.val[ 2] = L.val[2];
matrix.val[4] = L.val[3]; matrix.val[5] = L.val[4]; matrix.val[ 6] = L.val[5];
matrix.val[8] = L.val[6]; matrix.val[9] = L.val[7]; matrix.val[10] = L.val[8];
}
template<typename T> inline
void cv::Affine3<T>::translation(const Vec3& t)
{
matrix.val[3] = t[0]; matrix.val[7] = t[1]; matrix.val[11] = t[2];
}
template<typename T> inline
typename cv::Affine3<T>::Mat3 cv::Affine3<T>::rotation() const
{
return linear();
}
template<typename T> inline
typename cv::Affine3<T>::Mat3 cv::Affine3<T>::linear() const
{
typename cv::Affine3<T>::Mat3 R;
R.val[0] = matrix.val[0]; R.val[1] = matrix.val[1]; R.val[2] = matrix.val[ 2];
R.val[3] = matrix.val[4]; R.val[4] = matrix.val[5]; R.val[5] = matrix.val[ 6];
R.val[6] = matrix.val[8]; R.val[7] = matrix.val[9]; R.val[8] = matrix.val[10];
return R;
}
template<typename T> inline
typename cv::Affine3<T>::Vec3 cv::Affine3<T>::translation() const
{
return Vec3(matrix.val[3], matrix.val[7], matrix.val[11]);
}
template<typename T> inline
typename cv::Affine3<T>::Vec3 cv::Affine3<T>::rvec() const
{
cv::Vec3d w;
cv::Matx33d u, vt, R = rotation();
cv::SVD::compute(R, w, u, vt, cv::SVD::FULL_UV + cv::SVD::MODIFY_A);
R = u * vt;
double rx = R.val[7] - R.val[5];
double ry = R.val[2] - R.val[6];
double rz = R.val[3] - R.val[1];
double s = std::sqrt((rx*rx + ry*ry + rz*rz)*0.25);
double c = (R.val[0] + R.val[4] + R.val[8] - 1) * 0.5;
c = c > 1.0 ? 1.0 : c < -1.0 ? -1.0 : c;
double theta = std::acos(c);
if( s < 1e-5 )
{
if( c > 0 )
rx = ry = rz = 0;
else
{
double t;
t = (R.val[0] + 1) * 0.5;
rx = std::sqrt(std::max(t, 0.0));
t = (R.val[4] + 1) * 0.5;
ry = std::sqrt(std::max(t, 0.0)) * (R.val[1] < 0 ? -1.0 : 1.0);
t = (R.val[8] + 1) * 0.5;
rz = std::sqrt(std::max(t, 0.0)) * (R.val[2] < 0 ? -1.0 : 1.0);
if( fabs(rx) < fabs(ry) && fabs(rx) < fabs(rz) && (R.val[5] > 0) != (ry*rz > 0) )
rz = -rz;
theta /= std::sqrt(rx*rx + ry*ry + rz*rz);
rx *= theta;
ry *= theta;
rz *= theta;
}
}
else
{
double vth = 1/(2*s);
vth *= theta;
rx *= vth; ry *= vth; rz *= vth;
}
return cv::Vec3d(rx, ry, rz);
}
template<typename T> inline
cv::Affine3<T> cv::Affine3<T>::inv(int method) const
{
return matrix.inv(method);
}
template<typename T> inline
cv::Affine3<T> cv::Affine3<T>::rotate(const Mat3& R) const
{
Mat3 Lc = linear();
Vec3 tc = translation();
Mat4 result;
result.val[12] = result.val[13] = result.val[14] = 0;
result.val[15] = 1;
for(int j = 0; j < 3; ++j)
{
for(int i = 0; i < 3; ++i)
{
float_type value = 0;
for(int k = 0; k < 3; ++k)
value += R(j, k) * Lc(k, i);
result(j, i) = value;
}
result(j, 3) = R.row(j).dot(tc.t());
}
return result;
}
template<typename T> inline
cv::Affine3<T> cv::Affine3<T>::rotate(const Vec3& _rvec) const
{
return rotate(Affine3f(_rvec).rotation());
}
template<typename T> inline
cv::Affine3<T> cv::Affine3<T>::translate(const Vec3& t) const
{
Mat4 m = matrix;
m.val[ 3] += t[0];
m.val[ 7] += t[1];
m.val[11] += t[2];
return m;
}
template<typename T> inline
cv::Affine3<T> cv::Affine3<T>::concatenate(const Affine3<T>& affine) const
{
return (*this).rotate(affine.rotation()).translate(affine.translation());
}
template<typename T> template <typename Y> inline
cv::Affine3<T>::operator Affine3<Y>() const
{
return Affine3<Y>(matrix);
}
template<typename T> template <typename Y> inline
cv::Affine3<Y> cv::Affine3<T>::cast() const
{
return Affine3<Y>(matrix);
}
template<typename T> inline
cv::Affine3<T> cv::operator*(const cv::Affine3<T>& affine1, const cv::Affine3<T>& affine2)
{
return affine2.concatenate(affine1);
}
template<typename T, typename V> inline
V cv::operator*(const cv::Affine3<T>& affine, const V& v)
{
const typename Affine3<T>::Mat4& m = affine.matrix;
V r;
r.x = m.val[0] * v.x + m.val[1] * v.y + m.val[ 2] * v.z + m.val[ 3];
r.y = m.val[4] * v.x + m.val[5] * v.y + m.val[ 6] * v.z + m.val[ 7];
r.z = m.val[8] * v.x + m.val[9] * v.y + m.val[10] * v.z + m.val[11];
return r;
}
static inline
cv::Vec3f cv::operator*(const cv::Affine3f& affine, const cv::Vec3f& v)
{
const cv::Matx44f& m = affine.matrix;
cv::Vec3f r;
r.val[0] = m.val[0] * v[0] + m.val[1] * v[1] + m.val[ 2] * v[2] + m.val[ 3];
r.val[1] = m.val[4] * v[0] + m.val[5] * v[1] + m.val[ 6] * v[2] + m.val[ 7];
r.val[2] = m.val[8] * v[0] + m.val[9] * v[1] + m.val[10] * v[2] + m.val[11];
return r;
}
static inline
cv::Vec3d cv::operator*(const cv::Affine3d& affine, const cv::Vec3d& v)
{
const cv::Matx44d& m = affine.matrix;
cv::Vec3d r;
r.val[0] = m.val[0] * v[0] + m.val[1] * v[1] + m.val[ 2] * v[2] + m.val[ 3];
r.val[1] = m.val[4] * v[0] + m.val[5] * v[1] + m.val[ 6] * v[2] + m.val[ 7];
r.val[2] = m.val[8] * v[0] + m.val[9] * v[1] + m.val[10] * v[2] + m.val[11];
return r;
}
#if defined EIGEN_WORLD_VERSION && defined EIGEN_GEOMETRY_MODULE_H
template<typename T> inline
cv::Affine3<T>::Affine3(const Eigen::Transform<T, 3, Eigen::Affine, (Eigen::RowMajor)>& affine)
{
cv::Mat(4, 4, cv::traits::Type<T>::value, affine.matrix().data()).copyTo(matrix);
}
template<typename T> inline
cv::Affine3<T>::Affine3(const Eigen::Transform<T, 3, Eigen::Affine>& affine)
{
Eigen::Transform<T, 3, Eigen::Affine, (Eigen::RowMajor)> a = affine;
cv::Mat(4, 4, cv::traits::Type<T>::value, a.matrix().data()).copyTo(matrix);
}
template<typename T> inline
cv::Affine3<T>::operator Eigen::Transform<T, 3, Eigen::Affine, (Eigen::RowMajor)>() const
{
Eigen::Transform<T, 3, Eigen::Affine, (Eigen::RowMajor)> r;
cv::Mat hdr(4, 4, cv::traits::Type<T>::value, r.matrix().data());
cv::Mat(matrix, false).copyTo(hdr);
return r;
}
template<typename T> inline
cv::Affine3<T>::operator Eigen::Transform<T, 3, Eigen::Affine>() const
{
return this->operator Eigen::Transform<T, 3, Eigen::Affine, (Eigen::RowMajor)>();
}
#endif /* defined EIGEN_WORLD_VERSION && defined EIGEN_GEOMETRY_MODULE_H */
//! @endcond
#endif /* __cplusplus */
#endif /* OPENCV_CORE_AFFINE3_HPP */

View File

@@ -0,0 +1,105 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#ifndef OPENCV_CORE_ASYNC_HPP
#define OPENCV_CORE_ASYNC_HPP
#include <opencv2/core/mat.hpp>
#ifdef CV_CXX11
//#include <future>
#include <chrono>
#endif
namespace cv {
/** @addtogroup core_async
@{
*/
/** @brief Returns result of asynchronous operations
Object has attached asynchronous state.
Assignment operator doesn't clone asynchronous state (it is shared between all instances).
Result can be fetched via get() method only once.
*/
class CV_EXPORTS_W AsyncArray
{
public:
~AsyncArray() CV_NOEXCEPT;
CV_WRAP AsyncArray() CV_NOEXCEPT;
AsyncArray(const AsyncArray& o) CV_NOEXCEPT;
AsyncArray& operator=(const AsyncArray& o) CV_NOEXCEPT;
CV_WRAP void release() CV_NOEXCEPT;
/** Fetch the result.
@param[out] dst destination array
Waits for result until container has valid result.
Throws exception if exception was stored as a result.
Throws exception on invalid container state.
@note Result or stored exception can be fetched only once.
*/
CV_WRAP void get(OutputArray dst) const;
/** Retrieving the result with timeout
@param[out] dst destination array
@param[in] timeoutNs timeout in nanoseconds, -1 for infinite wait
@returns true if result is ready, false if the timeout has expired
@note Result or stored exception can be fetched only once.
*/
bool get(OutputArray dst, int64 timeoutNs) const;
CV_WRAP inline
bool get(OutputArray dst, double timeoutNs) const { return get(dst, (int64)timeoutNs); }
bool wait_for(int64 timeoutNs) const;
CV_WRAP inline
bool wait_for(double timeoutNs) const { return wait_for((int64)timeoutNs); }
CV_WRAP bool valid() const CV_NOEXCEPT;
#ifdef CV_CXX11
inline AsyncArray(AsyncArray&& o) { p = o.p; o.p = NULL; }
inline AsyncArray& operator=(AsyncArray&& o) CV_NOEXCEPT { std::swap(p, o.p); return *this; }
template<typename _Rep, typename _Period>
inline bool get(OutputArray dst, const std::chrono::duration<_Rep, _Period>& timeout)
{
return get(dst, (int64)(std::chrono::nanoseconds(timeout).count()));
}
template<typename _Rep, typename _Period>
inline bool wait_for(const std::chrono::duration<_Rep, _Period>& timeout)
{
return wait_for((int64)(std::chrono::nanoseconds(timeout).count()));
}
#if 0
std::future<Mat> getFutureMat() const;
std::future<UMat> getFutureUMat() const;
#endif
#endif
// PImpl
struct Impl; friend struct Impl;
inline void* _getImpl() const CV_NOEXCEPT { return p; }
protected:
Impl* p;
};
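/* A minimal usage sketch: fetch a result with a timeout. The producer used here,
   cv::utils::testAsyncArray() from opencv2/core/bindings_utils.hpp, wraps its
   input in an already resolved AsyncArray; asynchronous APIs elsewhere in the
   library return the same type. The chrono overload requires C++11.
@code
#include <opencv2/core/bindings_utils.hpp>
#include <chrono>

void fetchAsync(const cv::Mat& input)
{
    cv::AsyncArray future = cv::utils::testAsyncArray(input);
    cv::Mat result;
    if (future.get(result, std::chrono::milliseconds(100)))
    {
        // result is valid here; note that it can be fetched only once
    }
}
@endcode
*/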
//! @}
} // namespace
#endif // OPENCV_CORE_ASYNC_HPP

View File

@@ -0,0 +1,664 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Copyright (C) 2014, Itseez Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef OPENCV_CORE_BASE_HPP
#define OPENCV_CORE_BASE_HPP
#ifndef __cplusplus
# error base.hpp header must be compiled as C++
#endif
#include "opencv2/opencv_modules.hpp"
#include <climits>
#include <algorithm>
#include "opencv2/core/cvdef.h"
#include "opencv2/core/cvstd.hpp"
namespace cv
{
//! @addtogroup core_utils
//! @{
namespace Error {
//! error codes
enum Code {
StsOk= 0, //!< everything is ok
StsBackTrace= -1, //!< pseudo error for back trace
StsError= -2, //!< unknown /unspecified error
StsInternal= -3, //!< internal error (bad state)
StsNoMem= -4, //!< insufficient memory
StsBadArg= -5, //!< function arg/param is bad
StsBadFunc= -6, //!< unsupported function
StsNoConv= -7, //!< iteration didn't converge
StsAutoTrace= -8, //!< tracing
HeaderIsNull= -9, //!< image header is NULL
BadImageSize= -10, //!< image size is invalid
BadOffset= -11, //!< offset is invalid
BadDataPtr= -12, //!<
BadStep= -13, //!< image step is wrong, this may happen for a non-continuous matrix.
BadModelOrChSeq= -14, //!<
BadNumChannels= -15, //!< bad number of channels, for example, some functions accept only single channel matrices.
BadNumChannel1U= -16, //!<
BadDepth= -17, //!< input image depth is not supported by the function
BadAlphaChannel= -18, //!<
BadOrder= -19, //!< number of dimensions is out of range
BadOrigin= -20, //!< incorrect input origin
BadAlign= -21, //!< incorrect input align
BadCallBack= -22, //!<
BadTileSize= -23, //!<
BadCOI= -24, //!< input COI is not supported
BadROISize= -25, //!< incorrect input roi
MaskIsTiled= -26, //!<
StsNullPtr= -27, //!< null pointer
StsVecLengthErr= -28, //!< incorrect vector length
StsFilterStructContentErr= -29, //!< incorrect filter structure content
StsKernelStructContentErr= -30, //!< incorrect transform kernel content
StsFilterOffsetErr= -31, //!< incorrect filter offset value
StsBadSize= -201, //!< the input/output structure size is incorrect
StsDivByZero= -202, //!< division by zero
StsInplaceNotSupported= -203, //!< in-place operation is not supported
StsObjectNotFound= -204, //!< request can't be completed
StsUnmatchedFormats= -205, //!< formats of input/output arrays differ
StsBadFlag= -206, //!< flag is wrong or not supported
StsBadPoint= -207, //!< bad CvPoint
StsBadMask= -208, //!< bad format of mask (neither 8uC1 nor 8sC1)
StsUnmatchedSizes= -209, //!< sizes of input/output structures do not match
StsUnsupportedFormat= -210, //!< the data format/type is not supported by the function
StsOutOfRange= -211, //!< some of parameters are out of range
StsParseError= -212, //!< invalid syntax/structure of the parsed file
StsNotImplemented= -213, //!< the requested function/feature is not implemented
StsBadMemBlock= -214, //!< an allocated block has been corrupted
StsAssert= -215, //!< assertion failed
GpuNotSupported= -216, //!< no CUDA support
GpuApiCallError= -217, //!< GPU API call error
OpenGlNotSupported= -218, //!< no OpenGL support
OpenGlApiCallError= -219, //!< OpenGL API call error
OpenCLApiCallError= -220, //!< OpenCL API call error
OpenCLDoubleNotSupported= -221,
OpenCLInitError= -222, //!< OpenCL initialization error
OpenCLNoAMDBlasFft= -223
};
} //Error
//! @} core_utils
//! @addtogroup core_array
//! @{
//! matrix decomposition types
enum DecompTypes {
/** Gaussian elimination with the optimal pivot element chosen. */
DECOMP_LU = 0,
/** singular value decomposition (SVD) method; the system can be over-defined and/or the matrix
src1 can be singular */
DECOMP_SVD = 1,
/** eigenvalue decomposition; the matrix src1 must be symmetrical */
DECOMP_EIG = 2,
/** Cholesky \f$LL^T\f$ factorization; the matrix src1 must be symmetric and positive
definite */
DECOMP_CHOLESKY = 3,
/** QR factorization; the system can be over-defined and/or the matrix src1 can be singular */
DECOMP_QR = 4,
/** while all the previous flags are mutually exclusive, this flag can be used together with
any of the previous; it means that the normal equations
\f$\texttt{src1}^T\cdot\texttt{src1}\cdot\texttt{dst}=\texttt{src1}^T\texttt{src2}\f$ are
solved instead of the original system
\f$\texttt{src1}\cdot\texttt{dst}=\texttt{src2}\f$ */
DECOMP_NORMAL = 16
};
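/* A minimal usage sketch: the decomposition flags above are passed to functions
   such as cv::solve and cv::invert (declared in core.hpp).
@code
cv::Mat A = (cv::Mat_<double>(3, 2) << 1, 1,
                                       2, 1,
                                       3, 1);
cv::Mat b = (cv::Mat_<double>(3, 1) << 2, 3, 5);
cv::Mat x;
// least-squares solution of A*x = b via the normal equations and Cholesky
cv::solve(A, b, x, cv::DECOMP_CHOLESKY | cv::DECOMP_NORMAL);
// SVD also handles over-determined or rank-deficient systems directly
cv::solve(A, b, x, cv::DECOMP_SVD);
@endcode
*/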
/** norm types
src1 and src2 denote input arrays.
*/
enum NormTypes {
/**
\f[
norm = \forkthree
{\|\texttt{src1}\|_{L_{\infty}} = \max _I | \texttt{src1} (I)|}{if \(\texttt{normType} = \texttt{NORM_INF}\) }
{\|\texttt{src1}-\texttt{src2}\|_{L_{\infty}} = \max _I | \texttt{src1} (I) - \texttt{src2} (I)|}{if \(\texttt{normType} = \texttt{NORM_INF}\) }
{\frac{\|\texttt{src1}-\texttt{src2}\|_{L_{\infty}} }{\|\texttt{src2}\|_{L_{\infty}} }}{if \(\texttt{normType} = \texttt{NORM_RELATIVE | NORM_INF}\) }
\f]
*/
NORM_INF = 1,
/**
\f[
norm = \forkthree
{\| \texttt{src1} \| _{L_1} = \sum _I | \texttt{src1} (I)|}{if \(\texttt{normType} = \texttt{NORM_L1}\)}
{ \| \texttt{src1} - \texttt{src2} \| _{L_1} = \sum _I | \texttt{src1} (I) - \texttt{src2} (I)|}{if \(\texttt{normType} = \texttt{NORM_L1}\) }
{ \frac{\|\texttt{src1}-\texttt{src2}\|_{L_1} }{\|\texttt{src2}\|_{L_1}} }{if \(\texttt{normType} = \texttt{NORM_RELATIVE | NORM_L1}\) }
\f]*/
NORM_L1 = 2,
/**
\f[
norm = \forkthree
{ \| \texttt{src1} \| _{L_2} = \sqrt{\sum_I \texttt{src1}(I)^2} }{if \(\texttt{normType} = \texttt{NORM_L2}\) }
{ \| \texttt{src1} - \texttt{src2} \| _{L_2} = \sqrt{\sum_I (\texttt{src1}(I) - \texttt{src2}(I))^2} }{if \(\texttt{normType} = \texttt{NORM_L2}\) }
{ \frac{\|\texttt{src1}-\texttt{src2}\|_{L_2} }{\|\texttt{src2}\|_{L_2}} }{if \(\texttt{normType} = \texttt{NORM_RELATIVE | NORM_L2}\) }
\f]
*/
NORM_L2 = 4,
/**
\f[
norm = \forkthree
{ \| \texttt{src1} \| _{L_2} ^{2} = \sum_I \texttt{src1}(I)^2} {if \(\texttt{normType} = \texttt{NORM_L2SQR}\)}
{ \| \texttt{src1} - \texttt{src2} \| _{L_2} ^{2} = \sum_I (\texttt{src1}(I) - \texttt{src2}(I))^2 }{if \(\texttt{normType} = \texttt{NORM_L2SQR}\) }
{ \left(\frac{\|\texttt{src1}-\texttt{src2}\|_{L_2} }{\|\texttt{src2}\|_{L_2}}\right)^2 }{if \(\texttt{normType} = \texttt{NORM_RELATIVE | NORM_L2SQR}\) }
\f]
*/
NORM_L2SQR = 5,
/**
In the case of one input array, calculates the Hamming distance of the array from zero.
In the case of two input arrays, calculates the Hamming distance between the arrays.
*/
NORM_HAMMING = 6,
/**
Similar to NORM_HAMMING, but in the calculation, each two bits of the input sequence will
be added and treated as a single bit to be used in the same calculation as NORM_HAMMING.
*/
NORM_HAMMING2 = 7,
NORM_TYPE_MASK = 7, //!< bit-mask which can be used to separate norm type from norm flags
NORM_RELATIVE = 8, //!< flag
NORM_MINMAX = 32 //!< flag
};
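/* A minimal usage sketch: the norm types above are typically passed to cv::norm
   or cv::normalize (declared in core.hpp).
@code
cv::Mat a = cv::Mat::ones(4, 4, CV_32F);
cv::Mat b(4, 4, CV_32F, cv::Scalar(2));
double absErr = cv::norm(a, b, cv::NORM_L2);                     // ||a - b||_L2
double relErr = cv::norm(a, b, cv::NORM_L2 | cv::NORM_RELATIVE); // ||a - b||_L2 / ||b||_L2
@endcode
*/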
//! comparison types
enum CmpTypes { CMP_EQ = 0, //!< src1 is equal to src2.
CMP_GT = 1, //!< src1 is greater than src2.
CMP_GE = 2, //!< src1 is greater than or equal to src2.
CMP_LT = 3, //!< src1 is less than src2.
CMP_LE = 4, //!< src1 is less than or equal to src2.
CMP_NE = 5 //!< src1 is unequal to src2.
};
//! generalized matrix multiplication flags
enum GemmFlags { GEMM_1_T = 1, //!< transposes src1
GEMM_2_T = 2, //!< transposes src2
GEMM_3_T = 4 //!< transposes src3
};
enum DftFlags {
/** performs an inverse 1D or 2D transform instead of the default forward
transform. */
DFT_INVERSE = 1,
/** scales the result: divide it by the number of array elements. Normally, it is
combined with DFT_INVERSE. */
DFT_SCALE = 2,
/** performs a forward or inverse transform of every individual row of the input
matrix; this flag enables you to transform multiple vectors simultaneously and can be used to
decrease the overhead (which is sometimes several times larger than the processing itself) to
perform 3D and higher-dimensional transformations and so forth.*/
DFT_ROWS = 4,
/** performs a forward transformation of 1D or 2D real array; the result,
though being a complex array, has complex-conjugate symmetry (*CCS*, see the function
description below for details), and such an array can be packed into a real array of the same
size as input, which is the fastest option and which is what the function does by default;
however, you may wish to get a full complex array (for simpler spectrum analysis, and so on) -
pass the flag to enable the function to produce a full-size complex output array. */
DFT_COMPLEX_OUTPUT = 16,
/** performs an inverse transformation of a 1D or 2D complex array; the
result is normally a complex array of the same size, however, if the input array has
conjugate-complex symmetry (for example, it is a result of forward transformation with
DFT_COMPLEX_OUTPUT flag), the output is a real array; while the function itself does not
check whether the input is symmetrical or not, you can pass the flag and then the function
will assume the symmetry and produce the real output array (note that when the input is packed
into a real array and inverse transformation is executed, the function treats the input as a
packed complex-conjugate symmetrical array, and the output will also be a real array). */
DFT_REAL_OUTPUT = 32,
/** specifies that input is complex input. If this flag is set, the input must have 2 channels.
On the other hand, for backwards compatibility reasons, if the input has 2 channels, it is
already considered complex. */
DFT_COMPLEX_INPUT = 64,
/** performs an inverse 1D or 2D transform instead of the default forward transform. */
DCT_INVERSE = DFT_INVERSE,
/** performs a forward or inverse transform of every individual row of the input
matrix. This flag enables you to transform multiple vectors simultaneously and can be used to
decrease the overhead (which is sometimes several times larger than the processing itself) to
perform 3D and higher-dimensional transforms and so forth.*/
DCT_ROWS = DFT_ROWS
};
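/* A minimal usage sketch: a forward transform of a real array followed by the
   matching inverse transform, using the flags above (cv::dft is declared in
   core.hpp).
@code
cv::Mat src = cv::Mat::ones(8, 8, CV_32F);
cv::Mat spectrum, restored;
cv::dft(src, spectrum, cv::DFT_COMPLEX_OUTPUT);                                     // full-size complex spectrum
cv::dft(spectrum, restored, cv::DFT_INVERSE | cv::DFT_REAL_OUTPUT | cv::DFT_SCALE); // restored ~= src
@endcode
*/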
//! Various border types, image boundaries are denoted with `|`
//! @see borderInterpolate, copyMakeBorder
enum BorderTypes {
BORDER_CONSTANT = 0, //!< `iiiiii|abcdefgh|iiiiiii` with some specified `i`
BORDER_REPLICATE = 1, //!< `aaaaaa|abcdefgh|hhhhhhh`
BORDER_REFLECT = 2, //!< `fedcba|abcdefgh|hgfedcb`
BORDER_WRAP = 3, //!< `cdefgh|abcdefgh|abcdefg`
BORDER_REFLECT_101 = 4, //!< `gfedcb|abcdefgh|gfedcba`
BORDER_TRANSPARENT = 5, //!< `uvwxyz|abcdefgh|ijklmno`
BORDER_REFLECT101 = BORDER_REFLECT_101, //!< same as BORDER_REFLECT_101
BORDER_DEFAULT = BORDER_REFLECT_101, //!< same as BORDER_REFLECT_101
BORDER_ISOLATED = 16 //!< do not look outside of ROI
};
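/* A minimal usage sketch: the border types above are used by cv::copyMakeBorder
   (declared in core.hpp) and by most filtering functions.
@code
cv::Mat src(4, 4, CV_8UC1, cv::Scalar(255));
cv::Mat padded;
cv::copyMakeBorder(src, padded, 2, 2, 2, 2, cv::BORDER_REPLICATE);
cv::copyMakeBorder(src, padded, 2, 2, 2, 2, cv::BORDER_CONSTANT, cv::Scalar(0));
@endcode
*/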
//! @} core_array
//! @addtogroup core_utils
//! @{
/*! @brief Signals an error and raises the exception.
By default the function prints information about the error to stderr,
then it either stops if setBreakOnError() has been called before, or raises the exception.
It is possible to alter error processing by using redirectError().
@param _code - error code (Error::Code)
@param _err - error description
@param _func - function name. Available only when the compiler supports getting it
@param _file - source file name where the error has occurred
@param _line - line number in the source file where the error has occurred
@see CV_Error, CV_Error_, CV_Assert, CV_DbgAssert
*/
CV_EXPORTS CV_NORETURN void error(int _code, const String& _err, const char* _func, const char* _file, int _line);
#ifdef CV_STATIC_ANALYSIS
// In practice, some macros are not processed correctly (noreturn is not detected).
// We need to use simplified definition for them.
#define CV_Error(code, msg) do { (void)(code); (void)(msg); abort(); } while (0)
#define CV_Error_(code, args) do { (void)(code); (void)(cv::format args); abort(); } while (0)
#define CV_Assert( expr ) do { if (!(expr)) abort(); } while (0)
#else // CV_STATIC_ANALYSIS
/** @brief Call the error handler.
Currently, the error handler prints the error code and the error message to the standard
error stream `stderr`. In the Debug configuration, it then provokes a memory access violation, so that
the execution stack and all the parameters can be analyzed by the debugger. In the Release
configuration, the exception is thrown.
@param code one of Error::Code
@param msg error message
*/
#define CV_Error( code, msg ) cv::error( code, msg, CV_Func, __FILE__, __LINE__ )
/** @brief Call the error handler.
This macro can be used to construct an error message on the fly to include some dynamic information,
for example:
@code
// note the extra parentheses around the formatted text message
CV_Error_(Error::StsOutOfRange,
("the value at (%d, %d)=%g is out of range", badPt.x, badPt.y, badValue));
@endcode
@param code one of Error::Code
@param args printf-like formatted error message in parentheses
*/
#define CV_Error_( code, args ) cv::error( code, cv::format args, CV_Func, __FILE__, __LINE__ )
/** @brief Checks a condition at runtime and throws exception if it fails
The macros CV_Assert (and CV_DbgAssert(expr)) evaluate the specified expression. If it is 0, the macros
raise an error (see cv::error). The macro CV_Assert checks the condition in both Debug and Release
configurations while CV_DbgAssert is only retained in the Debug configuration.
*/
#define CV_Assert( expr ) do { if(!!(expr)) ; else cv::error( cv::Error::StsAssert, #expr, CV_Func, __FILE__, __LINE__ ); } while(0)
#endif // CV_STATIC_ANALYSIS
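/* A minimal usage sketch: typical argument checking at the top of a function
   using the macros defined above.
@code
void processImage(const cv::Mat& img)
{
    CV_Assert(!img.empty());
    if (img.type() != CV_8UC3)
        CV_Error(cv::Error::StsUnsupportedFormat, "processImage expects an 8-bit, 3-channel image");
    // ...
}
@endcode
*/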
//! @cond IGNORED
#if !defined(__OPENCV_BUILD) // TODO: backward compatibility only
#ifndef CV_ErrorNoReturn
#define CV_ErrorNoReturn CV_Error
#endif
#ifndef CV_ErrorNoReturn_
#define CV_ErrorNoReturn_ CV_Error_
#endif
#endif
#define CV_Assert_1 CV_Assert
#define CV_Assert_2( expr, ... ) CV_Assert_1(expr); __CV_EXPAND(CV_Assert_1( __VA_ARGS__ ))
#define CV_Assert_3( expr, ... ) CV_Assert_1(expr); __CV_EXPAND(CV_Assert_2( __VA_ARGS__ ))
#define CV_Assert_4( expr, ... ) CV_Assert_1(expr); __CV_EXPAND(CV_Assert_3( __VA_ARGS__ ))
#define CV_Assert_5( expr, ... ) CV_Assert_1(expr); __CV_EXPAND(CV_Assert_4( __VA_ARGS__ ))
#define CV_Assert_6( expr, ... ) CV_Assert_1(expr); __CV_EXPAND(CV_Assert_5( __VA_ARGS__ ))
#define CV_Assert_7( expr, ... ) CV_Assert_1(expr); __CV_EXPAND(CV_Assert_6( __VA_ARGS__ ))
#define CV_Assert_8( expr, ... ) CV_Assert_1(expr); __CV_EXPAND(CV_Assert_7( __VA_ARGS__ ))
#define CV_Assert_9( expr, ... ) CV_Assert_1(expr); __CV_EXPAND(CV_Assert_8( __VA_ARGS__ ))
#define CV_Assert_10( expr, ... ) CV_Assert_1(expr); __CV_EXPAND(CV_Assert_9( __VA_ARGS__ ))
#define CV_Assert_N(...) do { __CV_EXPAND(__CV_CAT(CV_Assert_, __CV_VA_NUM_ARGS(__VA_ARGS__)) (__VA_ARGS__)); } while(0)
//! @endcond
#if defined _DEBUG || defined CV_STATIC_ANALYSIS
# define CV_DbgAssert(expr) CV_Assert(expr)
#else
/** replaced with CV_Assert(expr) in Debug configuration */
# define CV_DbgAssert(expr)
#endif
/*
 * Hamming distance functor - counts the bit differences between two strings - useful for the BRIEF descriptor;
 * i.e. the bit count of A XOR B
*/
struct CV_EXPORTS Hamming
{
static const NormTypes normType = NORM_HAMMING;
typedef unsigned char ValueType;
typedef int ResultType;
/** this will count the bits in a ^ b
*/
ResultType operator()( const unsigned char* a, const unsigned char* b, int size ) const;
};
typedef Hamming HammingLUT;
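/* A minimal usage sketch: computing the Hamming distance between two binary
   descriptors with the functor above (the descriptor matrix here is a small
   synthetic stand-in for, e.g., ORB or BRIEF output).
@code
cv::Mat descriptors(2, 32, CV_8U, cv::Scalar(0)); // two 256-bit descriptors
descriptors.at<unsigned char>(1, 0) = 0xFF;       // make them differ in 8 bits
cv::Hamming hamming;
int dist = hamming(descriptors.ptr<unsigned char>(0),
                   descriptors.ptr<unsigned char>(1),
                   descriptors.cols);             // size is the number of bytes per descriptor; dist == 8
@endcode
*/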
/////////////////////////////////// inline norms ////////////////////////////////////
template<typename _Tp> inline _Tp cv_abs(_Tp x) { return std::abs(x); }
inline int cv_abs(uchar x) { return x; }
inline int cv_abs(schar x) { return std::abs(x); }
inline int cv_abs(ushort x) { return x; }
inline int cv_abs(short x) { return std::abs(x); }
template<typename _Tp, typename _AccTp> static inline
_AccTp normL2Sqr(const _Tp* a, int n)
{
_AccTp s = 0;
int i=0;
#if CV_ENABLE_UNROLLED
for( ; i <= n - 4; i += 4 )
{
_AccTp v0 = a[i], v1 = a[i+1], v2 = a[i+2], v3 = a[i+3];
s += v0*v0 + v1*v1 + v2*v2 + v3*v3;
}
#endif
for( ; i < n; i++ )
{
_AccTp v = a[i];
s += v*v;
}
return s;
}
template<typename _Tp, typename _AccTp> static inline
_AccTp normL1(const _Tp* a, int n)
{
_AccTp s = 0;
int i = 0;
#if CV_ENABLE_UNROLLED
for(; i <= n - 4; i += 4 )
{
s += (_AccTp)cv_abs(a[i]) + (_AccTp)cv_abs(a[i+1]) +
(_AccTp)cv_abs(a[i+2]) + (_AccTp)cv_abs(a[i+3]);
}
#endif
for( ; i < n; i++ )
s += cv_abs(a[i]);
return s;
}
template<typename _Tp, typename _AccTp> static inline
_AccTp normInf(const _Tp* a, int n)
{
_AccTp s = 0;
for( int i = 0; i < n; i++ )
s = std::max(s, (_AccTp)cv_abs(a[i]));
return s;
}
template<typename _Tp, typename _AccTp> static inline
_AccTp normL2Sqr(const _Tp* a, const _Tp* b, int n)
{
_AccTp s = 0;
int i= 0;
#if CV_ENABLE_UNROLLED
for(; i <= n - 4; i += 4 )
{
_AccTp v0 = _AccTp(a[i] - b[i]), v1 = _AccTp(a[i+1] - b[i+1]), v2 = _AccTp(a[i+2] - b[i+2]), v3 = _AccTp(a[i+3] - b[i+3]);
s += v0*v0 + v1*v1 + v2*v2 + v3*v3;
}
#endif
for( ; i < n; i++ )
{
_AccTp v = _AccTp(a[i] - b[i]);
s += v*v;
}
return s;
}
static inline float normL2Sqr(const float* a, const float* b, int n)
{
float s = 0.f;
for( int i = 0; i < n; i++ )
{
float v = a[i] - b[i];
s += v*v;
}
return s;
}
template<typename _Tp, typename _AccTp> static inline
_AccTp normL1(const _Tp* a, const _Tp* b, int n)
{
_AccTp s = 0;
int i= 0;
#if CV_ENABLE_UNROLLED
for(; i <= n - 4; i += 4 )
{
_AccTp v0 = _AccTp(a[i] - b[i]), v1 = _AccTp(a[i+1] - b[i+1]), v2 = _AccTp(a[i+2] - b[i+2]), v3 = _AccTp(a[i+3] - b[i+3]);
s += std::abs(v0) + std::abs(v1) + std::abs(v2) + std::abs(v3);
}
#endif
for( ; i < n; i++ )
{
_AccTp v = _AccTp(a[i] - b[i]);
s += std::abs(v);
}
return s;
}
inline float normL1(const float* a, const float* b, int n)
{
float s = 0.f;
for( int i = 0; i < n; i++ )
{
s += std::abs(a[i] - b[i]);
}
return s;
}
inline int normL1(const uchar* a, const uchar* b, int n)
{
int s = 0;
for( int i = 0; i < n; i++ )
{
s += std::abs(a[i] - b[i]);
}
return s;
}
template<typename _Tp, typename _AccTp> static inline
_AccTp normInf(const _Tp* a, const _Tp* b, int n)
{
_AccTp s = 0;
for( int i = 0; i < n; i++ )
{
_AccTp v0 = a[i] - b[i];
s = std::max(s, std::abs(v0));
}
return s;
}
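/* A minimal usage sketch: the inline helpers above operate directly on raw
   buffers; for the template versions the accumulator type is given explicitly.
@code
float a[4] = {1.f, 2.f, 3.f, 4.f};
float b[4] = {1.f, 0.f, 3.f, 0.f};
float  l2sq = cv::normL2Sqr(a, b, 4);              // float overload
float  l1   = cv::normL1(a, b, 4);                 // float overload
double linf = cv::normInf<float, double>(a, b, 4); // template with explicit accumulator type
@endcode
*/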
/** @brief Computes the cube root of an argument.
The function cubeRoot computes \f$\sqrt[3]{\texttt{val}}\f$. Negative arguments are handled correctly.
NaN and Inf are not handled. The accuracy approaches the maximum possible accuracy for
single-precision data.
@param val A function argument.
*/
CV_EXPORTS_W float cubeRoot(float val);
/** @overload
cubeRoot with argument of `double` type calls `std::cbrt(double)`
*/
static inline
double cubeRoot(double val)
{
return std::cbrt(val);
}
/** @brief Calculates the angle of a 2D vector in degrees.
The function fastAtan2 calculates the full-range angle of an input 2D vector. The angle is measured
in degrees and varies from 0 to 360 degrees. The accuracy is about 0.3 degrees.
@param x x-coordinate of the vector.
@param y y-coordinate of the vector.
*/
CV_EXPORTS_W float fastAtan2(float y, float x);
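/* A minimal usage sketch of the two scalar helpers above.
@code
float r = cv::cubeRoot(-27.0f);      // -3
float a = cv::fastAtan2(1.0f, 1.0f); // approximately 45 degrees
@endcode
*/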
/** proxy for hal::LU */
CV_EXPORTS int LU(float* A, size_t astep, int m, float* b, size_t bstep, int n);
/** proxy for hal::LU */
CV_EXPORTS int LU(double* A, size_t astep, int m, double* b, size_t bstep, int n);
/** proxy for hal::Cholesky */
CV_EXPORTS bool Cholesky(float* A, size_t astep, int m, float* b, size_t bstep, int n);
/** proxy for hal::Cholesky */
CV_EXPORTS bool Cholesky(double* A, size_t astep, int m, double* b, size_t bstep, int n);
////////////////// forward declarations for important OpenCV types //////////////////
//! @cond IGNORED
template<typename _Tp, int cn> class Vec;
template<typename _Tp, int m, int n> class Matx;
template<typename _Tp> class Complex;
template<typename _Tp> class Point_;
template<typename _Tp> class Point3_;
template<typename _Tp> class Size_;
template<typename _Tp> class Rect_;
template<typename _Tp> class Scalar_;
class CV_EXPORTS RotatedRect;
class CV_EXPORTS Range;
class CV_EXPORTS TermCriteria;
class CV_EXPORTS KeyPoint;
class CV_EXPORTS DMatch;
class CV_EXPORTS RNG;
class CV_EXPORTS Mat;
class CV_EXPORTS MatExpr;
class CV_EXPORTS UMat;
class CV_EXPORTS SparseMat;
typedef Mat MatND;
template<typename _Tp> class Mat_;
template<typename _Tp> class SparseMat_;
class CV_EXPORTS MatConstIterator;
class CV_EXPORTS SparseMatIterator;
class CV_EXPORTS SparseMatConstIterator;
template<typename _Tp> class MatIterator_;
template<typename _Tp> class MatConstIterator_;
template<typename _Tp> class SparseMatIterator_;
template<typename _Tp> class SparseMatConstIterator_;
namespace ogl
{
class CV_EXPORTS Buffer;
class CV_EXPORTS Texture2D;
class CV_EXPORTS Arrays;
}
namespace cuda
{
class CV_EXPORTS GpuMat;
class CV_EXPORTS HostMem;
class CV_EXPORTS Stream;
class CV_EXPORTS Event;
}
namespace cudev
{
template <typename _Tp> class GpuMat_;
}
namespace ipp
{
CV_EXPORTS unsigned long long getIppFeatures();
CV_EXPORTS void setIppStatus(int status, const char * const funcname = NULL, const char * const filename = NULL,
int line = 0);
CV_EXPORTS int getIppStatus();
CV_EXPORTS String getIppErrorLocation();
CV_EXPORTS_W bool useIPP();
CV_EXPORTS_W void setUseIPP(bool flag);
CV_EXPORTS_W String getIppVersion();
// IPP Not-Exact mode. This function may force the use of IPP when both IPP and OpenCV provide proper results
// but have internal accuracy differences which have too much direct or indirect impact on accuracy tests.
CV_EXPORTS_W bool useIPP_NotExact();
CV_EXPORTS_W void setUseIPP_NotExact(bool flag);
#ifndef DISABLE_OPENCV_3_COMPATIBILITY
static inline bool useIPP_NE() { return useIPP_NotExact(); }
static inline void setUseIPP_NE(bool flag) { setUseIPP_NotExact(flag); }
#endif
} // ipp
//! @endcond
//! @} core_utils
} // cv
#include "opencv2/core/neon_utils.hpp"
#include "opencv2/core/vsx_utils.hpp"
#include "opencv2/core/check.hpp"
#endif //OPENCV_CORE_BASE_HPP

View File

@@ -0,0 +1,233 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#ifndef OPENCV_CORE_BINDINGS_UTILS_HPP
#define OPENCV_CORE_BINDINGS_UTILS_HPP
#include <opencv2/core/async.hpp>
#include <opencv2/core/detail/async_promise.hpp>
#include <opencv2/core/utils/logger.hpp>
#include <stdexcept>
namespace cv { namespace utils {
//! @addtogroup core_utils
//! @{
CV_EXPORTS_W String dumpInputArray(InputArray argument);
CV_EXPORTS_W String dumpInputArrayOfArrays(InputArrayOfArrays argument);
CV_EXPORTS_W String dumpInputOutputArray(InputOutputArray argument);
CV_EXPORTS_W String dumpInputOutputArrayOfArrays(InputOutputArrayOfArrays argument);
CV_WRAP static inline
String dumpBool(bool argument)
{
return (argument) ? String("Bool: True") : String("Bool: False");
}
CV_WRAP static inline
String dumpInt(int argument)
{
return cv::format("Int: %d", argument);
}
CV_WRAP static inline
String dumpSizeT(size_t argument)
{
std::ostringstream oss("size_t: ", std::ios::ate);
oss << argument;
return oss.str();
}
CV_WRAP static inline
String dumpFloat(float argument)
{
return cv::format("Float: %.2f", argument);
}
CV_WRAP static inline
String dumpDouble(double argument)
{
return cv::format("Double: %.2f", argument);
}
CV_WRAP static inline
String dumpCString(const char* argument)
{
return cv::format("String: %s", argument);
}
CV_WRAP static inline
String dumpString(const String& argument)
{
return cv::format("String: %s", argument.c_str());
}
CV_WRAP static inline
String testOverloadResolution(int value, const Point& point = Point(42, 24))
{
return format("overload (int=%d, point=(x=%d, y=%d))", value, point.x,
point.y);
}
CV_WRAP static inline
String testOverloadResolution(const Rect& rect)
{
return format("overload (rect=(x=%d, y=%d, w=%d, h=%d))", rect.x, rect.y,
rect.width, rect.height);
}
CV_WRAP static inline
String dumpRect(const Rect& argument)
{
return format("rect: (x=%d, y=%d, w=%d, h=%d)", argument.x, argument.y,
argument.width, argument.height);
}
CV_WRAP static inline
String dumpTermCriteria(const TermCriteria& argument)
{
return format("term_criteria: (type=%d, max_count=%d, epsilon=%lf",
argument.type, argument.maxCount, argument.epsilon);
}
CV_WRAP static inline
String dumpRotatedRect(const RotatedRect& argument)
{
return format("rotated_rect: (c_x=%f, c_y=%f, w=%f, h=%f, a=%f)",
argument.center.x, argument.center.y, argument.size.width,
argument.size.height, argument.angle);
}
CV_WRAP static inline
String dumpRange(const Range& argument)
{
if (argument == Range::all())
{
return "range: all";
}
else
{
return format("range: (s=%d, e=%d)", argument.start, argument.end);
}
}
CV_WRAP static inline
int testOverwriteNativeMethod(int argument)
{
return argument;
}
CV_WRAP static inline
String testReservedKeywordConversion(int positional_argument, int lambda = 2, int from = 3)
{
return format("arg=%d, lambda=%d, from=%d", positional_argument, lambda, from);
}
CV_EXPORTS_W String dumpVectorOfInt(const std::vector<int>& vec);
CV_EXPORTS_W String dumpVectorOfDouble(const std::vector<double>& vec);
CV_EXPORTS_W String dumpVectorOfRect(const std::vector<Rect>& vec);
CV_WRAP static inline
void generateVectorOfRect(size_t len, CV_OUT std::vector<Rect>& vec)
{
vec.resize(len);
if (len > 0)
{
RNG rng(12345);
Mat tmp(static_cast<int>(len), 1, CV_32SC4);
rng.fill(tmp, RNG::UNIFORM, 10, 20);
tmp.copyTo(vec);
}
}
CV_WRAP static inline
void generateVectorOfInt(size_t len, CV_OUT std::vector<int>& vec)
{
vec.resize(len);
if (len > 0)
{
RNG rng(554433);
Mat tmp(static_cast<int>(len), 1, CV_32SC1);
rng.fill(tmp, RNG::UNIFORM, -10, 10);
tmp.copyTo(vec);
}
}
CV_WRAP static inline
void generateVectorOfMat(size_t len, int rows, int cols, int dtype, CV_OUT std::vector<Mat>& vec)
{
vec.resize(len);
if (len > 0)
{
RNG rng(65431);
for (size_t i = 0; i < len; ++i)
{
vec[i].create(rows, cols, dtype);
rng.fill(vec[i], RNG::UNIFORM, 0, 10);
}
}
}
CV_WRAP static inline
void testRaiseGeneralException()
{
throw std::runtime_error("exception text");
}
CV_WRAP static inline
AsyncArray testAsyncArray(InputArray argument)
{
AsyncPromise p;
p.setValue(argument);
return p.getArrayResult();
}
CV_WRAP static inline
AsyncArray testAsyncException()
{
AsyncPromise p;
try
{
CV_Error(Error::StsOk, "Test: Generated async error");
}
catch (const cv::Exception& e)
{
p.setException(e);
}
return p.getArrayResult();
}
namespace fs {
CV_EXPORTS_W cv::String getCacheDirectoryForDownloads();
} // namespace fs
//! @} // core_utils
} // namespace cv::utils
//! @cond IGNORED
CV_WRAP static inline
int setLogLevel(int level)
{
// NB: Binding generators don't work with enums properly yet, so we define a separate overload here
return cv::utils::logging::setLogLevel((cv::utils::logging::LogLevel)level);
}
CV_WRAP static inline
int getLogLevel()
{
return cv::utils::logging::getLogLevel();
}
//! @endcond IGNORED
} // namespaces cv / utils
#endif // OPENCV_CORE_BINDINGS_UTILS_HPP

View File

@@ -0,0 +1,40 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2014, Advanced Micro Devices, Inc., all rights reserved.
#ifndef OPENCV_CORE_BUFFER_POOL_HPP
#define OPENCV_CORE_BUFFER_POOL_HPP
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable: 4265)
#endif
namespace cv
{
//! @addtogroup core
//! @{
class BufferPoolController
{
protected:
~BufferPoolController() { }
public:
virtual size_t getReservedSize() const = 0;
virtual size_t getMaxReservedSize() const = 0;
virtual void setMaxReservedSize(size_t size) = 0;
virtual void freeAllReservedBuffers() = 0;
};
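/* A minimal usage sketch: BufferPoolController is an abstract interface and the
   way a concrete controller is obtained depends on the allocator/module, so the
   pointer below is simply taken as a parameter rather than retrieved here.
@code
void tuneBufferPool(cv::BufferPoolController* ctrl)
{
    if (!ctrl)
        return;
    ctrl->setMaxReservedSize(64 * 1024 * 1024); // cap the pool at 64 MiB
    size_t reserved = ctrl->getReservedSize();  // memory currently held by the pool
    (void)reserved;
    ctrl->freeAllReservedBuffers();             // return all reserved buffers to the system
}
@endcode
*/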
//! @}
}
#ifdef _MSC_VER
#pragma warning(pop)
#endif
#endif // OPENCV_CORE_BUFFER_POOL_HPP

View File

@@ -0,0 +1,160 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#ifndef OPENCV_CORE_CHECK_HPP
#define OPENCV_CORE_CHECK_HPP
#include <opencv2/core/base.hpp>
namespace cv {
/** Returns string of cv::Mat depth value: CV_8U -> "CV_8U" or "<invalid depth>" */
CV_EXPORTS const char* depthToString(int depth);
/** Returns string of cv::Mat depth value: CV_8UC3 -> "CV_8UC3" or "<invalid type>" */
CV_EXPORTS const String typeToString(int type);
//! @cond IGNORED
namespace detail {
/** Returns string of cv::Mat depth value: CV_8U -> "CV_8U" or NULL */
CV_EXPORTS const char* depthToString_(int depth);
/** Returns string of cv::Mat depth value: CV_8UC3 -> "CV_8UC3" or cv::String() */
CV_EXPORTS const cv::String typeToString_(int type);
enum TestOp {
TEST_CUSTOM = 0,
TEST_EQ = 1,
TEST_NE = 2,
TEST_LE = 3,
TEST_LT = 4,
TEST_GE = 5,
TEST_GT = 6,
CV__LAST_TEST_OP
};
struct CheckContext {
const char* func;
const char* file;
int line;
enum TestOp testOp;
const char* message;
const char* p1_str;
const char* p2_str;
};
#ifndef CV__CHECK_FILENAME
# define CV__CHECK_FILENAME __FILE__
#endif
#ifndef CV__CHECK_FUNCTION
# if defined _MSC_VER
# define CV__CHECK_FUNCTION __FUNCSIG__
# elif defined __GNUC__
# define CV__CHECK_FUNCTION __PRETTY_FUNCTION__
# else
# define CV__CHECK_FUNCTION "<unknown>"
# endif
#endif
#define CV__CHECK_LOCATION_VARNAME(id) CVAUX_CONCAT(CVAUX_CONCAT(__cv_check_, id), __LINE__)
#define CV__DEFINE_CHECK_CONTEXT(id, message, testOp, p1_str, p2_str) \
static const cv::detail::CheckContext CV__CHECK_LOCATION_VARNAME(id) = \
{ CV__CHECK_FUNCTION, CV__CHECK_FILENAME, __LINE__, testOp, "" message, "" p1_str, "" p2_str }
CV_EXPORTS void CV_NORETURN check_failed_auto(const int v1, const int v2, const CheckContext& ctx);
CV_EXPORTS void CV_NORETURN check_failed_auto(const size_t v1, const size_t v2, const CheckContext& ctx);
CV_EXPORTS void CV_NORETURN check_failed_auto(const float v1, const float v2, const CheckContext& ctx);
CV_EXPORTS void CV_NORETURN check_failed_auto(const double v1, const double v2, const CheckContext& ctx);
CV_EXPORTS void CV_NORETURN check_failed_auto(const Size_<int> v1, const Size_<int> v2, const CheckContext& ctx);
CV_EXPORTS void CV_NORETURN check_failed_MatDepth(const int v1, const int v2, const CheckContext& ctx);
CV_EXPORTS void CV_NORETURN check_failed_MatType(const int v1, const int v2, const CheckContext& ctx);
CV_EXPORTS void CV_NORETURN check_failed_MatChannels(const int v1, const int v2, const CheckContext& ctx);
CV_EXPORTS void CV_NORETURN check_failed_auto(const int v, const CheckContext& ctx);
CV_EXPORTS void CV_NORETURN check_failed_auto(const size_t v, const CheckContext& ctx);
CV_EXPORTS void CV_NORETURN check_failed_auto(const float v, const CheckContext& ctx);
CV_EXPORTS void CV_NORETURN check_failed_auto(const double v, const CheckContext& ctx);
CV_EXPORTS void CV_NORETURN check_failed_auto(const Size_<int> v, const CheckContext& ctx);
CV_EXPORTS void CV_NORETURN check_failed_auto(const std::string& v1, const CheckContext& ctx);
CV_EXPORTS void CV_NORETURN check_failed_MatDepth(const int v, const CheckContext& ctx);
CV_EXPORTS void CV_NORETURN check_failed_MatType(const int v, const CheckContext& ctx);
CV_EXPORTS void CV_NORETURN check_failed_MatChannels(const int v, const CheckContext& ctx);
#define CV__TEST_EQ(v1, v2) ((v1) == (v2))
#define CV__TEST_NE(v1, v2) ((v1) != (v2))
#define CV__TEST_LE(v1, v2) ((v1) <= (v2))
#define CV__TEST_LT(v1, v2) ((v1) < (v2))
#define CV__TEST_GE(v1, v2) ((v1) >= (v2))
#define CV__TEST_GT(v1, v2) ((v1) > (v2))
#define CV__CHECK(id, op, type, v1, v2, v1_str, v2_str, msg_str) do { \
if(CV__TEST_##op((v1), (v2))) ; else { \
CV__DEFINE_CHECK_CONTEXT(id, msg_str, cv::detail::TEST_ ## op, v1_str, v2_str); \
cv::detail::check_failed_ ## type((v1), (v2), CV__CHECK_LOCATION_VARNAME(id)); \
} \
} while (0)
#define CV__CHECK_CUSTOM_TEST(id, type, v, test_expr, v_str, test_expr_str, msg_str) do { \
if(!!(test_expr)) ; else { \
CV__DEFINE_CHECK_CONTEXT(id, msg_str, cv::detail::TEST_CUSTOM, v_str, test_expr_str); \
cv::detail::check_failed_ ## type((v), CV__CHECK_LOCATION_VARNAME(id)); \
} \
} while (0)
} // namespace
//! @endcond
/// Supported values of these types: int, float, double
#define CV_CheckEQ(v1, v2, msg) CV__CHECK(_, EQ, auto, v1, v2, #v1, #v2, msg)
#define CV_CheckNE(v1, v2, msg) CV__CHECK(_, NE, auto, v1, v2, #v1, #v2, msg)
#define CV_CheckLE(v1, v2, msg) CV__CHECK(_, LE, auto, v1, v2, #v1, #v2, msg)
#define CV_CheckLT(v1, v2, msg) CV__CHECK(_, LT, auto, v1, v2, #v1, #v2, msg)
#define CV_CheckGE(v1, v2, msg) CV__CHECK(_, GE, auto, v1, v2, #v1, #v2, msg)
#define CV_CheckGT(v1, v2, msg) CV__CHECK(_, GT, auto, v1, v2, #v1, #v2, msg)
/// Check with additional "decoding" of type values in error message
#define CV_CheckTypeEQ(t1, t2, msg) CV__CHECK(_, EQ, MatType, t1, t2, #t1, #t2, msg)
/// Check with additional "decoding" of depth values in error message
#define CV_CheckDepthEQ(d1, d2, msg) CV__CHECK(_, EQ, MatDepth, d1, d2, #d1, #d2, msg)
#define CV_CheckChannelsEQ(c1, c2, msg) CV__CHECK(_, EQ, MatChannels, c1, c2, #c1, #c2, msg)
/// Example: type == CV_8UC1 || type == CV_8UC3
#define CV_CheckType(t, test_expr, msg) CV__CHECK_CUSTOM_TEST(_, MatType, t, (test_expr), #t, #test_expr, msg)
/// Example: depth == CV_32F || depth == CV_64F
#define CV_CheckDepth(t, test_expr, msg) CV__CHECK_CUSTOM_TEST(_, MatDepth, t, (test_expr), #t, #test_expr, msg)
/// Example: v == A || v == B
#define CV_Check(v, test_expr, msg) CV__CHECK_CUSTOM_TEST(_, auto, v, (test_expr), #v, #test_expr, msg)
/// Some complex conditions: CV_Check(src2, src2.empty() || (src2.type() == src1.type() && src2.size() == src1.size()), "src2 should have same size/type as src1")
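/* A minimal usage sketch: typical input validation with the check macros above;
   a failed check throws via cv::error with a decoded message.
@code
void validate(const cv::Mat& src)
{
    CV_CheckTypeEQ(src.type(), CV_32FC1, "src must be a single-channel float matrix");
    CV_CheckGE(src.rows, 3, "src must have at least 3 rows");
    CV_Check(src.cols, src.cols % 2 == 1, "src must have an odd number of columns");
}
@endcode
*/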
// TODO define pretty-printers
#ifndef NDEBUG
#define CV_DbgCheck(v, test_expr, msg) CV__CHECK_CUSTOM_TEST(_, auto, v, (test_expr), #v, #test_expr, msg)
#define CV_DbgCheckEQ(v1, v2, msg) CV__CHECK(_, EQ, auto, v1, v2, #v1, #v2, msg)
#define CV_DbgCheckNE(v1, v2, msg) CV__CHECK(_, NE, auto, v1, v2, #v1, #v2, msg)
#define CV_DbgCheckLE(v1, v2, msg) CV__CHECK(_, LE, auto, v1, v2, #v1, #v2, msg)
#define CV_DbgCheckLT(v1, v2, msg) CV__CHECK(_, LT, auto, v1, v2, #v1, #v2, msg)
#define CV_DbgCheckGE(v1, v2, msg) CV__CHECK(_, GE, auto, v1, v2, #v1, #v2, msg)
#define CV_DbgCheckGT(v1, v2, msg) CV__CHECK(_, GT, auto, v1, v2, #v1, #v2, msg)
#else
#define CV_DbgCheck(v, test_expr, msg) do { } while (0)
#define CV_DbgCheckEQ(v1, v2, msg) do { } while (0)
#define CV_DbgCheckNE(v1, v2, msg) do { } while (0)
#define CV_DbgCheckLE(v1, v2, msg) do { } while (0)
#define CV_DbgCheckLT(v1, v2, msg) do { } while (0)
#define CV_DbgCheckGE(v1, v2, msg) do { } while (0)
#define CV_DbgCheckGT(v1, v2, msg) do { } while (0)
#endif
} // namespace
#endif // OPENCV_CORE_CHECK_HPP

View File

@@ -0,0 +1,48 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifdef __OPENCV_BUILD
#error this is a compatibility header which should not be used inside the OpenCV library
#endif
#include "opencv2/core.hpp"

3125
3rdparty/opencv/inc/opencv2/core/core_c.h vendored Normal file

File diff suppressed because it is too large

1270
3rdparty/opencv/inc/opencv2/core/cuda.hpp vendored Normal file

File diff suppressed because it is too large

View File

@@ -0,0 +1,723 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef OPENCV_CORE_CUDAINL_HPP
#define OPENCV_CORE_CUDAINL_HPP
#include "opencv2/core/cuda.hpp"
//! @cond IGNORED
namespace cv { namespace cuda {
//===================================================================================
// GpuMat
//===================================================================================
inline
GpuMat::GpuMat(Allocator* allocator_)
: flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), allocator(allocator_)
{}
inline
GpuMat::GpuMat(int rows_, int cols_, int type_, Allocator* allocator_)
: flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), allocator(allocator_)
{
if (rows_ > 0 && cols_ > 0)
create(rows_, cols_, type_);
}
inline
GpuMat::GpuMat(Size size_, int type_, Allocator* allocator_)
: flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), allocator(allocator_)
{
if (size_.height > 0 && size_.width > 0)
create(size_.height, size_.width, type_);
}
inline
GpuMat::GpuMat(int rows_, int cols_, int type_, Scalar s_, Allocator* allocator_)
: flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), allocator(allocator_)
{
if (rows_ > 0 && cols_ > 0)
{
create(rows_, cols_, type_);
setTo(s_);
}
}
inline
GpuMat::GpuMat(Size size_, int type_, Scalar s_, Allocator* allocator_)
: flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), allocator(allocator_)
{
if (size_.height > 0 && size_.width > 0)
{
create(size_.height, size_.width, type_);
setTo(s_);
}
}
inline
GpuMat::GpuMat(const GpuMat& m)
: flags(m.flags), rows(m.rows), cols(m.cols), step(m.step), data(m.data), refcount(m.refcount), datastart(m.datastart), dataend(m.dataend), allocator(m.allocator)
{
if (refcount)
CV_XADD(refcount, 1);
}
inline
GpuMat::GpuMat(InputArray arr, Allocator* allocator_) :
flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), allocator(allocator_)
{
upload(arr);
}
inline
GpuMat::~GpuMat()
{
release();
}
inline
GpuMat& GpuMat::operator =(const GpuMat& m)
{
if (this != &m)
{
GpuMat temp(m);
swap(temp);
}
return *this;
}
inline
void GpuMat::create(Size size_, int type_)
{
create(size_.height, size_.width, type_);
}
inline
void GpuMat::swap(GpuMat& b)
{
std::swap(flags, b.flags);
std::swap(rows, b.rows);
std::swap(cols, b.cols);
std::swap(step, b.step);
std::swap(data, b.data);
std::swap(datastart, b.datastart);
std::swap(dataend, b.dataend);
std::swap(refcount, b.refcount);
std::swap(allocator, b.allocator);
}
inline
GpuMat GpuMat::clone() const
{
GpuMat m;
copyTo(m);
return m;
}
inline
void GpuMat::copyTo(OutputArray dst, InputArray mask) const
{
copyTo(dst, mask, Stream::Null());
}
inline
GpuMat& GpuMat::setTo(Scalar s)
{
return setTo(s, Stream::Null());
}
inline
GpuMat& GpuMat::setTo(Scalar s, InputArray mask)
{
return setTo(s, mask, Stream::Null());
}
inline
void GpuMat::convertTo(OutputArray dst, int rtype) const
{
convertTo(dst, rtype, Stream::Null());
}
inline
void GpuMat::convertTo(OutputArray dst, int rtype, double alpha, double beta) const
{
convertTo(dst, rtype, alpha, beta, Stream::Null());
}
inline
void GpuMat::convertTo(OutputArray dst, int rtype, double alpha, Stream& stream) const
{
convertTo(dst, rtype, alpha, 0.0, stream);
}
inline
void GpuMat::assignTo(GpuMat& m, int _type) const
{
if (_type < 0)
m = *this;
else
convertTo(m, _type);
}
inline
uchar* GpuMat::ptr(int y)
{
CV_DbgAssert( (unsigned)y < (unsigned)rows );
return data + step * y;
}
inline
const uchar* GpuMat::ptr(int y) const
{
CV_DbgAssert( (unsigned)y < (unsigned)rows );
return data + step * y;
}
template<typename _Tp> inline
_Tp* GpuMat::ptr(int y)
{
return (_Tp*)ptr(y);
}
template<typename _Tp> inline
const _Tp* GpuMat::ptr(int y) const
{
return (const _Tp*)ptr(y);
}
template <class T> inline
GpuMat::operator PtrStepSz<T>() const
{
return PtrStepSz<T>(rows, cols, (T*)data, step);
}
template <class T> inline
GpuMat::operator PtrStep<T>() const
{
return PtrStep<T>((T*)data, step);
}
inline
GpuMat GpuMat::row(int y) const
{
return GpuMat(*this, Range(y, y+1), Range::all());
}
inline
GpuMat GpuMat::col(int x) const
{
return GpuMat(*this, Range::all(), Range(x, x+1));
}
inline
GpuMat GpuMat::rowRange(int startrow, int endrow) const
{
return GpuMat(*this, Range(startrow, endrow), Range::all());
}
inline
GpuMat GpuMat::rowRange(Range r) const
{
return GpuMat(*this, r, Range::all());
}
inline
GpuMat GpuMat::colRange(int startcol, int endcol) const
{
return GpuMat(*this, Range::all(), Range(startcol, endcol));
}
inline
GpuMat GpuMat::colRange(Range r) const
{
return GpuMat(*this, Range::all(), r);
}
inline
GpuMat GpuMat::operator ()(Range rowRange_, Range colRange_) const
{
return GpuMat(*this, rowRange_, colRange_);
}
inline
GpuMat GpuMat::operator ()(Rect roi) const
{
return GpuMat(*this, roi);
}
inline
bool GpuMat::isContinuous() const
{
return (flags & Mat::CONTINUOUS_FLAG) != 0;
}
inline
size_t GpuMat::elemSize() const
{
return CV_ELEM_SIZE(flags);
}
inline
size_t GpuMat::elemSize1() const
{
return CV_ELEM_SIZE1(flags);
}
inline
int GpuMat::type() const
{
return CV_MAT_TYPE(flags);
}
inline
int GpuMat::depth() const
{
return CV_MAT_DEPTH(flags);
}
inline
int GpuMat::channels() const
{
return CV_MAT_CN(flags);
}
inline
size_t GpuMat::step1() const
{
return step / elemSize1();
}
inline
Size GpuMat::size() const
{
return Size(cols, rows);
}
inline
bool GpuMat::empty() const
{
return data == 0;
}
inline
void* GpuMat::cudaPtr() const
{
return data;
}
static inline
GpuMat createContinuous(int rows, int cols, int type)
{
GpuMat m;
createContinuous(rows, cols, type, m);
return m;
}
static inline
void createContinuous(Size size, int type, OutputArray arr)
{
createContinuous(size.height, size.width, type, arr);
}
static inline
GpuMat createContinuous(Size size, int type)
{
GpuMat m;
createContinuous(size, type, m);
return m;
}
static inline
void ensureSizeIsEnough(Size size, int type, OutputArray arr)
{
ensureSizeIsEnough(size.height, size.width, type, arr);
}
static inline
void swap(GpuMat& a, GpuMat& b)
{
a.swap(b);
}
//===================================================================================
// GpuMatND
//===================================================================================
inline
GpuMatND::GpuMatND() :
flags(0), dims(0), data(nullptr), offset(0)
{
}
inline
GpuMatND::GpuMatND(SizeArray _size, int _type) :
flags(0), dims(0), data(nullptr), offset(0)
{
create(std::move(_size), _type);
}
inline
void GpuMatND::swap(GpuMatND& m) noexcept
{
std::swap(*this, m);
}
inline
bool GpuMatND::isContinuous() const
{
return (flags & Mat::CONTINUOUS_FLAG) != 0;
}
inline
bool GpuMatND::isSubmatrix() const
{
return (flags & Mat::SUBMATRIX_FLAG) != 0;
}
inline
size_t GpuMatND::elemSize() const
{
return CV_ELEM_SIZE(flags);
}
inline
size_t GpuMatND::elemSize1() const
{
return CV_ELEM_SIZE1(flags);
}
inline
bool GpuMatND::empty() const
{
return data == nullptr;
}
inline
bool GpuMatND::external() const
{
return !empty() && data_.use_count() == 0;
}
inline
uchar* GpuMatND::getDevicePtr() const
{
return data + offset;
}
inline
size_t GpuMatND::total() const
{
size_t p = 1;
for(auto s : size)
p *= s;
return p;
}
inline
size_t GpuMatND::totalMemSize() const
{
return size[0] * step[0];
}
inline
int GpuMatND::type() const
{
return CV_MAT_TYPE(flags);
}
//===================================================================================
// HostMem
//===================================================================================
inline
HostMem::HostMem(AllocType alloc_type_)
: flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), alloc_type(alloc_type_)
{
}
inline
HostMem::HostMem(const HostMem& m)
: flags(m.flags), rows(m.rows), cols(m.cols), step(m.step), data(m.data), refcount(m.refcount), datastart(m.datastart), dataend(m.dataend), alloc_type(m.alloc_type)
{
if( refcount )
CV_XADD(refcount, 1);
}
inline
HostMem::HostMem(int rows_, int cols_, int type_, AllocType alloc_type_)
: flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), alloc_type(alloc_type_)
{
if (rows_ > 0 && cols_ > 0)
create(rows_, cols_, type_);
}
inline
HostMem::HostMem(Size size_, int type_, AllocType alloc_type_)
: flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), alloc_type(alloc_type_)
{
if (size_.height > 0 && size_.width > 0)
create(size_.height, size_.width, type_);
}
inline
HostMem::HostMem(InputArray arr, AllocType alloc_type_)
: flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), alloc_type(alloc_type_)
{
arr.getMat().copyTo(*this);
}
inline
HostMem::~HostMem()
{
release();
}
inline
HostMem& HostMem::operator =(const HostMem& m)
{
if (this != &m)
{
HostMem temp(m);
swap(temp);
}
return *this;
}
inline
void HostMem::swap(HostMem& b)
{
std::swap(flags, b.flags);
std::swap(rows, b.rows);
std::swap(cols, b.cols);
std::swap(step, b.step);
std::swap(data, b.data);
std::swap(datastart, b.datastart);
std::swap(dataend, b.dataend);
std::swap(refcount, b.refcount);
std::swap(alloc_type, b.alloc_type);
}
inline
HostMem HostMem::clone() const
{
HostMem m(size(), type(), alloc_type);
createMatHeader().copyTo(m);
return m;
}
inline
void HostMem::create(Size size_, int type_)
{
create(size_.height, size_.width, type_);
}
inline
Mat HostMem::createMatHeader() const
{
return Mat(size(), type(), data, step);
}
inline
bool HostMem::isContinuous() const
{
return (flags & Mat::CONTINUOUS_FLAG) != 0;
}
inline
size_t HostMem::elemSize() const
{
return CV_ELEM_SIZE(flags);
}
inline
size_t HostMem::elemSize1() const
{
return CV_ELEM_SIZE1(flags);
}
inline
int HostMem::type() const
{
return CV_MAT_TYPE(flags);
}
inline
int HostMem::depth() const
{
return CV_MAT_DEPTH(flags);
}
inline
int HostMem::channels() const
{
return CV_MAT_CN(flags);
}
inline
size_t HostMem::step1() const
{
return step / elemSize1();
}
inline
Size HostMem::size() const
{
return Size(cols, rows);
}
inline
bool HostMem::empty() const
{
return data == 0;
}
static inline
void swap(HostMem& a, HostMem& b)
{
a.swap(b);
}
//===================================================================================
// Stream
//===================================================================================
inline
Stream::Stream(const Ptr<Impl>& impl)
: impl_(impl)
{
}
//===================================================================================
// Event
//===================================================================================
inline
Event::Event(const Ptr<Impl>& impl)
: impl_(impl)
{
}
//===================================================================================
// Initialization & Info
//===================================================================================
inline
bool TargetArchs::has(int major, int minor)
{
return hasPtx(major, minor) || hasBin(major, minor);
}
inline
bool TargetArchs::hasEqualOrGreater(int major, int minor)
{
return hasEqualOrGreaterPtx(major, minor) || hasEqualOrGreaterBin(major, minor);
}
inline
DeviceInfo::DeviceInfo()
{
device_id_ = getDevice();
}
inline
DeviceInfo::DeviceInfo(int device_id)
{
CV_Assert( device_id >= 0 && device_id < getCudaEnabledDeviceCount() );
device_id_ = device_id;
}
inline
int DeviceInfo::deviceID() const
{
return device_id_;
}
inline
size_t DeviceInfo::freeMemory() const
{
size_t _totalMemory = 0, _freeMemory = 0;
queryMemory(_totalMemory, _freeMemory);
return _freeMemory;
}
inline
size_t DeviceInfo::totalMemory() const
{
size_t _totalMemory = 0, _freeMemory = 0;
queryMemory(_totalMemory, _freeMemory);
return _totalMemory;
}
inline
bool DeviceInfo::supports(FeatureSet feature_set) const
{
int version = majorVersion() * 10 + minorVersion();
return version >= feature_set;
}
}} // namespace cv { namespace cuda {
//===================================================================================
// Mat
//===================================================================================
namespace cv {
inline
Mat::Mat(const cuda::GpuMat& m)
: flags(0), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0), datalimit(0), allocator(0), u(0), size(&rows)
{
m.download(*this);
}
}
//! @endcond
#endif // OPENCV_CORE_CUDAINL_HPP
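A host-side sketch of the round trip these inline definitions support: construct a GpuMat from host data (the InputArray constructor uploads), operate on a sub-matrix, and pull the result back through the Mat(const cuda::GpuMat&) constructor shown above. It assumes an OpenCV build with CUDA support and at least one CUDA device:

#include <opencv2/core.hpp>
#include <opencv2/core/cuda.hpp>

int main()
{
    if (cv::cuda::getCudaEnabledDeviceCount() == 0)
        return 0;                                             // nothing to demonstrate without a device

    cv::Mat host(480, 640, CV_8UC1, cv::Scalar(0));

    cv::cuda::GpuMat d_img(host);                             // GpuMat(InputArray) uploads
    cv::cuda::GpuMat d_roi = d_img(cv::Rect(0, 0, 320, 240)); // operator()(Rect) makes a view
    d_roi.setTo(cv::Scalar(255));                             // setTo(Scalar) runs on the default stream

    cv::Mat result(d_img);                                    // Mat(const cuda::GpuMat&) downloads
    CV_Assert(result.at<uchar>(0, 0) == 255);
    return 0;
}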

View File

@@ -0,0 +1,211 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef OPENCV_CUDA_DEVICE_BLOCK_HPP
#define OPENCV_CUDA_DEVICE_BLOCK_HPP
/** @file
* @deprecated Use @ref cudev instead.
*/
//! @cond IGNORED
namespace cv { namespace cuda { namespace device
{
struct Block
{
static __device__ __forceinline__ unsigned int id()
{
return blockIdx.x;
}
static __device__ __forceinline__ unsigned int stride()
{
return blockDim.x * blockDim.y * blockDim.z;
}
static __device__ __forceinline__ void sync()
{
__syncthreads();
}
static __device__ __forceinline__ int flattenedThreadId()
{
return threadIdx.z * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
}
template<typename It, typename T>
static __device__ __forceinline__ void fill(It beg, It end, const T& value)
{
int STRIDE = stride();
It t = beg + flattenedThreadId();
for(; t < end; t += STRIDE)
*t = value;
}
template<typename OutIt, typename T>
static __device__ __forceinline__ void yota(OutIt beg, OutIt end, T value)
{
int STRIDE = stride();
int tid = flattenedThreadId();
value += tid;
for(OutIt t = beg + tid; t < end; t += STRIDE, value += STRIDE)
*t = value;
}
template<typename InIt, typename OutIt>
static __device__ __forceinline__ void copy(InIt beg, InIt end, OutIt out)
{
int STRIDE = stride();
InIt t = beg + flattenedThreadId();
OutIt o = out + (t - beg);
for(; t < end; t += STRIDE, o += STRIDE)
*o = *t;
}
template<typename InIt, typename OutIt, class UnOp>
static __device__ __forceinline__ void transform(InIt beg, InIt end, OutIt out, UnOp op)
{
int STRIDE = stride();
InIt t = beg + flattenedThreadId();
OutIt o = out + (t - beg);
for(; t < end; t += STRIDE, o += STRIDE)
*o = op(*t);
}
template<typename InIt1, typename InIt2, typename OutIt, class BinOp>
static __device__ __forceinline__ void transform(InIt1 beg1, InIt1 end1, InIt2 beg2, OutIt out, BinOp op)
{
int STRIDE = stride();
InIt1 t1 = beg1 + flattenedThreadId();
InIt2 t2 = beg2 + flattenedThreadId();
OutIt o = out + (t1 - beg1);
for(; t1 < end1; t1 += STRIDE, t2 += STRIDE, o += STRIDE)
*o = op(*t1, *t2);
}
template<int CTA_SIZE, typename T, class BinOp>
static __device__ __forceinline__ void reduce(volatile T* buffer, BinOp op)
{
int tid = flattenedThreadId();
T val = buffer[tid];
if (CTA_SIZE >= 1024) { if (tid < 512) buffer[tid] = val = op(val, buffer[tid + 512]); __syncthreads(); }
if (CTA_SIZE >= 512) { if (tid < 256) buffer[tid] = val = op(val, buffer[tid + 256]); __syncthreads(); }
if (CTA_SIZE >= 256) { if (tid < 128) buffer[tid] = val = op(val, buffer[tid + 128]); __syncthreads(); }
if (CTA_SIZE >= 128) { if (tid < 64) buffer[tid] = val = op(val, buffer[tid + 64]); __syncthreads(); }
if (tid < 32)
{
if (CTA_SIZE >= 64) { buffer[tid] = val = op(val, buffer[tid + 32]); }
if (CTA_SIZE >= 32) { buffer[tid] = val = op(val, buffer[tid + 16]); }
if (CTA_SIZE >= 16) { buffer[tid] = val = op(val, buffer[tid + 8]); }
if (CTA_SIZE >= 8) { buffer[tid] = val = op(val, buffer[tid + 4]); }
if (CTA_SIZE >= 4) { buffer[tid] = val = op(val, buffer[tid + 2]); }
if (CTA_SIZE >= 2) { buffer[tid] = val = op(val, buffer[tid + 1]); }
}
}
template<int CTA_SIZE, typename T, class BinOp>
static __device__ __forceinline__ T reduce(volatile T* buffer, T init, BinOp op)
{
int tid = flattenedThreadId();
T val = buffer[tid] = init;
__syncthreads();
if (CTA_SIZE >= 1024) { if (tid < 512) buffer[tid] = val = op(val, buffer[tid + 512]); __syncthreads(); }
if (CTA_SIZE >= 512) { if (tid < 256) buffer[tid] = val = op(val, buffer[tid + 256]); __syncthreads(); }
if (CTA_SIZE >= 256) { if (tid < 128) buffer[tid] = val = op(val, buffer[tid + 128]); __syncthreads(); }
if (CTA_SIZE >= 128) { if (tid < 64) buffer[tid] = val = op(val, buffer[tid + 64]); __syncthreads(); }
if (tid < 32)
{
if (CTA_SIZE >= 64) { buffer[tid] = val = op(val, buffer[tid + 32]); }
if (CTA_SIZE >= 32) { buffer[tid] = val = op(val, buffer[tid + 16]); }
if (CTA_SIZE >= 16) { buffer[tid] = val = op(val, buffer[tid + 8]); }
if (CTA_SIZE >= 8) { buffer[tid] = val = op(val, buffer[tid + 4]); }
if (CTA_SIZE >= 4) { buffer[tid] = val = op(val, buffer[tid + 2]); }
if (CTA_SIZE >= 2) { buffer[tid] = val = op(val, buffer[tid + 1]); }
}
__syncthreads();
return buffer[0];
}
template <typename T, class BinOp>
static __device__ __forceinline__ void reduce_n(T* data, unsigned int n, BinOp op)
{
int ftid = flattenedThreadId();
int sft = stride();
if (sft < n)
{
for (unsigned int i = sft + ftid; i < n; i += sft)
data[ftid] = op(data[ftid], data[i]);
__syncthreads();
n = sft;
}
while (n > 1)
{
unsigned int half = n/2;
if (ftid < half)
data[ftid] = op(data[ftid], data[n - ftid - 1]);
__syncthreads();
n = n - half;
}
}
};
}}}
//! @endcond
#endif /* OPENCV_CUDA_DEVICE_BLOCK_HPP */
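A device-side sketch of how the Block helpers are meant to be used: every thread of a block cooperates on a shared-memory buffer, and Block::reduce folds it with a user-supplied binary functor. The kernel name and the PlusFloat functor are illustrative, not part of the header, and the file must be compiled as CUDA code (nvcc):

#include <opencv2/core/cuda/block.hpp>

struct PlusFloat
{
    __device__ __forceinline__ float operator ()(float a, float b) const { return a + b; }
};

// One 256-thread block sums 256 consecutive floats of 'in' into out[blockIdx.x].
__global__ void blockSum(const float* in, float* out)
{
    using cv::cuda::device::Block;

    __shared__ float buf[256];

    const int tid = Block::flattenedThreadId();
    buf[tid] = in[Block::id() * 256 + tid];
    Block::sync();

    Block::reduce<256>(buf, PlusFloat());   // after this, buf[0] holds the block-wide sum
    if (tid == 0)
        out[Block::id()] = buf[0];
}

The CTA_SIZE template argument must match the launch configuration, so this kernel is launched as blockSum<<<numBlocks, 256>>>(in, out).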

View File

@@ -0,0 +1,722 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef OPENCV_CUDA_BORDER_INTERPOLATE_HPP
#define OPENCV_CUDA_BORDER_INTERPOLATE_HPP
#include "saturate_cast.hpp"
#include "vec_traits.hpp"
#include "vec_math.hpp"
/** @file
* @deprecated Use @ref cudev instead.
*/
//! @cond IGNORED
namespace cv { namespace cuda { namespace device
{
//////////////////////////////////////////////////////////////
// BrdConstant
template <typename D> struct BrdRowConstant
{
typedef D result_type;
explicit __host__ __device__ __forceinline__ BrdRowConstant(int width_, const D& val_ = VecTraits<D>::all(0)) : width(width_), val(val_) {}
template <typename T> __device__ __forceinline__ D at_low(int x, const T* data) const
{
return x >= 0 ? saturate_cast<D>(data[x]) : val;
}
template <typename T> __device__ __forceinline__ D at_high(int x, const T* data) const
{
return x < width ? saturate_cast<D>(data[x]) : val;
}
template <typename T> __device__ __forceinline__ D at(int x, const T* data) const
{
return (x >= 0 && x < width) ? saturate_cast<D>(data[x]) : val;
}
int width;
D val;
};
template <typename D> struct BrdColConstant
{
typedef D result_type;
explicit __host__ __device__ __forceinline__ BrdColConstant(int height_, const D& val_ = VecTraits<D>::all(0)) : height(height_), val(val_) {}
template <typename T> __device__ __forceinline__ D at_low(int y, const T* data, size_t step) const
{
return y >= 0 ? saturate_cast<D>(*(const T*)((const char*)data + y * step)) : val;
}
template <typename T> __device__ __forceinline__ D at_high(int y, const T* data, size_t step) const
{
return y < height ? saturate_cast<D>(*(const T*)((const char*)data + y * step)) : val;
}
template <typename T> __device__ __forceinline__ D at(int y, const T* data, size_t step) const
{
return (y >= 0 && y < height) ? saturate_cast<D>(*(const T*)((const char*)data + y * step)) : val;
}
int height;
D val;
};
template <typename D> struct BrdConstant
{
typedef D result_type;
__host__ __device__ __forceinline__ BrdConstant(int height_, int width_, const D& val_ = VecTraits<D>::all(0)) : height(height_), width(width_), val(val_)
{
}
template <typename T> __device__ __forceinline__ D at(int y, int x, const T* data, size_t step) const
{
return (x >= 0 && x < width && y >= 0 && y < height) ? saturate_cast<D>(((const T*)((const uchar*)data + y * step))[x]) : val;
}
template <typename Ptr2D> __device__ __forceinline__ D at(typename Ptr2D::index_type y, typename Ptr2D::index_type x, const Ptr2D& src) const
{
return (x >= 0 && x < width && y >= 0 && y < height) ? saturate_cast<D>(src(y, x)) : val;
}
int height;
int width;
D val;
};
//////////////////////////////////////////////////////////////
// BrdReplicate
template <typename D> struct BrdRowReplicate
{
typedef D result_type;
explicit __host__ __device__ __forceinline__ BrdRowReplicate(int width) : last_col(width - 1) {}
template <typename U> __host__ __device__ __forceinline__ BrdRowReplicate(int width, U) : last_col(width - 1) {}
__device__ __forceinline__ int idx_col_low(int x) const
{
return ::max(x, 0);
}
__device__ __forceinline__ int idx_col_high(int x) const
{
return ::min(x, last_col);
}
__device__ __forceinline__ int idx_col(int x) const
{
return idx_col_low(idx_col_high(x));
}
template <typename T> __device__ __forceinline__ D at_low(int x, const T* data) const
{
return saturate_cast<D>(data[idx_col_low(x)]);
}
template <typename T> __device__ __forceinline__ D at_high(int x, const T* data) const
{
return saturate_cast<D>(data[idx_col_high(x)]);
}
template <typename T> __device__ __forceinline__ D at(int x, const T* data) const
{
return saturate_cast<D>(data[idx_col(x)]);
}
int last_col;
};
template <typename D> struct BrdColReplicate
{
typedef D result_type;
explicit __host__ __device__ __forceinline__ BrdColReplicate(int height) : last_row(height - 1) {}
template <typename U> __host__ __device__ __forceinline__ BrdColReplicate(int height, U) : last_row(height - 1) {}
__device__ __forceinline__ int idx_row_low(int y) const
{
return ::max(y, 0);
}
__device__ __forceinline__ int idx_row_high(int y) const
{
return ::min(y, last_row);
}
__device__ __forceinline__ int idx_row(int y) const
{
return idx_row_low(idx_row_high(y));
}
template <typename T> __device__ __forceinline__ D at_low(int y, const T* data, size_t step) const
{
return saturate_cast<D>(*(const T*)((const char*)data + idx_row_low(y) * step));
}
template <typename T> __device__ __forceinline__ D at_high(int y, const T* data, size_t step) const
{
return saturate_cast<D>(*(const T*)((const char*)data + idx_row_high(y) * step));
}
template <typename T> __device__ __forceinline__ D at(int y, const T* data, size_t step) const
{
return saturate_cast<D>(*(const T*)((const char*)data + idx_row(y) * step));
}
int last_row;
};
template <typename D> struct BrdReplicate
{
typedef D result_type;
__host__ __device__ __forceinline__ BrdReplicate(int height, int width) : last_row(height - 1), last_col(width - 1) {}
template <typename U> __host__ __device__ __forceinline__ BrdReplicate(int height, int width, U) : last_row(height - 1), last_col(width - 1) {}
__device__ __forceinline__ int idx_row_low(int y) const
{
return ::max(y, 0);
}
__device__ __forceinline__ int idx_row_high(int y) const
{
return ::min(y, last_row);
}
__device__ __forceinline__ int idx_row(int y) const
{
return idx_row_low(idx_row_high(y));
}
__device__ __forceinline__ int idx_col_low(int x) const
{
return ::max(x, 0);
}
__device__ __forceinline__ int idx_col_high(int x) const
{
return ::min(x, last_col);
}
__device__ __forceinline__ int idx_col(int x) const
{
return idx_col_low(idx_col_high(x));
}
template <typename T> __device__ __forceinline__ D at(int y, int x, const T* data, size_t step) const
{
return saturate_cast<D>(((const T*)((const char*)data + idx_row(y) * step))[idx_col(x)]);
}
template <typename Ptr2D> __device__ __forceinline__ D at(typename Ptr2D::index_type y, typename Ptr2D::index_type x, const Ptr2D& src) const
{
return saturate_cast<D>(src(idx_row(y), idx_col(x)));
}
int last_row;
int last_col;
};
//////////////////////////////////////////////////////////////
// BrdReflect101
template <typename D> struct BrdRowReflect101
{
typedef D result_type;
explicit __host__ __device__ __forceinline__ BrdRowReflect101(int width) : last_col(width - 1) {}
template <typename U> __host__ __device__ __forceinline__ BrdRowReflect101(int width, U) : last_col(width - 1) {}
__device__ __forceinline__ int idx_col_low(int x) const
{
return ::abs(x) % (last_col + 1);
}
__device__ __forceinline__ int idx_col_high(int x) const
{
return ::abs(last_col - ::abs(last_col - x)) % (last_col + 1);
}
__device__ __forceinline__ int idx_col(int x) const
{
return idx_col_low(idx_col_high(x));
}
template <typename T> __device__ __forceinline__ D at_low(int x, const T* data) const
{
return saturate_cast<D>(data[idx_col_low(x)]);
}
template <typename T> __device__ __forceinline__ D at_high(int x, const T* data) const
{
return saturate_cast<D>(data[idx_col_high(x)]);
}
template <typename T> __device__ __forceinline__ D at(int x, const T* data) const
{
return saturate_cast<D>(data[idx_col(x)]);
}
int last_col;
};
template <typename D> struct BrdColReflect101
{
typedef D result_type;
explicit __host__ __device__ __forceinline__ BrdColReflect101(int height) : last_row(height - 1) {}
template <typename U> __host__ __device__ __forceinline__ BrdColReflect101(int height, U) : last_row(height - 1) {}
__device__ __forceinline__ int idx_row_low(int y) const
{
return ::abs(y) % (last_row + 1);
}
__device__ __forceinline__ int idx_row_high(int y) const
{
return ::abs(last_row - ::abs(last_row - y)) % (last_row + 1);
}
__device__ __forceinline__ int idx_row(int y) const
{
return idx_row_low(idx_row_high(y));
}
template <typename T> __device__ __forceinline__ D at_low(int y, const T* data, size_t step) const
{
return saturate_cast<D>(*(const D*)((const char*)data + idx_row_low(y) * step));
}
template <typename T> __device__ __forceinline__ D at_high(int y, const T* data, size_t step) const
{
return saturate_cast<D>(*(const D*)((const char*)data + idx_row_high(y) * step));
}
template <typename T> __device__ __forceinline__ D at(int y, const T* data, size_t step) const
{
return saturate_cast<D>(*(const D*)((const char*)data + idx_row(y) * step));
}
int last_row;
};
template <typename D> struct BrdReflect101
{
typedef D result_type;
__host__ __device__ __forceinline__ BrdReflect101(int height, int width) : last_row(height - 1), last_col(width - 1) {}
template <typename U> __host__ __device__ __forceinline__ BrdReflect101(int height, int width, U) : last_row(height - 1), last_col(width - 1) {}
__device__ __forceinline__ int idx_row_low(int y) const
{
return ::abs(y) % (last_row + 1);
}
__device__ __forceinline__ int idx_row_high(int y) const
{
return ::abs(last_row - ::abs(last_row - y)) % (last_row + 1);
}
__device__ __forceinline__ int idx_row(int y) const
{
return idx_row_low(idx_row_high(y));
}
__device__ __forceinline__ int idx_col_low(int x) const
{
return ::abs(x) % (last_col + 1);
}
__device__ __forceinline__ int idx_col_high(int x) const
{
return ::abs(last_col - ::abs(last_col - x)) % (last_col + 1);
}
__device__ __forceinline__ int idx_col(int x) const
{
return idx_col_low(idx_col_high(x));
}
template <typename T> __device__ __forceinline__ D at(int y, int x, const T* data, size_t step) const
{
return saturate_cast<D>(((const T*)((const char*)data + idx_row(y) * step))[idx_col(x)]);
}
template <typename Ptr2D> __device__ __forceinline__ D at(typename Ptr2D::index_type y, typename Ptr2D::index_type x, const Ptr2D& src) const
{
return saturate_cast<D>(src(idx_row(y), idx_col(x)));
}
int last_row;
int last_col;
};
//////////////////////////////////////////////////////////////
// BrdReflect
template <typename D> struct BrdRowReflect
{
typedef D result_type;
explicit __host__ __device__ __forceinline__ BrdRowReflect(int width) : last_col(width - 1) {}
template <typename U> __host__ __device__ __forceinline__ BrdRowReflect(int width, U) : last_col(width - 1) {}
__device__ __forceinline__ int idx_col_low(int x) const
{
return (::abs(x) - (x < 0)) % (last_col + 1);
}
__device__ __forceinline__ int idx_col_high(int x) const
{
return ::abs(last_col - ::abs(last_col - x) + (x > last_col)) % (last_col + 1);
}
__device__ __forceinline__ int idx_col(int x) const
{
return idx_col_high(::abs(x) - (x < 0));
}
template <typename T> __device__ __forceinline__ D at_low(int x, const T* data) const
{
return saturate_cast<D>(data[idx_col_low(x)]);
}
template <typename T> __device__ __forceinline__ D at_high(int x, const T* data) const
{
return saturate_cast<D>(data[idx_col_high(x)]);
}
template <typename T> __device__ __forceinline__ D at(int x, const T* data) const
{
return saturate_cast<D>(data[idx_col(x)]);
}
int last_col;
};
template <typename D> struct BrdColReflect
{
typedef D result_type;
explicit __host__ __device__ __forceinline__ BrdColReflect(int height) : last_row(height - 1) {}
template <typename U> __host__ __device__ __forceinline__ BrdColReflect(int height, U) : last_row(height - 1) {}
__device__ __forceinline__ int idx_row_low(int y) const
{
return (::abs(y) - (y < 0)) % (last_row + 1);
}
__device__ __forceinline__ int idx_row_high(int y) const
{
return ::abs(last_row - ::abs(last_row - y) + (y > last_row)) % (last_row + 1);
}
__device__ __forceinline__ int idx_row(int y) const
{
return idx_row_high(::abs(y) - (y < 0));
}
template <typename T> __device__ __forceinline__ D at_low(int y, const T* data, size_t step) const
{
return saturate_cast<D>(*(const D*)((const char*)data + idx_row_low(y) * step));
}
template <typename T> __device__ __forceinline__ D at_high(int y, const T* data, size_t step) const
{
return saturate_cast<D>(*(const D*)((const char*)data + idx_row_high(y) * step));
}
template <typename T> __device__ __forceinline__ D at(int y, const T* data, size_t step) const
{
return saturate_cast<D>(*(const D*)((const char*)data + idx_row(y) * step));
}
int last_row;
};
template <typename D> struct BrdReflect
{
typedef D result_type;
__host__ __device__ __forceinline__ BrdReflect(int height, int width) : last_row(height - 1), last_col(width - 1) {}
template <typename U> __host__ __device__ __forceinline__ BrdReflect(int height, int width, U) : last_row(height - 1), last_col(width - 1) {}
__device__ __forceinline__ int idx_row_low(int y) const
{
return (::abs(y) - (y < 0)) % (last_row + 1);
}
__device__ __forceinline__ int idx_row_high(int y) const
{
return /*::abs*/(last_row - ::abs(last_row - y) + (y > last_row)) /*% (last_row + 1)*/;
}
__device__ __forceinline__ int idx_row(int y) const
{
return idx_row_low(idx_row_high(y));
}
__device__ __forceinline__ int idx_col_low(int x) const
{
return (::abs(x) - (x < 0)) % (last_col + 1);
}
__device__ __forceinline__ int idx_col_high(int x) const
{
return (last_col - ::abs(last_col - x) + (x > last_col));
}
__device__ __forceinline__ int idx_col(int x) const
{
return idx_col_low(idx_col_high(x));
}
template <typename T> __device__ __forceinline__ D at(int y, int x, const T* data, size_t step) const
{
return saturate_cast<D>(((const T*)((const char*)data + idx_row(y) * step))[idx_col(x)]);
}
template <typename Ptr2D> __device__ __forceinline__ D at(typename Ptr2D::index_type y, typename Ptr2D::index_type x, const Ptr2D& src) const
{
return saturate_cast<D>(src(idx_row(y), idx_col(x)));
}
int last_row;
int last_col;
};
//////////////////////////////////////////////////////////////
// BrdWrap
template <typename D> struct BrdRowWrap
{
typedef D result_type;
explicit __host__ __device__ __forceinline__ BrdRowWrap(int width_) : width(width_) {}
template <typename U> __host__ __device__ __forceinline__ BrdRowWrap(int width_, U) : width(width_) {}
__device__ __forceinline__ int idx_col_low(int x) const
{
return (x >= 0) * x + (x < 0) * (x - ((x - width + 1) / width) * width);
}
__device__ __forceinline__ int idx_col_high(int x) const
{
return (x < width) * x + (x >= width) * (x % width);
}
__device__ __forceinline__ int idx_col(int x) const
{
return idx_col_high(idx_col_low(x));
}
template <typename T> __device__ __forceinline__ D at_low(int x, const T* data) const
{
return saturate_cast<D>(data[idx_col_low(x)]);
}
template <typename T> __device__ __forceinline__ D at_high(int x, const T* data) const
{
return saturate_cast<D>(data[idx_col_high(x)]);
}
template <typename T> __device__ __forceinline__ D at(int x, const T* data) const
{
return saturate_cast<D>(data[idx_col(x)]);
}
int width;
};
template <typename D> struct BrdColWrap
{
typedef D result_type;
explicit __host__ __device__ __forceinline__ BrdColWrap(int height_) : height(height_) {}
template <typename U> __host__ __device__ __forceinline__ BrdColWrap(int height_, U) : height(height_) {}
__device__ __forceinline__ int idx_row_low(int y) const
{
return (y >= 0) * y + (y < 0) * (y - ((y - height + 1) / height) * height);
}
__device__ __forceinline__ int idx_row_high(int y) const
{
return (y < height) * y + (y >= height) * (y % height);
}
__device__ __forceinline__ int idx_row(int y) const
{
return idx_row_high(idx_row_low(y));
}
template <typename T> __device__ __forceinline__ D at_low(int y, const T* data, size_t step) const
{
return saturate_cast<D>(*(const D*)((const char*)data + idx_row_low(y) * step));
}
template <typename T> __device__ __forceinline__ D at_high(int y, const T* data, size_t step) const
{
return saturate_cast<D>(*(const D*)((const char*)data + idx_row_high(y) * step));
}
template <typename T> __device__ __forceinline__ D at(int y, const T* data, size_t step) const
{
return saturate_cast<D>(*(const D*)((const char*)data + idx_row(y) * step));
}
int height;
};
template <typename D> struct BrdWrap
{
typedef D result_type;
__host__ __device__ __forceinline__ BrdWrap(int height_, int width_) :
height(height_), width(width_)
{
}
template <typename U>
__host__ __device__ __forceinline__ BrdWrap(int height_, int width_, U) :
height(height_), width(width_)
{
}
__device__ __forceinline__ int idx_row_low(int y) const
{
return (y >= 0) ? y : (y - ((y - height + 1) / height) * height);
}
__device__ __forceinline__ int idx_row_high(int y) const
{
return (y < height) ? y : (y % height);
}
__device__ __forceinline__ int idx_row(int y) const
{
return idx_row_high(idx_row_low(y));
}
__device__ __forceinline__ int idx_col_low(int x) const
{
return (x >= 0) ? x : (x - ((x - width + 1) / width) * width);
}
__device__ __forceinline__ int idx_col_high(int x) const
{
return (x < width) ? x : (x % width);
}
__device__ __forceinline__ int idx_col(int x) const
{
return idx_col_high(idx_col_low(x));
}
template <typename T> __device__ __forceinline__ D at(int y, int x, const T* data, size_t step) const
{
return saturate_cast<D>(((const T*)((const char*)data + idx_row(y) * step))[idx_col(x)]);
}
template <typename Ptr2D> __device__ __forceinline__ D at(typename Ptr2D::index_type y, typename Ptr2D::index_type x, const Ptr2D& src) const
{
return saturate_cast<D>(src(idx_row(y), idx_col(x)));
}
int height;
int width;
};
//////////////////////////////////////////////////////////////
// BorderReader
template <typename Ptr2D, typename B> struct BorderReader
{
typedef typename B::result_type elem_type;
typedef typename Ptr2D::index_type index_type;
__host__ __device__ __forceinline__ BorderReader(const Ptr2D& ptr_, const B& b_) : ptr(ptr_), b(b_) {}
__device__ __forceinline__ elem_type operator ()(index_type y, index_type x) const
{
return b.at(y, x, ptr);
}
Ptr2D ptr;
B b;
};
// under win32 there is some bug with templated types that passed as kernel parameters
// with this specialization all works fine
template <typename Ptr2D, typename D> struct BorderReader< Ptr2D, BrdConstant<D> >
{
typedef typename BrdConstant<D>::result_type elem_type;
typedef typename Ptr2D::index_type index_type;
__host__ __device__ __forceinline__ BorderReader(const Ptr2D& src_, const BrdConstant<D>& b) :
src(src_), height(b.height), width(b.width), val(b.val)
{
}
__device__ __forceinline__ D operator ()(index_type y, index_type x) const
{
return (x >= 0 && x < width && y >= 0 && y < height) ? saturate_cast<D>(src(y, x)) : val;
}
Ptr2D src;
int height;
int width;
D val;
};
}}} // namespace cv { namespace cuda { namespace cudev
//! @endcond
#endif // OPENCV_CUDA_BORDER_INTERPOLATE_HPP
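A device-side sketch of how these policies compose: a raw PtrStepSz is wrapped together with a border object in a BorderReader, after which any coordinate, in range or not, can be read through operator(). The boxSum kernel below is illustrative; PtrStep/PtrStepSz come from opencv2/core/cuda_types.hpp:

#include <opencv2/core/cuda_types.hpp>
#include <opencv2/core/cuda/border_interpolate.hpp>

// Sums a (2*radius+1)^2 neighbourhood around every pixel; coordinates that fall
// outside the image are clamped to the nearest edge by BrdReplicate.
__global__ void boxSum(cv::cuda::PtrStepSz<uchar> src, cv::cuda::PtrStep<float> dst, int radius)
{
    using namespace cv::cuda::device;

    const int x = blockIdx.x * blockDim.x + threadIdx.x;
    const int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= src.cols || y >= src.rows)
        return;

    BrdReplicate<float> brd(src.rows, src.cols);
    BorderReader< cv::cuda::PtrStepSz<uchar>, BrdReplicate<float> > bordered(src, brd);

    float sum = 0.f;
    for (int dy = -radius; dy <= radius; ++dy)
        for (int dx = -radius; dx <= radius; ++dx)
            sum += bordered(y + dy, x + dx);   // safe even when the index is out of range

    dst(y, x) = sum;
}

Swapping BrdReplicate for BrdReflect101, BrdWrap or BrdConstant<float> changes only the border policy; the kernel body stays the same.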

View File

@@ -0,0 +1,309 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef OPENCV_CUDA_COLOR_HPP
#define OPENCV_CUDA_COLOR_HPP
#include "detail/color_detail.hpp"
/** @file
* @deprecated Use @ref cudev instead.
*/
//! @cond IGNORED
namespace cv { namespace cuda { namespace device
{
// All OPENCV_CUDA_IMPLEMENT_*_TRAITS(ColorSpace1_to_ColorSpace2, ...) macros implements
// template <typename T> class ColorSpace1_to_ColorSpace2_traits
// {
// typedef ... functor_type;
// static __host__ __device__ functor_type create_functor();
// };
OPENCV_CUDA_IMPLEMENT_RGB2RGB_TRAITS(bgr_to_rgb, 3, 3, 2)
OPENCV_CUDA_IMPLEMENT_RGB2RGB_TRAITS(bgr_to_bgra, 3, 4, 0)
OPENCV_CUDA_IMPLEMENT_RGB2RGB_TRAITS(bgr_to_rgba, 3, 4, 2)
OPENCV_CUDA_IMPLEMENT_RGB2RGB_TRAITS(bgra_to_bgr, 4, 3, 0)
OPENCV_CUDA_IMPLEMENT_RGB2RGB_TRAITS(bgra_to_rgb, 4, 3, 2)
OPENCV_CUDA_IMPLEMENT_RGB2RGB_TRAITS(bgra_to_rgba, 4, 4, 2)
#undef OPENCV_CUDA_IMPLEMENT_RGB2RGB_TRAITS
OPENCV_CUDA_IMPLEMENT_RGB2RGB5x5_TRAITS(bgr_to_bgr555, 3, 0, 5)
OPENCV_CUDA_IMPLEMENT_RGB2RGB5x5_TRAITS(bgr_to_bgr565, 3, 0, 6)
OPENCV_CUDA_IMPLEMENT_RGB2RGB5x5_TRAITS(rgb_to_bgr555, 3, 2, 5)
OPENCV_CUDA_IMPLEMENT_RGB2RGB5x5_TRAITS(rgb_to_bgr565, 3, 2, 6)
OPENCV_CUDA_IMPLEMENT_RGB2RGB5x5_TRAITS(bgra_to_bgr555, 4, 0, 5)
OPENCV_CUDA_IMPLEMENT_RGB2RGB5x5_TRAITS(bgra_to_bgr565, 4, 0, 6)
OPENCV_CUDA_IMPLEMENT_RGB2RGB5x5_TRAITS(rgba_to_bgr555, 4, 2, 5)
OPENCV_CUDA_IMPLEMENT_RGB2RGB5x5_TRAITS(rgba_to_bgr565, 4, 2, 6)
#undef OPENCV_CUDA_IMPLEMENT_RGB2RGB5x5_TRAITS
OPENCV_CUDA_IMPLEMENT_RGB5x52RGB_TRAITS(bgr555_to_rgb, 3, 2, 5)
OPENCV_CUDA_IMPLEMENT_RGB5x52RGB_TRAITS(bgr565_to_rgb, 3, 2, 6)
OPENCV_CUDA_IMPLEMENT_RGB5x52RGB_TRAITS(bgr555_to_bgr, 3, 0, 5)
OPENCV_CUDA_IMPLEMENT_RGB5x52RGB_TRAITS(bgr565_to_bgr, 3, 0, 6)
OPENCV_CUDA_IMPLEMENT_RGB5x52RGB_TRAITS(bgr555_to_rgba, 4, 2, 5)
OPENCV_CUDA_IMPLEMENT_RGB5x52RGB_TRAITS(bgr565_to_rgba, 4, 2, 6)
OPENCV_CUDA_IMPLEMENT_RGB5x52RGB_TRAITS(bgr555_to_bgra, 4, 0, 5)
OPENCV_CUDA_IMPLEMENT_RGB5x52RGB_TRAITS(bgr565_to_bgra, 4, 0, 6)
#undef OPENCV_CUDA_IMPLEMENT_RGB5x52RGB_TRAITS
OPENCV_CUDA_IMPLEMENT_GRAY2RGB_TRAITS(gray_to_bgr, 3)
OPENCV_CUDA_IMPLEMENT_GRAY2RGB_TRAITS(gray_to_bgra, 4)
#undef OPENCV_CUDA_IMPLEMENT_GRAY2RGB_TRAITS
OPENCV_CUDA_IMPLEMENT_GRAY2RGB5x5_TRAITS(gray_to_bgr555, 5)
OPENCV_CUDA_IMPLEMENT_GRAY2RGB5x5_TRAITS(gray_to_bgr565, 6)
#undef OPENCV_CUDA_IMPLEMENT_GRAY2RGB5x5_TRAITS
OPENCV_CUDA_IMPLEMENT_RGB5x52GRAY_TRAITS(bgr555_to_gray, 5)
OPENCV_CUDA_IMPLEMENT_RGB5x52GRAY_TRAITS(bgr565_to_gray, 6)
#undef OPENCV_CUDA_IMPLEMENT_RGB5x52GRAY_TRAITS
OPENCV_CUDA_IMPLEMENT_RGB2GRAY_TRAITS(rgb_to_gray, 3, 2)
OPENCV_CUDA_IMPLEMENT_RGB2GRAY_TRAITS(bgr_to_gray, 3, 0)
OPENCV_CUDA_IMPLEMENT_RGB2GRAY_TRAITS(rgba_to_gray, 4, 2)
OPENCV_CUDA_IMPLEMENT_RGB2GRAY_TRAITS(bgra_to_gray, 4, 0)
#undef OPENCV_CUDA_IMPLEMENT_RGB2GRAY_TRAITS
OPENCV_CUDA_IMPLEMENT_RGB2YUV_TRAITS(rgb_to_yuv, 3, 3, 2)
OPENCV_CUDA_IMPLEMENT_RGB2YUV_TRAITS(rgba_to_yuv, 4, 3, 2)
OPENCV_CUDA_IMPLEMENT_RGB2YUV_TRAITS(rgb_to_yuv4, 3, 4, 2)
OPENCV_CUDA_IMPLEMENT_RGB2YUV_TRAITS(rgba_to_yuv4, 4, 4, 2)
OPENCV_CUDA_IMPLEMENT_RGB2YUV_TRAITS(bgr_to_yuv, 3, 3, 0)
OPENCV_CUDA_IMPLEMENT_RGB2YUV_TRAITS(bgra_to_yuv, 4, 3, 0)
OPENCV_CUDA_IMPLEMENT_RGB2YUV_TRAITS(bgr_to_yuv4, 3, 4, 0)
OPENCV_CUDA_IMPLEMENT_RGB2YUV_TRAITS(bgra_to_yuv4, 4, 4, 0)
#undef OPENCV_CUDA_IMPLEMENT_RGB2YUV_TRAITS
OPENCV_CUDA_IMPLEMENT_YUV2RGB_TRAITS(yuv_to_rgb, 3, 3, 2)
OPENCV_CUDA_IMPLEMENT_YUV2RGB_TRAITS(yuv_to_rgba, 3, 4, 2)
OPENCV_CUDA_IMPLEMENT_YUV2RGB_TRAITS(yuv4_to_rgb, 4, 3, 2)
OPENCV_CUDA_IMPLEMENT_YUV2RGB_TRAITS(yuv4_to_rgba, 4, 4, 2)
OPENCV_CUDA_IMPLEMENT_YUV2RGB_TRAITS(yuv_to_bgr, 3, 3, 0)
OPENCV_CUDA_IMPLEMENT_YUV2RGB_TRAITS(yuv_to_bgra, 3, 4, 0)
OPENCV_CUDA_IMPLEMENT_YUV2RGB_TRAITS(yuv4_to_bgr, 4, 3, 0)
OPENCV_CUDA_IMPLEMENT_YUV2RGB_TRAITS(yuv4_to_bgra, 4, 4, 0)
#undef OPENCV_CUDA_IMPLEMENT_YUV2RGB_TRAITS
OPENCV_CUDA_IMPLEMENT_RGB2YCrCb_TRAITS(rgb_to_YCrCb, 3, 3, 2)
OPENCV_CUDA_IMPLEMENT_RGB2YCrCb_TRAITS(rgba_to_YCrCb, 4, 3, 2)
OPENCV_CUDA_IMPLEMENT_RGB2YCrCb_TRAITS(rgb_to_YCrCb4, 3, 4, 2)
OPENCV_CUDA_IMPLEMENT_RGB2YCrCb_TRAITS(rgba_to_YCrCb4, 4, 4, 2)
OPENCV_CUDA_IMPLEMENT_RGB2YCrCb_TRAITS(bgr_to_YCrCb, 3, 3, 0)
OPENCV_CUDA_IMPLEMENT_RGB2YCrCb_TRAITS(bgra_to_YCrCb, 4, 3, 0)
OPENCV_CUDA_IMPLEMENT_RGB2YCrCb_TRAITS(bgr_to_YCrCb4, 3, 4, 0)
OPENCV_CUDA_IMPLEMENT_RGB2YCrCb_TRAITS(bgra_to_YCrCb4, 4, 4, 0)
#undef OPENCV_CUDA_IMPLEMENT_RGB2YCrCb_TRAITS
OPENCV_CUDA_IMPLEMENT_YCrCb2RGB_TRAITS(YCrCb_to_rgb, 3, 3, 2)
OPENCV_CUDA_IMPLEMENT_YCrCb2RGB_TRAITS(YCrCb_to_rgba, 3, 4, 2)
OPENCV_CUDA_IMPLEMENT_YCrCb2RGB_TRAITS(YCrCb4_to_rgb, 4, 3, 2)
OPENCV_CUDA_IMPLEMENT_YCrCb2RGB_TRAITS(YCrCb4_to_rgba, 4, 4, 2)
OPENCV_CUDA_IMPLEMENT_YCrCb2RGB_TRAITS(YCrCb_to_bgr, 3, 3, 0)
OPENCV_CUDA_IMPLEMENT_YCrCb2RGB_TRAITS(YCrCb_to_bgra, 3, 4, 0)
OPENCV_CUDA_IMPLEMENT_YCrCb2RGB_TRAITS(YCrCb4_to_bgr, 4, 3, 0)
OPENCV_CUDA_IMPLEMENT_YCrCb2RGB_TRAITS(YCrCb4_to_bgra, 4, 4, 0)
#undef OPENCV_CUDA_IMPLEMENT_YCrCb2RGB_TRAITS
OPENCV_CUDA_IMPLEMENT_RGB2XYZ_TRAITS(rgb_to_xyz, 3, 3, 2)
OPENCV_CUDA_IMPLEMENT_RGB2XYZ_TRAITS(rgba_to_xyz, 4, 3, 2)
OPENCV_CUDA_IMPLEMENT_RGB2XYZ_TRAITS(rgb_to_xyz4, 3, 4, 2)
OPENCV_CUDA_IMPLEMENT_RGB2XYZ_TRAITS(rgba_to_xyz4, 4, 4, 2)
OPENCV_CUDA_IMPLEMENT_RGB2XYZ_TRAITS(bgr_to_xyz, 3, 3, 0)
OPENCV_CUDA_IMPLEMENT_RGB2XYZ_TRAITS(bgra_to_xyz, 4, 3, 0)
OPENCV_CUDA_IMPLEMENT_RGB2XYZ_TRAITS(bgr_to_xyz4, 3, 4, 0)
OPENCV_CUDA_IMPLEMENT_RGB2XYZ_TRAITS(bgra_to_xyz4, 4, 4, 0)
#undef OPENCV_CUDA_IMPLEMENT_RGB2XYZ_TRAITS
OPENCV_CUDA_IMPLEMENT_XYZ2RGB_TRAITS(xyz_to_rgb, 3, 3, 2)
OPENCV_CUDA_IMPLEMENT_XYZ2RGB_TRAITS(xyz4_to_rgb, 4, 3, 2)
OPENCV_CUDA_IMPLEMENT_XYZ2RGB_TRAITS(xyz_to_rgba, 3, 4, 2)
OPENCV_CUDA_IMPLEMENT_XYZ2RGB_TRAITS(xyz4_to_rgba, 4, 4, 2)
OPENCV_CUDA_IMPLEMENT_XYZ2RGB_TRAITS(xyz_to_bgr, 3, 3, 0)
OPENCV_CUDA_IMPLEMENT_XYZ2RGB_TRAITS(xyz4_to_bgr, 4, 3, 0)
OPENCV_CUDA_IMPLEMENT_XYZ2RGB_TRAITS(xyz_to_bgra, 3, 4, 0)
OPENCV_CUDA_IMPLEMENT_XYZ2RGB_TRAITS(xyz4_to_bgra, 4, 4, 0)
#undef OPENCV_CUDA_IMPLEMENT_XYZ2RGB_TRAITS
OPENCV_CUDA_IMPLEMENT_RGB2HSV_TRAITS(rgb_to_hsv, 3, 3, 2)
OPENCV_CUDA_IMPLEMENT_RGB2HSV_TRAITS(rgba_to_hsv, 4, 3, 2)
OPENCV_CUDA_IMPLEMENT_RGB2HSV_TRAITS(rgb_to_hsv4, 3, 4, 2)
OPENCV_CUDA_IMPLEMENT_RGB2HSV_TRAITS(rgba_to_hsv4, 4, 4, 2)
OPENCV_CUDA_IMPLEMENT_RGB2HSV_TRAITS(bgr_to_hsv, 3, 3, 0)
OPENCV_CUDA_IMPLEMENT_RGB2HSV_TRAITS(bgra_to_hsv, 4, 3, 0)
OPENCV_CUDA_IMPLEMENT_RGB2HSV_TRAITS(bgr_to_hsv4, 3, 4, 0)
OPENCV_CUDA_IMPLEMENT_RGB2HSV_TRAITS(bgra_to_hsv4, 4, 4, 0)
#undef OPENCV_CUDA_IMPLEMENT_RGB2HSV_TRAITS
OPENCV_CUDA_IMPLEMENT_HSV2RGB_TRAITS(hsv_to_rgb, 3, 3, 2)
OPENCV_CUDA_IMPLEMENT_HSV2RGB_TRAITS(hsv_to_rgba, 3, 4, 2)
OPENCV_CUDA_IMPLEMENT_HSV2RGB_TRAITS(hsv4_to_rgb, 4, 3, 2)
OPENCV_CUDA_IMPLEMENT_HSV2RGB_TRAITS(hsv4_to_rgba, 4, 4, 2)
OPENCV_CUDA_IMPLEMENT_HSV2RGB_TRAITS(hsv_to_bgr, 3, 3, 0)
OPENCV_CUDA_IMPLEMENT_HSV2RGB_TRAITS(hsv_to_bgra, 3, 4, 0)
OPENCV_CUDA_IMPLEMENT_HSV2RGB_TRAITS(hsv4_to_bgr, 4, 3, 0)
OPENCV_CUDA_IMPLEMENT_HSV2RGB_TRAITS(hsv4_to_bgra, 4, 4, 0)
#undef OPENCV_CUDA_IMPLEMENT_HSV2RGB_TRAITS
OPENCV_CUDA_IMPLEMENT_RGB2HLS_TRAITS(rgb_to_hls, 3, 3, 2)
OPENCV_CUDA_IMPLEMENT_RGB2HLS_TRAITS(rgba_to_hls, 4, 3, 2)
OPENCV_CUDA_IMPLEMENT_RGB2HLS_TRAITS(rgb_to_hls4, 3, 4, 2)
OPENCV_CUDA_IMPLEMENT_RGB2HLS_TRAITS(rgba_to_hls4, 4, 4, 2)
OPENCV_CUDA_IMPLEMENT_RGB2HLS_TRAITS(bgr_to_hls, 3, 3, 0)
OPENCV_CUDA_IMPLEMENT_RGB2HLS_TRAITS(bgra_to_hls, 4, 3, 0)
OPENCV_CUDA_IMPLEMENT_RGB2HLS_TRAITS(bgr_to_hls4, 3, 4, 0)
OPENCV_CUDA_IMPLEMENT_RGB2HLS_TRAITS(bgra_to_hls4, 4, 4, 0)
#undef OPENCV_CUDA_IMPLEMENT_RGB2HLS_TRAITS
OPENCV_CUDA_IMPLEMENT_HLS2RGB_TRAITS(hls_to_rgb, 3, 3, 2)
OPENCV_CUDA_IMPLEMENT_HLS2RGB_TRAITS(hls_to_rgba, 3, 4, 2)
OPENCV_CUDA_IMPLEMENT_HLS2RGB_TRAITS(hls4_to_rgb, 4, 3, 2)
OPENCV_CUDA_IMPLEMENT_HLS2RGB_TRAITS(hls4_to_rgba, 4, 4, 2)
OPENCV_CUDA_IMPLEMENT_HLS2RGB_TRAITS(hls_to_bgr, 3, 3, 0)
OPENCV_CUDA_IMPLEMENT_HLS2RGB_TRAITS(hls_to_bgra, 3, 4, 0)
OPENCV_CUDA_IMPLEMENT_HLS2RGB_TRAITS(hls4_to_bgr, 4, 3, 0)
OPENCV_CUDA_IMPLEMENT_HLS2RGB_TRAITS(hls4_to_bgra, 4, 4, 0)
#undef OPENCV_CUDA_IMPLEMENT_HLS2RGB_TRAITS
OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(rgb_to_lab, 3, 3, true, 2)
OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(rgba_to_lab, 4, 3, true, 2)
OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(rgb_to_lab4, 3, 4, true, 2)
OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(rgba_to_lab4, 4, 4, true, 2)
OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(bgr_to_lab, 3, 3, true, 0)
OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(bgra_to_lab, 4, 3, true, 0)
OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(bgr_to_lab4, 3, 4, true, 0)
OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(bgra_to_lab4, 4, 4, true, 0)
OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(lrgb_to_lab, 3, 3, false, 2)
OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(lrgba_to_lab, 4, 3, false, 2)
OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(lrgb_to_lab4, 3, 4, false, 2)
OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(lrgba_to_lab4, 4, 4, false, 2)
OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(lbgr_to_lab, 3, 3, false, 0)
OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(lbgra_to_lab, 4, 3, false, 0)
OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(lbgr_to_lab4, 3, 4, false, 0)
OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(lbgra_to_lab4, 4, 4, false, 0)
#undef OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS
OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab_to_rgb, 3, 3, true, 2)
OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab4_to_rgb, 4, 3, true, 2)
OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab_to_rgba, 3, 4, true, 2)
OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab4_to_rgba, 4, 4, true, 2)
OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab_to_bgr, 3, 3, true, 0)
OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab4_to_bgr, 4, 3, true, 0)
OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab_to_bgra, 3, 4, true, 0)
OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab4_to_bgra, 4, 4, true, 0)
OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab_to_lrgb, 3, 3, false, 2)
OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab4_to_lrgb, 4, 3, false, 2)
OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab_to_lrgba, 3, 4, false, 2)
OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab4_to_lrgba, 4, 4, false, 2)
OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab_to_lbgr, 3, 3, false, 0)
OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab4_to_lbgr, 4, 3, false, 0)
OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab_to_lbgra, 3, 4, false, 0)
OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab4_to_lbgra, 4, 4, false, 0)
#undef OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS
OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(rgb_to_luv, 3, 3, true, 2)
OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(rgba_to_luv, 4, 3, true, 2)
OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(rgb_to_luv4, 3, 4, true, 2)
OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(rgba_to_luv4, 4, 4, true, 2)
OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(bgr_to_luv, 3, 3, true, 0)
OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(bgra_to_luv, 4, 3, true, 0)
OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(bgr_to_luv4, 3, 4, true, 0)
OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(bgra_to_luv4, 4, 4, true, 0)
OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(lrgb_to_luv, 3, 3, false, 2)
OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(lrgba_to_luv, 4, 3, false, 2)
OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(lrgb_to_luv4, 3, 4, false, 2)
OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(lrgba_to_luv4, 4, 4, false, 2)
OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(lbgr_to_luv, 3, 3, false, 0)
OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(lbgra_to_luv, 4, 3, false, 0)
OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(lbgr_to_luv4, 3, 4, false, 0)
OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(lbgra_to_luv4, 4, 4, false, 0)
#undef OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS
OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv_to_rgb, 3, 3, true, 2)
OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv4_to_rgb, 4, 3, true, 2)
OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv_to_rgba, 3, 4, true, 2)
OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv4_to_rgba, 4, 4, true, 2)
OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv_to_bgr, 3, 3, true, 0)
OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv4_to_bgr, 4, 3, true, 0)
OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv_to_bgra, 3, 4, true, 0)
OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv4_to_bgra, 4, 4, true, 0)
OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv_to_lrgb, 3, 3, false, 2)
OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv4_to_lrgb, 4, 3, false, 2)
OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv_to_lrgba, 3, 4, false, 2)
OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv4_to_lrgba, 4, 4, false, 2)
OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv_to_lbgr, 3, 3, false, 0)
OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv4_to_lbgr, 4, 3, false, 0)
OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv_to_lbgra, 3, 4, false, 0)
OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv4_to_lbgra, 4, 4, false, 0)
#undef OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS
}}} // namespace cv { namespace cuda { namespace cudev
//! @endcond
#endif // OPENCV_CUDA_COLOR_HPP
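
The macro invocations above only generate thin traits wrappers; assuming they follow the usual name ## _traits pattern, each exposes a functor_type typedef plus a create_functor() factory around the matching color_detail functor. The sketch below shows how such a traits struct is typically consumed inside a kernel; it is an illustration only: the kernel name is made up, the traits shape is inferred from the macro names, and the include path assumes the headers are installed under opencv2/core/cuda/.

// Sketch only: converts one BGR pixel per thread to Lab through the generated traits.
#include <opencv2/core/cuda/color.hpp>
#include <opencv2/core/cuda_types.hpp>

using namespace cv::cuda;

__global__ void bgrToLabKernel(PtrStepSz<uchar3> src, PtrStep<uchar3> dst)
{
    const int x = blockIdx.x * blockDim.x + threadIdx.x;
    const int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x < src.cols && y < src.rows)
    {
        // bgr_to_lab_traits is assumed to be generated by OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS above.
        device::bgr_to_lab_traits<uchar>::functor_type cvt =
            device::bgr_to_lab_traits<uchar>::create_functor();
        dst(y, x) = cvt(src(y, x));
    }
}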

View File

@@ -0,0 +1,123 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef OPENCV_CUDA_COMMON_HPP
#define OPENCV_CUDA_COMMON_HPP
#include <cuda_runtime.h>
#include "opencv2/core/cuda_types.hpp"
#include "opencv2/core/cvdef.h"
#include "opencv2/core/base.hpp"
/** @file
* @deprecated Use @ref cudev instead.
*/
//! @cond IGNORED
#ifndef CV_PI_F
#ifndef CV_PI
#define CV_PI_F 3.14159265f
#else
#define CV_PI_F ((float)CV_PI)
#endif
#endif
namespace cv { namespace cuda {
static inline void checkCudaError(cudaError_t err, const char* file, const int line, const char* func)
{
if (cudaSuccess != err)
cv::error(cv::Error::GpuApiCallError, cudaGetErrorString(err), func, file, line);
}
}}
#ifndef cudaSafeCall
#define cudaSafeCall(expr) cv::cuda::checkCudaError(expr, __FILE__, __LINE__, CV_Func)
#endif
namespace cv { namespace cuda
{
template <typename T> static inline bool isAligned(const T* ptr, size_t size)
{
return reinterpret_cast<size_t>(ptr) % size == 0;
}
static inline bool isAligned(size_t step, size_t size)
{
return step % size == 0;
}
}}
namespace cv { namespace cuda
{
namespace device
{
__host__ __device__ __forceinline__ int divUp(int total, int grain)
{
return (total + grain - 1) / grain;
}
template<class T> inline void bindTexture(const textureReference* tex, const PtrStepSz<T>& img)
{
cudaChannelFormatDesc desc = cudaCreateChannelDesc<T>();
cudaSafeCall( cudaBindTexture2D(0, tex, img.ptr(), &desc, img.cols, img.rows, img.step) );
}
template<class T> inline void createTextureObjectPitch2D(cudaTextureObject_t* tex, PtrStepSz<T>& img, const cudaTextureDesc& texDesc)
{
cudaResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = cudaResourceTypePitch2D;
resDesc.res.pitch2D.devPtr = static_cast<void*>(img.ptr());
resDesc.res.pitch2D.height = img.rows;
resDesc.res.pitch2D.width = img.cols;
resDesc.res.pitch2D.pitchInBytes = img.step;
resDesc.res.pitch2D.desc = cudaCreateChannelDesc<T>();
cudaSafeCall( cudaCreateTextureObject(tex, &resDesc, &texDesc, NULL) );
}
}
}}
//! @endcond
#endif // OPENCV_CUDA_COMMON_HPP
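
cudaSafeCall routes any failing CUDA runtime call through cv::error with file, line and function information, and divUp is the usual ceiling division used to size a launch grid. A short host-side sketch of that idiom follows; the kernel name fillKernel is illustrative, not part of this header.

// Sketch only: fills a single-channel float image with a constant value.
#include <opencv2/core/cuda/common.hpp>

__global__ void fillKernel(cv::cuda::PtrStepSz<float> img, float value)
{
    const int x = blockIdx.x * blockDim.x + threadIdx.x;
    const int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x < img.cols && y < img.rows)
        img(y, x) = value;
}

void fill(cv::cuda::PtrStepSz<float> img, float value, cudaStream_t stream)
{
    const dim3 block(32, 8);
    // divUp rounds the grid up so the whole image is covered by full blocks.
    const dim3 grid(cv::cuda::device::divUp(img.cols, block.x),
                    cv::cuda::device::divUp(img.rows, block.y));
    fillKernel<<<grid, block, 0, stream>>>(img, value);
    cudaSafeCall( cudaGetLastError() );          // raises cv::error on launch failure
    if (stream == 0)
        cudaSafeCall( cudaDeviceSynchronize() );
}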

View File

@@ -0,0 +1,113 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef OPENCV_CUDA_DATAMOV_UTILS_HPP
#define OPENCV_CUDA_DATAMOV_UTILS_HPP
#include "common.hpp"
/** @file
* @deprecated Use @ref cudev instead.
*/
//! @cond IGNORED
namespace cv { namespace cuda { namespace device
{
#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 200
// for Fermi memory space is detected automatically
template <typename T> struct ForceGlob
{
__device__ __forceinline__ static void Load(const T* ptr, int offset, T& val) { val = ptr[offset]; }
};
#else // __CUDA_ARCH__ >= 200
#if defined(_WIN64) || defined(__LP64__)
// 64-bit register modifier for inlined asm
#define OPENCV_CUDA_ASM_PTR "l"
#else
// 32-bit register modifier for inlined asm
#define OPENCV_CUDA_ASM_PTR "r"
#endif
template<class T> struct ForceGlob;
#define OPENCV_CUDA_DEFINE_FORCE_GLOB(base_type, ptx_type, reg_mod) \
template <> struct ForceGlob<base_type> \
{ \
__device__ __forceinline__ static void Load(const base_type* ptr, int offset, base_type& val) \
{ \
asm("ld.global."#ptx_type" %0, [%1];" : "="#reg_mod(val) : OPENCV_CUDA_ASM_PTR(ptr + offset)); \
} \
};
#define OPENCV_CUDA_DEFINE_FORCE_GLOB_B(base_type, ptx_type) \
template <> struct ForceGlob<base_type> \
{ \
__device__ __forceinline__ static void Load(const base_type* ptr, int offset, base_type& val) \
{ \
asm("ld.global."#ptx_type" %0, [%1];" : "=r"(*reinterpret_cast<uint*>(&val)) : OPENCV_CUDA_ASM_PTR(ptr + offset)); \
} \
};
OPENCV_CUDA_DEFINE_FORCE_GLOB_B(uchar, u8)
OPENCV_CUDA_DEFINE_FORCE_GLOB_B(schar, s8)
OPENCV_CUDA_DEFINE_FORCE_GLOB_B(char, b8)
OPENCV_CUDA_DEFINE_FORCE_GLOB (ushort, u16, h)
OPENCV_CUDA_DEFINE_FORCE_GLOB (short, s16, h)
OPENCV_CUDA_DEFINE_FORCE_GLOB (uint, u32, r)
OPENCV_CUDA_DEFINE_FORCE_GLOB (int, s32, r)
OPENCV_CUDA_DEFINE_FORCE_GLOB (float, f32, f)
OPENCV_CUDA_DEFINE_FORCE_GLOB (double, f64, d)
#undef OPENCV_CUDA_DEFINE_FORCE_GLOB
#undef OPENCV_CUDA_DEFINE_FORCE_GLOB_B
#undef OPENCV_CUDA_ASM_PTR
#endif // __CUDA_ARCH__ >= 200
}}} // namespace cv { namespace cuda { namespace cudev
//! @endcond
#endif // OPENCV_CUDA_DATAMOV_UTILS_HPP
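
ForceGlob<T>::Load(ptr, offset, val) is a uniform way to read val = ptr[offset] from global memory: on sm_20 and newer it compiles to a plain array access, while on older architectures it emits an explicit ld.global with the matching PTX type and register modifier. A hedged device-side sketch (the kernel is illustrative):

// Sketch only: copies src[i] to dst[i] through ForceGlob instead of a direct read.
#include <opencv2/core/cuda/datamov_utils.hpp>

__global__ void copyThroughForceGlob(const float* src, float* dst, int n)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
    {
        float v;
        cv::cuda::device::ForceGlob<float>::Load(src, i, v);  // v = src[i]
        dst[i] = v;
    }
}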

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,365 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef OPENCV_CUDA_REDUCE_DETAIL_HPP
#define OPENCV_CUDA_REDUCE_DETAIL_HPP
#include <thrust/tuple.h>
#include "../warp.hpp"
#include "../warp_shuffle.hpp"
//! @cond IGNORED
namespace cv { namespace cuda { namespace device
{
namespace reduce_detail
{
template <typename T> struct GetType;
template <typename T> struct GetType<T*>
{
typedef T type;
};
template <typename T> struct GetType<volatile T*>
{
typedef T type;
};
template <typename T> struct GetType<T&>
{
typedef T type;
};
template <unsigned int I, unsigned int N>
struct For
{
template <class PointerTuple, class ValTuple>
static __device__ void loadToSmem(const PointerTuple& smem, const ValTuple& val, unsigned int tid)
{
thrust::get<I>(smem)[tid] = thrust::get<I>(val);
For<I + 1, N>::loadToSmem(smem, val, tid);
}
template <class PointerTuple, class ValTuple>
static __device__ void loadFromSmem(const PointerTuple& smem, const ValTuple& val, unsigned int tid)
{
thrust::get<I>(val) = thrust::get<I>(smem)[tid];
For<I + 1, N>::loadFromSmem(smem, val, tid);
}
template <class PointerTuple, class ValTuple, class OpTuple>
static __device__ void merge(const PointerTuple& smem, const ValTuple& val, unsigned int tid, unsigned int delta, const OpTuple& op)
{
typename GetType<typename thrust::tuple_element<I, PointerTuple>::type>::type reg = thrust::get<I>(smem)[tid + delta];
thrust::get<I>(smem)[tid] = thrust::get<I>(val) = thrust::get<I>(op)(thrust::get<I>(val), reg);
For<I + 1, N>::merge(smem, val, tid, delta, op);
}
template <class ValTuple, class OpTuple>
static __device__ void mergeShfl(const ValTuple& val, unsigned int delta, unsigned int width, const OpTuple& op)
{
typename GetType<typename thrust::tuple_element<I, ValTuple>::type>::type reg = shfl_down(thrust::get<I>(val), delta, width);
thrust::get<I>(val) = thrust::get<I>(op)(thrust::get<I>(val), reg);
For<I + 1, N>::mergeShfl(val, delta, width, op);
}
};
template <unsigned int N>
struct For<N, N>
{
template <class PointerTuple, class ValTuple>
static __device__ void loadToSmem(const PointerTuple&, const ValTuple&, unsigned int)
{
}
template <class PointerTuple, class ValTuple>
static __device__ void loadFromSmem(const PointerTuple&, const ValTuple&, unsigned int)
{
}
template <class PointerTuple, class ValTuple, class OpTuple>
static __device__ void merge(const PointerTuple&, const ValTuple&, unsigned int, unsigned int, const OpTuple&)
{
}
template <class ValTuple, class OpTuple>
static __device__ void mergeShfl(const ValTuple&, unsigned int, unsigned int, const OpTuple&)
{
}
};
template <typename T>
__device__ __forceinline__ void loadToSmem(volatile T* smem, T& val, unsigned int tid)
{
smem[tid] = val;
}
template <typename T>
__device__ __forceinline__ void loadFromSmem(volatile T* smem, T& val, unsigned int tid)
{
val = smem[tid];
}
template <typename P0, typename P1, typename P2, typename P3, typename P4, typename P5, typename P6, typename P7, typename P8, typename P9,
typename R0, typename R1, typename R2, typename R3, typename R4, typename R5, typename R6, typename R7, typename R8, typename R9>
__device__ __forceinline__ void loadToSmem(const thrust::tuple<P0, P1, P2, P3, P4, P5, P6, P7, P8, P9>& smem,
const thrust::tuple<R0, R1, R2, R3, R4, R5, R6, R7, R8, R9>& val,
unsigned int tid)
{
For<0, thrust::tuple_size<thrust::tuple<P0, P1, P2, P3, P4, P5, P6, P7, P8, P9> >::value>::loadToSmem(smem, val, tid);
}
template <typename P0, typename P1, typename P2, typename P3, typename P4, typename P5, typename P6, typename P7, typename P8, typename P9,
typename R0, typename R1, typename R2, typename R3, typename R4, typename R5, typename R6, typename R7, typename R8, typename R9>
__device__ __forceinline__ void loadFromSmem(const thrust::tuple<P0, P1, P2, P3, P4, P5, P6, P7, P8, P9>& smem,
const thrust::tuple<R0, R1, R2, R3, R4, R5, R6, R7, R8, R9>& val,
unsigned int tid)
{
For<0, thrust::tuple_size<thrust::tuple<P0, P1, P2, P3, P4, P5, P6, P7, P8, P9> >::value>::loadFromSmem(smem, val, tid);
}
template <typename T, class Op>
__device__ __forceinline__ void merge(volatile T* smem, T& val, unsigned int tid, unsigned int delta, const Op& op)
{
T reg = smem[tid + delta];
smem[tid] = val = op(val, reg);
}
template <typename T, class Op>
__device__ __forceinline__ void mergeShfl(T& val, unsigned int delta, unsigned int width, const Op& op)
{
T reg = shfl_down(val, delta, width);
val = op(val, reg);
}
template <typename P0, typename P1, typename P2, typename P3, typename P4, typename P5, typename P6, typename P7, typename P8, typename P9,
typename R0, typename R1, typename R2, typename R3, typename R4, typename R5, typename R6, typename R7, typename R8, typename R9,
class Op0, class Op1, class Op2, class Op3, class Op4, class Op5, class Op6, class Op7, class Op8, class Op9>
__device__ __forceinline__ void merge(const thrust::tuple<P0, P1, P2, P3, P4, P5, P6, P7, P8, P9>& smem,
const thrust::tuple<R0, R1, R2, R3, R4, R5, R6, R7, R8, R9>& val,
unsigned int tid,
unsigned int delta,
const thrust::tuple<Op0, Op1, Op2, Op3, Op4, Op5, Op6, Op7, Op8, Op9>& op)
{
For<0, thrust::tuple_size<thrust::tuple<P0, P1, P2, P3, P4, P5, P6, P7, P8, P9> >::value>::merge(smem, val, tid, delta, op);
}
template <typename R0, typename R1, typename R2, typename R3, typename R4, typename R5, typename R6, typename R7, typename R8, typename R9,
class Op0, class Op1, class Op2, class Op3, class Op4, class Op5, class Op6, class Op7, class Op8, class Op9>
__device__ __forceinline__ void mergeShfl(const thrust::tuple<R0, R1, R2, R3, R4, R5, R6, R7, R8, R9>& val,
unsigned int delta,
unsigned int width,
const thrust::tuple<Op0, Op1, Op2, Op3, Op4, Op5, Op6, Op7, Op8, Op9>& op)
{
For<0, thrust::tuple_size<thrust::tuple<R0, R1, R2, R3, R4, R5, R6, R7, R8, R9> >::value>::mergeShfl(val, delta, width, op);
}
template <unsigned int N> struct Generic
{
template <typename Pointer, typename Reference, class Op>
static __device__ void reduce(Pointer smem, Reference val, unsigned int tid, Op op)
{
loadToSmem(smem, val, tid);
if (N >= 32)
__syncthreads();
if (N >= 2048)
{
if (tid < 1024)
merge(smem, val, tid, 1024, op);
__syncthreads();
}
if (N >= 1024)
{
if (tid < 512)
merge(smem, val, tid, 512, op);
__syncthreads();
}
if (N >= 512)
{
if (tid < 256)
merge(smem, val, tid, 256, op);
__syncthreads();
}
if (N >= 256)
{
if (tid < 128)
merge(smem, val, tid, 128, op);
__syncthreads();
}
if (N >= 128)
{
if (tid < 64)
merge(smem, val, tid, 64, op);
__syncthreads();
}
if (N >= 64)
{
if (tid < 32)
merge(smem, val, tid, 32, op);
}
if (tid < 16)
{
merge(smem, val, tid, 16, op);
merge(smem, val, tid, 8, op);
merge(smem, val, tid, 4, op);
merge(smem, val, tid, 2, op);
merge(smem, val, tid, 1, op);
}
}
};
template <unsigned int I, typename Pointer, typename Reference, class Op>
struct Unroll
{
static __device__ void loopShfl(Reference val, Op op, unsigned int N)
{
mergeShfl(val, I, N, op);
Unroll<I / 2, Pointer, Reference, Op>::loopShfl(val, op, N);
}
static __device__ void loop(Pointer smem, Reference val, unsigned int tid, Op op)
{
merge(smem, val, tid, I, op);
Unroll<I / 2, Pointer, Reference, Op>::loop(smem, val, tid, op);
}
};
template <typename Pointer, typename Reference, class Op>
struct Unroll<0, Pointer, Reference, Op>
{
static __device__ void loopShfl(Reference, Op, unsigned int)
{
}
static __device__ void loop(Pointer, Reference, unsigned int, Op)
{
}
};
template <unsigned int N> struct WarpOptimized
{
template <typename Pointer, typename Reference, class Op>
static __device__ void reduce(Pointer smem, Reference val, unsigned int tid, Op op)
{
#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 300
CV_UNUSED(smem);
CV_UNUSED(tid);
Unroll<N / 2, Pointer, Reference, Op>::loopShfl(val, op, N);
#else
loadToSmem(smem, val, tid);
if (tid < N / 2)
Unroll<N / 2, Pointer, Reference, Op>::loop(smem, val, tid, op);
#endif
}
};
template <unsigned int N> struct GenericOptimized32
{
enum { M = N / 32 };
template <typename Pointer, typename Reference, class Op>
static __device__ void reduce(Pointer smem, Reference val, unsigned int tid, Op op)
{
const unsigned int laneId = Warp::laneId();
#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 300
Unroll<16, Pointer, Reference, Op>::loopShfl(val, op, warpSize);
if (laneId == 0)
loadToSmem(smem, val, tid / 32);
#else
loadToSmem(smem, val, tid);
if (laneId < 16)
Unroll<16, Pointer, Reference, Op>::loop(smem, val, tid, op);
__syncthreads();
if (laneId == 0)
loadToSmem(smem, val, tid / 32);
#endif
__syncthreads();
loadFromSmem(smem, val, tid);
if (tid < 32)
{
#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 300
Unroll<M / 2, Pointer, Reference, Op>::loopShfl(val, op, M);
#else
Unroll<M / 2, Pointer, Reference, Op>::loop(smem, val, tid, op);
#endif
}
}
};
template <bool val, class T1, class T2> struct StaticIf;
template <class T1, class T2> struct StaticIf<true, T1, T2>
{
typedef T1 type;
};
template <class T1, class T2> struct StaticIf<false, T1, T2>
{
typedef T2 type;
};
template <unsigned int N> struct IsPowerOf2
{
enum { value = ((N != 0) && !(N & (N - 1))) };
};
template <unsigned int N> struct Dispatcher
{
typedef typename StaticIf<
(N <= 32) && IsPowerOf2<N>::value,
WarpOptimized<N>,
typename StaticIf<
(N <= 1024) && IsPowerOf2<N>::value,
GenericOptimized32<N>,
Generic<N>
>::type
>::type reductor;
};
}
}}}
//! @endcond
#endif // OPENCV_CUDA_REDUCE_DETAIL_HPP
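
In normal use this detail namespace is not called directly: the public reduce() wrapper one directory up forwards to Dispatcher<N>::reductor, which picks WarpOptimized, GenericOptimized32 or Generic from the block size N. A hedged sketch of a 256-thread block sum through that wrapper follows; the kernel, buffer names and block size are illustrative, and the include paths assume the headers live under opencv2/core/cuda/.

// Sketch only: each block folds its 256 partial values into partialSums[blockIdx.x].
#include <opencv2/core/cuda/reduce.hpp>       // public wrapper over this detail header
#include <opencv2/core/cuda/functional.hpp>   // plus<>

__global__ void blockSum(const float* data, float* partialSums, int n)
{
    const unsigned int tid = threadIdx.x;
    const int idx = blockIdx.x * blockDim.x + tid;

    __shared__ float smem[256];
    float val = (idx < n) ? data[idx] : 0.0f;

    // Behind this call Dispatcher<256>::reductor resolves to GenericOptimized32<256>.
    cv::cuda::device::reduce<256>(smem, val, tid, cv::cuda::device::plus<float>());

    if (tid == 0)
        partialSums[blockIdx.x] = val;
}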

View File

@@ -0,0 +1,502 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef OPENCV_CUDA_PRED_VAL_REDUCE_DETAIL_HPP
#define OPENCV_CUDA_PRED_VAL_REDUCE_DETAIL_HPP
#include <thrust/tuple.h>
#include "../warp.hpp"
#include "../warp_shuffle.hpp"
//! @cond IGNORED
namespace cv { namespace cuda { namespace device
{
namespace reduce_key_val_detail
{
template <typename T> struct GetType;
template <typename T> struct GetType<T*>
{
typedef T type;
};
template <typename T> struct GetType<volatile T*>
{
typedef T type;
};
template <typename T> struct GetType<T&>
{
typedef T type;
};
template <unsigned int I, unsigned int N>
struct For
{
template <class PointerTuple, class ReferenceTuple>
static __device__ void loadToSmem(const PointerTuple& smem, const ReferenceTuple& data, unsigned int tid)
{
thrust::get<I>(smem)[tid] = thrust::get<I>(data);
For<I + 1, N>::loadToSmem(smem, data, tid);
}
template <class PointerTuple, class ReferenceTuple>
static __device__ void loadFromSmem(const PointerTuple& smem, const ReferenceTuple& data, unsigned int tid)
{
thrust::get<I>(data) = thrust::get<I>(smem)[tid];
For<I + 1, N>::loadFromSmem(smem, data, tid);
}
template <class ReferenceTuple>
static __device__ void copyShfl(const ReferenceTuple& val, unsigned int delta, int width)
{
thrust::get<I>(val) = shfl_down(thrust::get<I>(val), delta, width);
For<I + 1, N>::copyShfl(val, delta, width);
}
template <class PointerTuple, class ReferenceTuple>
static __device__ void copy(const PointerTuple& svals, const ReferenceTuple& val, unsigned int tid, unsigned int delta)
{
thrust::get<I>(svals)[tid] = thrust::get<I>(val) = thrust::get<I>(svals)[tid + delta];
For<I + 1, N>::copy(svals, val, tid, delta);
}
template <class KeyReferenceTuple, class ValReferenceTuple, class CmpTuple>
static __device__ void mergeShfl(const KeyReferenceTuple& key, const ValReferenceTuple& val, const CmpTuple& cmp, unsigned int delta, int width)
{
typename GetType<typename thrust::tuple_element<I, KeyReferenceTuple>::type>::type reg = shfl_down(thrust::get<I>(key), delta, width);
if (thrust::get<I>(cmp)(reg, thrust::get<I>(key)))
{
thrust::get<I>(key) = reg;
thrust::get<I>(val) = shfl_down(thrust::get<I>(val), delta, width);
}
For<I + 1, N>::mergeShfl(key, val, cmp, delta, width);
}
template <class KeyPointerTuple, class KeyReferenceTuple, class ValPointerTuple, class ValReferenceTuple, class CmpTuple>
static __device__ void merge(const KeyPointerTuple& skeys, const KeyReferenceTuple& key,
const ValPointerTuple& svals, const ValReferenceTuple& val,
const CmpTuple& cmp,
unsigned int tid, unsigned int delta)
{
typename GetType<typename thrust::tuple_element<I, KeyPointerTuple>::type>::type reg = thrust::get<I>(skeys)[tid + delta];
if (thrust::get<I>(cmp)(reg, thrust::get<I>(key)))
{
thrust::get<I>(skeys)[tid] = thrust::get<I>(key) = reg;
thrust::get<I>(svals)[tid] = thrust::get<I>(val) = thrust::get<I>(svals)[tid + delta];
}
For<I + 1, N>::merge(skeys, key, svals, val, cmp, tid, delta);
}
};
template <unsigned int N>
struct For<N, N>
{
template <class PointerTuple, class ReferenceTuple>
static __device__ void loadToSmem(const PointerTuple&, const ReferenceTuple&, unsigned int)
{
}
template <class PointerTuple, class ReferenceTuple>
static __device__ void loadFromSmem(const PointerTuple&, const ReferenceTuple&, unsigned int)
{
}
template <class ReferenceTuple>
static __device__ void copyShfl(const ReferenceTuple&, unsigned int, int)
{
}
template <class PointerTuple, class ReferenceTuple>
static __device__ void copy(const PointerTuple&, const ReferenceTuple&, unsigned int, unsigned int)
{
}
template <class KeyReferenceTuple, class ValReferenceTuple, class CmpTuple>
static __device__ void mergeShfl(const KeyReferenceTuple&, const ValReferenceTuple&, const CmpTuple&, unsigned int, int)
{
}
template <class KeyPointerTuple, class KeyReferenceTuple, class ValPointerTuple, class ValReferenceTuple, class CmpTuple>
static __device__ void merge(const KeyPointerTuple&, const KeyReferenceTuple&,
const ValPointerTuple&, const ValReferenceTuple&,
const CmpTuple&,
unsigned int, unsigned int)
{
}
};
//////////////////////////////////////////////////////
// loadToSmem
template <typename T>
__device__ __forceinline__ void loadToSmem(volatile T* smem, T& data, unsigned int tid)
{
smem[tid] = data;
}
template <typename T>
__device__ __forceinline__ void loadFromSmem(volatile T* smem, T& data, unsigned int tid)
{
data = smem[tid];
}
template <typename VP0, typename VP1, typename VP2, typename VP3, typename VP4, typename VP5, typename VP6, typename VP7, typename VP8, typename VP9,
typename VR0, typename VR1, typename VR2, typename VR3, typename VR4, typename VR5, typename VR6, typename VR7, typename VR8, typename VR9>
__device__ __forceinline__ void loadToSmem(const thrust::tuple<VP0, VP1, VP2, VP3, VP4, VP5, VP6, VP7, VP8, VP9>& smem,
const thrust::tuple<VR0, VR1, VR2, VR3, VR4, VR5, VR6, VR7, VR8, VR9>& data,
unsigned int tid)
{
For<0, thrust::tuple_size<thrust::tuple<VP0, VP1, VP2, VP3, VP4, VP5, VP6, VP7, VP8, VP9> >::value>::loadToSmem(smem, data, tid);
}
template <typename VP0, typename VP1, typename VP2, typename VP3, typename VP4, typename VP5, typename VP6, typename VP7, typename VP8, typename VP9,
typename VR0, typename VR1, typename VR2, typename VR3, typename VR4, typename VR5, typename VR6, typename VR7, typename VR8, typename VR9>
__device__ __forceinline__ void loadFromSmem(const thrust::tuple<VP0, VP1, VP2, VP3, VP4, VP5, VP6, VP7, VP8, VP9>& smem,
const thrust::tuple<VR0, VR1, VR2, VR3, VR4, VR5, VR6, VR7, VR8, VR9>& data,
unsigned int tid)
{
For<0, thrust::tuple_size<thrust::tuple<VP0, VP1, VP2, VP3, VP4, VP5, VP6, VP7, VP8, VP9> >::value>::loadFromSmem(smem, data, tid);
}
//////////////////////////////////////////////////////
// copyVals
template <typename V>
__device__ __forceinline__ void copyValsShfl(V& val, unsigned int delta, int width)
{
val = shfl_down(val, delta, width);
}
template <typename V>
__device__ __forceinline__ void copyVals(volatile V* svals, V& val, unsigned int tid, unsigned int delta)
{
svals[tid] = val = svals[tid + delta];
}
template <typename VR0, typename VR1, typename VR2, typename VR3, typename VR4, typename VR5, typename VR6, typename VR7, typename VR8, typename VR9>
__device__ __forceinline__ void copyValsShfl(const thrust::tuple<VR0, VR1, VR2, VR3, VR4, VR5, VR6, VR7, VR8, VR9>& val,
unsigned int delta,
int width)
{
For<0, thrust::tuple_size<thrust::tuple<VR0, VR1, VR2, VR3, VR4, VR5, VR6, VR7, VR8, VR9> >::value>::copyShfl(val, delta, width);
}
template <typename VP0, typename VP1, typename VP2, typename VP3, typename VP4, typename VP5, typename VP6, typename VP7, typename VP8, typename VP9,
typename VR0, typename VR1, typename VR2, typename VR3, typename VR4, typename VR5, typename VR6, typename VR7, typename VR8, typename VR9>
__device__ __forceinline__ void copyVals(const thrust::tuple<VP0, VP1, VP2, VP3, VP4, VP5, VP6, VP7, VP8, VP9>& svals,
const thrust::tuple<VR0, VR1, VR2, VR3, VR4, VR5, VR6, VR7, VR8, VR9>& val,
unsigned int tid, unsigned int delta)
{
For<0, thrust::tuple_size<thrust::tuple<VP0, VP1, VP2, VP3, VP4, VP5, VP6, VP7, VP8, VP9> >::value>::copy(svals, val, tid, delta);
}
//////////////////////////////////////////////////////
// merge
template <typename K, typename V, class Cmp>
__device__ __forceinline__ void mergeShfl(K& key, V& val, const Cmp& cmp, unsigned int delta, int width)
{
K reg = shfl_down(key, delta, width);
if (cmp(reg, key))
{
key = reg;
copyValsShfl(val, delta, width);
}
}
template <typename K, typename V, class Cmp>
__device__ __forceinline__ void merge(volatile K* skeys, K& key, volatile V* svals, V& val, const Cmp& cmp, unsigned int tid, unsigned int delta)
{
K reg = skeys[tid + delta];
if (cmp(reg, key))
{
skeys[tid] = key = reg;
copyVals(svals, val, tid, delta);
}
}
template <typename K,
typename VR0, typename VR1, typename VR2, typename VR3, typename VR4, typename VR5, typename VR6, typename VR7, typename VR8, typename VR9,
class Cmp>
__device__ __forceinline__ void mergeShfl(K& key,
const thrust::tuple<VR0, VR1, VR2, VR3, VR4, VR5, VR6, VR7, VR8, VR9>& val,
const Cmp& cmp,
unsigned int delta, int width)
{
K reg = shfl_down(key, delta, width);
if (cmp(reg, key))
{
key = reg;
copyValsShfl(val, delta, width);
}
}
template <typename K,
typename VP0, typename VP1, typename VP2, typename VP3, typename VP4, typename VP5, typename VP6, typename VP7, typename VP8, typename VP9,
typename VR0, typename VR1, typename VR2, typename VR3, typename VR4, typename VR5, typename VR6, typename VR7, typename VR8, typename VR9,
class Cmp>
__device__ __forceinline__ void merge(volatile K* skeys, K& key,
const thrust::tuple<VP0, VP1, VP2, VP3, VP4, VP5, VP6, VP7, VP8, VP9>& svals,
const thrust::tuple<VR0, VR1, VR2, VR3, VR4, VR5, VR6, VR7, VR8, VR9>& val,
const Cmp& cmp, unsigned int tid, unsigned int delta)
{
K reg = skeys[tid + delta];
if (cmp(reg, key))
{
skeys[tid] = key = reg;
copyVals(svals, val, tid, delta);
}
}
template <typename KR0, typename KR1, typename KR2, typename KR3, typename KR4, typename KR5, typename KR6, typename KR7, typename KR8, typename KR9,
typename VR0, typename VR1, typename VR2, typename VR3, typename VR4, typename VR5, typename VR6, typename VR7, typename VR8, typename VR9,
class Cmp0, class Cmp1, class Cmp2, class Cmp3, class Cmp4, class Cmp5, class Cmp6, class Cmp7, class Cmp8, class Cmp9>
__device__ __forceinline__ void mergeShfl(const thrust::tuple<KR0, KR1, KR2, KR3, KR4, KR5, KR6, KR7, KR8, KR9>& key,
const thrust::tuple<VR0, VR1, VR2, VR3, VR4, VR5, VR6, VR7, VR8, VR9>& val,
const thrust::tuple<Cmp0, Cmp1, Cmp2, Cmp3, Cmp4, Cmp5, Cmp6, Cmp7, Cmp8, Cmp9>& cmp,
unsigned int delta, int width)
{
For<0, thrust::tuple_size<thrust::tuple<KR0, KR1, KR2, KR3, KR4, KR5, KR6, KR7, KR8, KR9> >::value>::mergeShfl(key, val, cmp, delta, width);
}
template <typename KP0, typename KP1, typename KP2, typename KP3, typename KP4, typename KP5, typename KP6, typename KP7, typename KP8, typename KP9,
typename KR0, typename KR1, typename KR2, typename KR3, typename KR4, typename KR5, typename KR6, typename KR7, typename KR8, typename KR9,
typename VP0, typename VP1, typename VP2, typename VP3, typename VP4, typename VP5, typename VP6, typename VP7, typename VP8, typename VP9,
typename VR0, typename VR1, typename VR2, typename VR3, typename VR4, typename VR5, typename VR6, typename VR7, typename VR8, typename VR9,
class Cmp0, class Cmp1, class Cmp2, class Cmp3, class Cmp4, class Cmp5, class Cmp6, class Cmp7, class Cmp8, class Cmp9>
__device__ __forceinline__ void merge(const thrust::tuple<KP0, KP1, KP2, KP3, KP4, KP5, KP6, KP7, KP8, KP9>& skeys,
const thrust::tuple<KR0, KR1, KR2, KR3, KR4, KR5, KR6, KR7, KR8, KR9>& key,
const thrust::tuple<VP0, VP1, VP2, VP3, VP4, VP5, VP6, VP7, VP8, VP9>& svals,
const thrust::tuple<VR0, VR1, VR2, VR3, VR4, VR5, VR6, VR7, VR8, VR9>& val,
const thrust::tuple<Cmp0, Cmp1, Cmp2, Cmp3, Cmp4, Cmp5, Cmp6, Cmp7, Cmp8, Cmp9>& cmp,
unsigned int tid, unsigned int delta)
{
For<0, thrust::tuple_size<thrust::tuple<VP0, VP1, VP2, VP3, VP4, VP5, VP6, VP7, VP8, VP9> >::value>::merge(skeys, key, svals, val, cmp, tid, delta);
}
//////////////////////////////////////////////////////
// Generic
template <unsigned int N> struct Generic
{
template <class KP, class KR, class VP, class VR, class Cmp>
static __device__ void reduce(KP skeys, KR key, VP svals, VR val, unsigned int tid, Cmp cmp)
{
loadToSmem(skeys, key, tid);
loadValsToSmem(svals, val, tid);
if (N >= 32)
__syncthreads();
if (N >= 2048)
{
if (tid < 1024)
merge(skeys, key, svals, val, cmp, tid, 1024);
__syncthreads();
}
if (N >= 1024)
{
if (tid < 512)
merge(skeys, key, svals, val, cmp, tid, 512);
__syncthreads();
}
if (N >= 512)
{
if (tid < 256)
merge(skeys, key, svals, val, cmp, tid, 256);
__syncthreads();
}
if (N >= 256)
{
if (tid < 128)
merge(skeys, key, svals, val, cmp, tid, 128);
__syncthreads();
}
if (N >= 128)
{
if (tid < 64)
merge(skeys, key, svals, val, cmp, tid, 64);
__syncthreads();
}
if (N >= 64)
{
if (tid < 32)
merge(skeys, key, svals, val, cmp, tid, 32);
}
if (tid < 16)
{
merge(skeys, key, svals, val, cmp, tid, 16);
merge(skeys, key, svals, val, cmp, tid, 8);
merge(skeys, key, svals, val, cmp, tid, 4);
merge(skeys, key, svals, val, cmp, tid, 2);
merge(skeys, key, svals, val, cmp, tid, 1);
}
}
};
template <unsigned int I, class KP, class KR, class VP, class VR, class Cmp>
struct Unroll
{
static __device__ void loopShfl(KR key, VR val, Cmp cmp, unsigned int N)
{
mergeShfl(key, val, cmp, I, N);
Unroll<I / 2, KP, KR, VP, VR, Cmp>::loopShfl(key, val, cmp, N);
}
static __device__ void loop(KP skeys, KR key, VP svals, VR val, unsigned int tid, Cmp cmp)
{
merge(skeys, key, svals, val, cmp, tid, I);
Unroll<I / 2, KP, KR, VP, VR, Cmp>::loop(skeys, key, svals, val, tid, cmp);
}
};
template <class KP, class KR, class VP, class VR, class Cmp>
struct Unroll<0, KP, KR, VP, VR, Cmp>
{
static __device__ void loopShfl(KR, VR, Cmp, unsigned int)
{
}
static __device__ void loop(KP, KR, VP, VR, unsigned int, Cmp)
{
}
};
template <unsigned int N> struct WarpOptimized
{
template <class KP, class KR, class VP, class VR, class Cmp>
static __device__ void reduce(KP skeys, KR key, VP svals, VR val, unsigned int tid, Cmp cmp)
{
#if 0 // __CUDA_ARCH__ >= 300
CV_UNUSED(skeys);
CV_UNUSED(svals);
CV_UNUSED(tid);
Unroll<N / 2, KP, KR, VP, VR, Cmp>::loopShfl(key, val, cmp, N);
#else
loadToSmem(skeys, key, tid);
loadToSmem(svals, val, tid);
if (tid < N / 2)
Unroll<N / 2, KP, KR, VP, VR, Cmp>::loop(skeys, key, svals, val, tid, cmp);
#endif
}
};
template <unsigned int N> struct GenericOptimized32
{
enum { M = N / 32 };
template <class KP, class KR, class VP, class VR, class Cmp>
static __device__ void reduce(KP skeys, KR key, VP svals, VR val, unsigned int tid, Cmp cmp)
{
const unsigned int laneId = Warp::laneId();
#if 0 // __CUDA_ARCH__ >= 300
Unroll<16, KP, KR, VP, VR, Cmp>::loopShfl(key, val, cmp, warpSize);
if (laneId == 0)
{
loadToSmem(skeys, key, tid / 32);
loadToSmem(svals, val, tid / 32);
}
#else
loadToSmem(skeys, key, tid);
loadToSmem(svals, val, tid);
if (laneId < 16)
Unroll<16, KP, KR, VP, VR, Cmp>::loop(skeys, key, svals, val, tid, cmp);
__syncthreads();
if (laneId == 0)
{
loadToSmem(skeys, key, tid / 32);
loadToSmem(svals, val, tid / 32);
}
#endif
__syncthreads();
loadFromSmem(skeys, key, tid);
if (tid < 32)
{
#if 0 // __CUDA_ARCH__ >= 300
loadFromSmem(svals, val, tid);
Unroll<M / 2, KP, KR, VP, VR, Cmp>::loopShfl(key, val, cmp, M);
#else
Unroll<M / 2, KP, KR, VP, VR, Cmp>::loop(skeys, key, svals, val, tid, cmp);
#endif
}
}
};
template <bool val, class T1, class T2> struct StaticIf;
template <class T1, class T2> struct StaticIf<true, T1, T2>
{
typedef T1 type;
};
template <class T1, class T2> struct StaticIf<false, T1, T2>
{
typedef T2 type;
};
template <unsigned int N> struct IsPowerOf2
{
enum { value = ((N != 0) && !(N & (N - 1))) };
};
template <unsigned int N> struct Dispatcher
{
typedef typename StaticIf<
(N <= 32) && IsPowerOf2<N>::value,
WarpOptimized<N>,
typename StaticIf<
(N <= 1024) && IsPowerOf2<N>::value,
GenericOptimized32<N>,
Generic<N>
>::type
>::type reductor;
};
}
}}}
//! @endcond
#endif // OPENCV_CUDA_PRED_VAL_REDUCE_DETAIL_HPP
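
The key/value variant carries a payload alongside each key: whenever cmp prefers the neighbouring key, the associated value moves with it, which is how argmin/argmax style reductions are built. In practice it is reached through the reduceKeyVal() wrapper in the public reduce header rather than through this detail namespace. A hedged sketch of a per-block minimum together with the index at which it occurs (names and block size are illustrative):

// Sketch only: each block reports its smallest element and that element's index.
#include <cfloat>                              // FLT_MAX
#include <opencv2/core/cuda/reduce.hpp>        // reduceKeyVal() wrapper
#include <opencv2/core/cuda/functional.hpp>    // less<>

__global__ void blockArgMin(const float* data, float* minVal, int* minIdx, int n)
{
    const unsigned int tid = threadIdx.x;
    const int idx = blockIdx.x * blockDim.x + tid;

    __shared__ float skeys[256];
    __shared__ int   svals[256];

    float key = (idx < n) ? data[idx] : FLT_MAX;
    int   val = idx;

    // The value (the index) follows its key whenever less<float> prefers the neighbour.
    cv::cuda::device::reduceKeyVal<256>(skeys, key, svals, val, tid,
                                        cv::cuda::device::less<float>());

    if (tid == 0)
    {
        minVal[blockIdx.x] = key;
        minIdx[blockIdx.x] = val;
    }
}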

View File

@@ -0,0 +1,392 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef OPENCV_CUDA_TRANSFORM_DETAIL_HPP
#define OPENCV_CUDA_TRANSFORM_DETAIL_HPP
#include "../common.hpp"
#include "../vec_traits.hpp"
#include "../functional.hpp"
//! @cond IGNORED
namespace cv { namespace cuda { namespace device
{
namespace transform_detail
{
//! Read Write Traits
template <typename T, typename D, int shift> struct UnaryReadWriteTraits
{
typedef typename TypeVec<T, shift>::vec_type read_type;
typedef typename TypeVec<D, shift>::vec_type write_type;
};
template <typename T1, typename T2, typename D, int shift> struct BinaryReadWriteTraits
{
typedef typename TypeVec<T1, shift>::vec_type read_type1;
typedef typename TypeVec<T2, shift>::vec_type read_type2;
typedef typename TypeVec<D, shift>::vec_type write_type;
};
//! Transform kernels
template <int shift> struct OpUnroller;
template <> struct OpUnroller<1>
{
template <typename T, typename D, typename UnOp, typename Mask>
static __device__ __forceinline__ void unroll(const T& src, D& dst, const Mask& mask, UnOp& op, int x_shifted, int y)
{
if (mask(y, x_shifted))
dst.x = op(src.x);
}
template <typename T1, typename T2, typename D, typename BinOp, typename Mask>
static __device__ __forceinline__ void unroll(const T1& src1, const T2& src2, D& dst, const Mask& mask, BinOp& op, int x_shifted, int y)
{
if (mask(y, x_shifted))
dst.x = op(src1.x, src2.x);
}
};
template <> struct OpUnroller<2>
{
template <typename T, typename D, typename UnOp, typename Mask>
static __device__ __forceinline__ void unroll(const T& src, D& dst, const Mask& mask, UnOp& op, int x_shifted, int y)
{
if (mask(y, x_shifted))
dst.x = op(src.x);
if (mask(y, x_shifted + 1))
dst.y = op(src.y);
}
template <typename T1, typename T2, typename D, typename BinOp, typename Mask>
static __device__ __forceinline__ void unroll(const T1& src1, const T2& src2, D& dst, const Mask& mask, BinOp& op, int x_shifted, int y)
{
if (mask(y, x_shifted))
dst.x = op(src1.x, src2.x);
if (mask(y, x_shifted + 1))
dst.y = op(src1.y, src2.y);
}
};
template <> struct OpUnroller<3>
{
template <typename T, typename D, typename UnOp, typename Mask>
static __device__ __forceinline__ void unroll(const T& src, D& dst, const Mask& mask, const UnOp& op, int x_shifted, int y)
{
if (mask(y, x_shifted))
dst.x = op(src.x);
if (mask(y, x_shifted + 1))
dst.y = op(src.y);
if (mask(y, x_shifted + 2))
dst.z = op(src.z);
}
template <typename T1, typename T2, typename D, typename BinOp, typename Mask>
static __device__ __forceinline__ void unroll(const T1& src1, const T2& src2, D& dst, const Mask& mask, const BinOp& op, int x_shifted, int y)
{
if (mask(y, x_shifted))
dst.x = op(src1.x, src2.x);
if (mask(y, x_shifted + 1))
dst.y = op(src1.y, src2.y);
if (mask(y, x_shifted + 2))
dst.z = op(src1.z, src2.z);
}
};
template <> struct OpUnroller<4>
{
template <typename T, typename D, typename UnOp, typename Mask>
static __device__ __forceinline__ void unroll(const T& src, D& dst, const Mask& mask, const UnOp& op, int x_shifted, int y)
{
if (mask(y, x_shifted))
dst.x = op(src.x);
if (mask(y, x_shifted + 1))
dst.y = op(src.y);
if (mask(y, x_shifted + 2))
dst.z = op(src.z);
if (mask(y, x_shifted + 3))
dst.w = op(src.w);
}
template <typename T1, typename T2, typename D, typename BinOp, typename Mask>
static __device__ __forceinline__ void unroll(const T1& src1, const T2& src2, D& dst, const Mask& mask, const BinOp& op, int x_shifted, int y)
{
if (mask(y, x_shifted))
dst.x = op(src1.x, src2.x);
if (mask(y, x_shifted + 1))
dst.y = op(src1.y, src2.y);
if (mask(y, x_shifted + 2))
dst.z = op(src1.z, src2.z);
if (mask(y, x_shifted + 3))
dst.w = op(src1.w, src2.w);
}
};
template <> struct OpUnroller<8>
{
template <typename T, typename D, typename UnOp, typename Mask>
static __device__ __forceinline__ void unroll(const T& src, D& dst, const Mask& mask, const UnOp& op, int x_shifted, int y)
{
if (mask(y, x_shifted))
dst.a0 = op(src.a0);
if (mask(y, x_shifted + 1))
dst.a1 = op(src.a1);
if (mask(y, x_shifted + 2))
dst.a2 = op(src.a2);
if (mask(y, x_shifted + 3))
dst.a3 = op(src.a3);
if (mask(y, x_shifted + 4))
dst.a4 = op(src.a4);
if (mask(y, x_shifted + 5))
dst.a5 = op(src.a5);
if (mask(y, x_shifted + 6))
dst.a6 = op(src.a6);
if (mask(y, x_shifted + 7))
dst.a7 = op(src.a7);
}
template <typename T1, typename T2, typename D, typename BinOp, typename Mask>
static __device__ __forceinline__ void unroll(const T1& src1, const T2& src2, D& dst, const Mask& mask, const BinOp& op, int x_shifted, int y)
{
if (mask(y, x_shifted))
dst.a0 = op(src1.a0, src2.a0);
if (mask(y, x_shifted + 1))
dst.a1 = op(src1.a1, src2.a1);
if (mask(y, x_shifted + 2))
dst.a2 = op(src1.a2, src2.a2);
if (mask(y, x_shifted + 3))
dst.a3 = op(src1.a3, src2.a3);
if (mask(y, x_shifted + 4))
dst.a4 = op(src1.a4, src2.a4);
if (mask(y, x_shifted + 5))
dst.a5 = op(src1.a5, src2.a5);
if (mask(y, x_shifted + 6))
dst.a6 = op(src1.a6, src2.a6);
if (mask(y, x_shifted + 7))
dst.a7 = op(src1.a7, src2.a7);
}
};
template <typename T, typename D, typename UnOp, typename Mask>
static __global__ void transformSmart(const PtrStepSz<T> src_, PtrStep<D> dst_, const Mask mask, const UnOp op)
{
typedef TransformFunctorTraits<UnOp> ft;
typedef typename UnaryReadWriteTraits<T, D, ft::smart_shift>::read_type read_type;
typedef typename UnaryReadWriteTraits<T, D, ft::smart_shift>::write_type write_type;
const int x = threadIdx.x + blockIdx.x * blockDim.x;
const int y = threadIdx.y + blockIdx.y * blockDim.y;
const int x_shifted = x * ft::smart_shift;
if (y < src_.rows)
{
const T* src = src_.ptr(y);
D* dst = dst_.ptr(y);
if (x_shifted + ft::smart_shift - 1 < src_.cols)
{
const read_type src_n_el = ((const read_type*)src)[x];
OpUnroller<ft::smart_shift>::unroll(src_n_el, ((write_type*)dst)[x], mask, op, x_shifted, y);
}
else
{
for (int real_x = x_shifted; real_x < src_.cols; ++real_x)
{
if (mask(y, real_x))
dst[real_x] = op(src[real_x]);
}
}
}
}
template <typename T, typename D, typename UnOp, typename Mask>
__global__ static void transformSimple(const PtrStepSz<T> src, PtrStep<D> dst, const Mask mask, const UnOp op)
{
const int x = blockDim.x * blockIdx.x + threadIdx.x;
const int y = blockDim.y * blockIdx.y + threadIdx.y;
if (x < src.cols && y < src.rows && mask(y, x))
{
dst.ptr(y)[x] = op(src.ptr(y)[x]);
}
}
template <typename T1, typename T2, typename D, typename BinOp, typename Mask>
static __global__ void transformSmart(const PtrStepSz<T1> src1_, const PtrStep<T2> src2_, PtrStep<D> dst_,
const Mask mask, const BinOp op)
{
typedef TransformFunctorTraits<BinOp> ft;
typedef typename BinaryReadWriteTraits<T1, T2, D, ft::smart_shift>::read_type1 read_type1;
typedef typename BinaryReadWriteTraits<T1, T2, D, ft::smart_shift>::read_type2 read_type2;
typedef typename BinaryReadWriteTraits<T1, T2, D, ft::smart_shift>::write_type write_type;
const int x = threadIdx.x + blockIdx.x * blockDim.x;
const int y = threadIdx.y + blockIdx.y * blockDim.y;
const int x_shifted = x * ft::smart_shift;
if (y < src1_.rows)
{
const T1* src1 = src1_.ptr(y);
const T2* src2 = src2_.ptr(y);
D* dst = dst_.ptr(y);
if (x_shifted + ft::smart_shift - 1 < src1_.cols)
{
const read_type1 src1_n_el = ((const read_type1*)src1)[x];
const read_type2 src2_n_el = ((const read_type2*)src2)[x];
OpUnroller<ft::smart_shift>::unroll(src1_n_el, src2_n_el, ((write_type*)dst)[x], mask, op, x_shifted, y);
}
else
{
for (int real_x = x_shifted; real_x < src1_.cols; ++real_x)
{
if (mask(y, real_x))
dst[real_x] = op(src1[real_x], src2[real_x]);
}
}
}
}
template <typename T1, typename T2, typename D, typename BinOp, typename Mask>
static __global__ void transformSimple(const PtrStepSz<T1> src1, const PtrStep<T2> src2, PtrStep<D> dst,
const Mask mask, const BinOp op)
{
const int x = blockDim.x * blockIdx.x + threadIdx.x;
const int y = blockDim.y * blockIdx.y + threadIdx.y;
if (x < src1.cols && y < src1.rows && mask(y, x))
{
const T1 src1_data = src1.ptr(y)[x];
const T2 src2_data = src2.ptr(y)[x];
dst.ptr(y)[x] = op(src1_data, src2_data);
}
}
template <bool UseSmart> struct TransformDispatcher;
template<> struct TransformDispatcher<false>
{
template <typename T, typename D, typename UnOp, typename Mask>
static void call(PtrStepSz<T> src, PtrStepSz<D> dst, UnOp op, Mask mask, cudaStream_t stream)
{
typedef TransformFunctorTraits<UnOp> ft;
const dim3 threads(ft::simple_block_dim_x, ft::simple_block_dim_y, 1);
const dim3 grid(divUp(src.cols, threads.x), divUp(src.rows, threads.y), 1);
transformSimple<T, D><<<grid, threads, 0, stream>>>(src, dst, mask, op);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
template <typename T1, typename T2, typename D, typename BinOp, typename Mask>
static void call(PtrStepSz<T1> src1, PtrStepSz<T2> src2, PtrStepSz<D> dst, BinOp op, Mask mask, cudaStream_t stream)
{
typedef TransformFunctorTraits<BinOp> ft;
const dim3 threads(ft::simple_block_dim_x, ft::simple_block_dim_y, 1);
const dim3 grid(divUp(src1.cols, threads.x), divUp(src1.rows, threads.y), 1);
transformSimple<T1, T2, D><<<grid, threads, 0, stream>>>(src1, src2, dst, mask, op);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
};
template<> struct TransformDispatcher<true>
{
template <typename T, typename D, typename UnOp, typename Mask>
static void call(PtrStepSz<T> src, PtrStepSz<D> dst, UnOp op, Mask mask, cudaStream_t stream)
{
typedef TransformFunctorTraits<UnOp> ft;
CV_StaticAssert(ft::smart_shift != 1, "");
if (!isAligned(src.data, ft::smart_shift * sizeof(T)) || !isAligned(src.step, ft::smart_shift * sizeof(T)) ||
!isAligned(dst.data, ft::smart_shift * sizeof(D)) || !isAligned(dst.step, ft::smart_shift * sizeof(D)))
{
TransformDispatcher<false>::call(src, dst, op, mask, stream);
return;
}
const dim3 threads(ft::smart_block_dim_x, ft::smart_block_dim_y, 1);
const dim3 grid(divUp(src.cols, threads.x * ft::smart_shift), divUp(src.rows, threads.y), 1);
transformSmart<T, D><<<grid, threads, 0, stream>>>(src, dst, mask, op);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
template <typename T1, typename T2, typename D, typename BinOp, typename Mask>
static void call(PtrStepSz<T1> src1, PtrStepSz<T2> src2, PtrStepSz<D> dst, BinOp op, Mask mask, cudaStream_t stream)
{
typedef TransformFunctorTraits<BinOp> ft;
CV_StaticAssert(ft::smart_shift != 1, "");
if (!isAligned(src1.data, ft::smart_shift * sizeof(T1)) || !isAligned(src1.step, ft::smart_shift * sizeof(T1)) ||
!isAligned(src2.data, ft::smart_shift * sizeof(T2)) || !isAligned(src2.step, ft::smart_shift * sizeof(T2)) ||
!isAligned(dst.data, ft::smart_shift * sizeof(D)) || !isAligned(dst.step, ft::smart_shift * sizeof(D)))
{
TransformDispatcher<false>::call(src1, src2, dst, op, mask, stream);
return;
}
const dim3 threads(ft::smart_block_dim_x, ft::smart_block_dim_y, 1);
const dim3 grid(divUp(src1.cols, threads.x * ft::smart_shift), divUp(src1.rows, threads.y), 1);
transformSmart<T1, T2, D><<<grid, threads, 0, stream>>>(src1, src2, dst, mask, op);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
};
} // namespace transform_detail
}}} // namespace cv { namespace cuda { namespace cudev
//! @endcond
#endif // OPENCV_CUDA_TRANSFORM_DETAIL_HPP
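
TransformDispatcher<true> only takes the vectorized "smart" path when the source and destination pointers and steps are aligned to smart_shift elements; otherwise it quietly falls back to the per-pixel kernel. User code normally goes through cv::cuda::device::transform() from the public transform header instead of this namespace. A hedged host-side sketch with an illustrative functor:

// Sketch only: doubles every pixel of a single-channel float image.
#include <opencv2/core/cuda/transform.hpp>     // public transform() wrapper
#include <opencv2/core/cuda/functional.hpp>    // unary_function
#include <opencv2/core/cuda/utility.hpp>       // WithOutMask

// Illustrative unary functor: scales every pixel by a constant factor of two.
struct ScaleBy2 : cv::cuda::device::unary_function<float, float>
{
    __device__ __forceinline__ float operator ()(float x) const { return 2.0f * x; }
};

void scaleImage(cv::cuda::PtrStepSz<float> src, cv::cuda::PtrStepSz<float> dst, cudaStream_t stream)
{
    // transform() consults TransformFunctorTraits<ScaleBy2> and routes the call through
    // TransformDispatcher<...>::call above; WithOutMask means "process every pixel".
    cv::cuda::device::transform(src, dst, ScaleBy2(), cv::cuda::device::WithOutMask(), stream);
}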

View File

@@ -0,0 +1,191 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef OPENCV_CUDA_TYPE_TRAITS_DETAIL_HPP
#define OPENCV_CUDA_TYPE_TRAITS_DETAIL_HPP
#include "../common.hpp"
#include "../vec_traits.hpp"
//! @cond IGNORED
namespace cv { namespace cuda { namespace device
{
namespace type_traits_detail
{
template <bool, typename T1, typename T2> struct Select { typedef T1 type; };
template <typename T1, typename T2> struct Select<false, T1, T2> { typedef T2 type; };
template <typename T> struct IsSignedIntergral { enum {value = 0}; };
template <> struct IsSignedIntergral<schar> { enum {value = 1}; };
template <> struct IsSignedIntergral<char1> { enum {value = 1}; };
template <> struct IsSignedIntergral<short> { enum {value = 1}; };
template <> struct IsSignedIntergral<short1> { enum {value = 1}; };
template <> struct IsSignedIntergral<int> { enum {value = 1}; };
template <> struct IsSignedIntergral<int1> { enum {value = 1}; };
template <typename T> struct IsUnsignedIntegral { enum {value = 0}; };
template <> struct IsUnsignedIntegral<uchar> { enum {value = 1}; };
template <> struct IsUnsignedIntegral<uchar1> { enum {value = 1}; };
template <> struct IsUnsignedIntegral<ushort> { enum {value = 1}; };
template <> struct IsUnsignedIntegral<ushort1> { enum {value = 1}; };
template <> struct IsUnsignedIntegral<uint> { enum {value = 1}; };
template <> struct IsUnsignedIntegral<uint1> { enum {value = 1}; };
template <typename T> struct IsIntegral { enum {value = IsSignedIntergral<T>::value || IsUnsignedIntegral<T>::value}; };
template <> struct IsIntegral<char> { enum {value = 1}; };
template <> struct IsIntegral<bool> { enum {value = 1}; };
template <typename T> struct IsFloat { enum {value = 0}; };
template <> struct IsFloat<float> { enum {value = 1}; };
template <> struct IsFloat<double> { enum {value = 1}; };
template <typename T> struct IsVec { enum {value = 0}; };
template <> struct IsVec<uchar1> { enum {value = 1}; };
template <> struct IsVec<uchar2> { enum {value = 1}; };
template <> struct IsVec<uchar3> { enum {value = 1}; };
template <> struct IsVec<uchar4> { enum {value = 1}; };
template <> struct IsVec<uchar8> { enum {value = 1}; };
template <> struct IsVec<char1> { enum {value = 1}; };
template <> struct IsVec<char2> { enum {value = 1}; };
template <> struct IsVec<char3> { enum {value = 1}; };
template <> struct IsVec<char4> { enum {value = 1}; };
template <> struct IsVec<char8> { enum {value = 1}; };
template <> struct IsVec<ushort1> { enum {value = 1}; };
template <> struct IsVec<ushort2> { enum {value = 1}; };
template <> struct IsVec<ushort3> { enum {value = 1}; };
template <> struct IsVec<ushort4> { enum {value = 1}; };
template <> struct IsVec<ushort8> { enum {value = 1}; };
template <> struct IsVec<short1> { enum {value = 1}; };
template <> struct IsVec<short2> { enum {value = 1}; };
template <> struct IsVec<short3> { enum {value = 1}; };
template <> struct IsVec<short4> { enum {value = 1}; };
template <> struct IsVec<short8> { enum {value = 1}; };
template <> struct IsVec<uint1> { enum {value = 1}; };
template <> struct IsVec<uint2> { enum {value = 1}; };
template <> struct IsVec<uint3> { enum {value = 1}; };
template <> struct IsVec<uint4> { enum {value = 1}; };
template <> struct IsVec<uint8> { enum {value = 1}; };
template <> struct IsVec<int1> { enum {value = 1}; };
template <> struct IsVec<int2> { enum {value = 1}; };
template <> struct IsVec<int3> { enum {value = 1}; };
template <> struct IsVec<int4> { enum {value = 1}; };
template <> struct IsVec<int8> { enum {value = 1}; };
template <> struct IsVec<float1> { enum {value = 1}; };
template <> struct IsVec<float2> { enum {value = 1}; };
template <> struct IsVec<float3> { enum {value = 1}; };
template <> struct IsVec<float4> { enum {value = 1}; };
template <> struct IsVec<float8> { enum {value = 1}; };
template <> struct IsVec<double1> { enum {value = 1}; };
template <> struct IsVec<double2> { enum {value = 1}; };
template <> struct IsVec<double3> { enum {value = 1}; };
template <> struct IsVec<double4> { enum {value = 1}; };
template <> struct IsVec<double8> { enum {value = 1}; };
template <class U> struct AddParameterType { typedef const U& type; };
template <class U> struct AddParameterType<U&> { typedef U& type; };
template <> struct AddParameterType<void> { typedef void type; };
template <class U> struct ReferenceTraits
{
enum { value = false };
typedef U type;
};
template <class U> struct ReferenceTraits<U&>
{
enum { value = true };
typedef U type;
};
template <class U> struct PointerTraits
{
enum { value = false };
typedef void type;
};
template <class U> struct PointerTraits<U*>
{
enum { value = true };
typedef U type;
};
template <class U> struct PointerTraits<U*&>
{
enum { value = true };
typedef U type;
};
template <class U> struct UnConst
{
typedef U type;
enum { value = 0 };
};
template <class U> struct UnConst<const U>
{
typedef U type;
enum { value = 1 };
};
template <class U> struct UnConst<const U&>
{
typedef U& type;
enum { value = 1 };
};
template <class U> struct UnVolatile
{
typedef U type;
enum { value = 0 };
};
template <class U> struct UnVolatile<volatile U>
{
typedef U type;
enum { value = 1 };
};
template <class U> struct UnVolatile<volatile U&>
{
typedef U& type;
enum { value = 1 };
};
} // namespace type_traits_detail
}}} // namespace cv { namespace cuda { namespace device
//! @endcond
#endif // OPENCV_CUDA_TYPE_TRAITS_DETAIL_HPP
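A minimal usage sketch for the compile-time traits above (illustrative only, not part of the header; assumes a CUDA-enabled translation unit, and the include path simply mirrors the usual opencv2 layout):

#include "opencv2/core/cuda/detail/type_traits_detail.hpp"

namespace ttd = cv::cuda::device::type_traits_detail;

// Select<flag, T1, T2>::type yields T1 when the flag is true, T2 otherwise.
typedef ttd::Select<ttd::IsIntegral<int>::value != 0, double, float>::type work_t;

static_assert(ttd::IsIntegral<int>::value == 1, "int is integral");
static_assert(ttd::IsUnsignedIntegral<uchar>::value == 1, "uchar is unsigned integral");
static_assert(ttd::IsFloat<work_t>::value == 1, "work_t resolves to double here");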

View File

@@ -0,0 +1,121 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef OPENCV_CUDA_VEC_DISTANCE_DETAIL_HPP
#define OPENCV_CUDA_VEC_DISTANCE_DETAIL_HPP
#include "../datamov_utils.hpp"
//! @cond IGNORED
namespace cv { namespace cuda { namespace device
{
namespace vec_distance_detail
{
template <int THREAD_DIM, int N> struct UnrollVecDiffCached
{
template <typename Dist, typename T1, typename T2>
static __device__ void calcCheck(const T1* vecCached, const T2* vecGlob, int len, Dist& dist, int ind)
{
if (ind < len)
{
T1 val1 = *vecCached++;
T2 val2;
ForceGlob<T2>::Load(vecGlob, ind, val2);
dist.reduceIter(val1, val2);
UnrollVecDiffCached<THREAD_DIM, N - 1>::calcCheck(vecCached, vecGlob, len, dist, ind + THREAD_DIM);
}
}
template <typename Dist, typename T1, typename T2>
static __device__ void calcWithoutCheck(const T1* vecCached, const T2* vecGlob, Dist& dist)
{
T1 val1 = *vecCached++;
T2 val2;
ForceGlob<T2>::Load(vecGlob, 0, val2);
vecGlob += THREAD_DIM;
dist.reduceIter(val1, val2);
UnrollVecDiffCached<THREAD_DIM, N - 1>::calcWithoutCheck(vecCached, vecGlob, dist);
}
};
template <int THREAD_DIM> struct UnrollVecDiffCached<THREAD_DIM, 0>
{
template <typename Dist, typename T1, typename T2>
static __device__ __forceinline__ void calcCheck(const T1*, const T2*, int, Dist&, int)
{
}
template <typename Dist, typename T1, typename T2>
static __device__ __forceinline__ void calcWithoutCheck(const T1*, const T2*, Dist&)
{
}
};
template <int THREAD_DIM, int MAX_LEN, bool LEN_EQ_MAX_LEN> struct VecDiffCachedCalculator;
template <int THREAD_DIM, int MAX_LEN> struct VecDiffCachedCalculator<THREAD_DIM, MAX_LEN, false>
{
template <typename Dist, typename T1, typename T2>
static __device__ __forceinline__ void calc(const T1* vecCached, const T2* vecGlob, int len, Dist& dist, int tid)
{
UnrollVecDiffCached<THREAD_DIM, MAX_LEN / THREAD_DIM>::calcCheck(vecCached, vecGlob, len, dist, tid);
}
};
template <int THREAD_DIM, int MAX_LEN> struct VecDiffCachedCalculator<THREAD_DIM, MAX_LEN, true>
{
template <typename Dist, typename T1, typename T2>
static __device__ __forceinline__ void calc(const T1* vecCached, const T2* vecGlob, int len, Dist& dist, int tid)
{
UnrollVecDiffCached<THREAD_DIM, MAX_LEN / THREAD_DIM>::calcWithoutCheck(vecCached, vecGlob + tid, dist);
}
};
} // namespace vec_distance_detail
}}} // namespace cv { namespace cuda { namespace device
//! @endcond
#endif // OPENCV_CUDA_VEC_DISTANCE_DETAIL_HPP
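An illustrative sketch of how the unrolled calculators above are driven; L2Sketch and partialL2 are hypothetical stand-ins for the Dist functors and callers in vec_distance.hpp (only reduceIter() is required by the calculators):

struct L2Sketch
{
    float sum;
    __device__ __forceinline__ L2Sketch() : sum(0.f) {}
    __device__ __forceinline__ void reduceIter(float a, float b) { float d = a - b; sum += d * d; }
};

// THREAD_DIM threads cooperate on one vector of at most MAX_LEN elements: each thread
// holds a strided slice of the query in 'cached' and reads the train vector from global memory.
template <int THREAD_DIM, int MAX_LEN>
__device__ float partialL2(const float* cached, const float* glob, int len, int tid)
{
    L2Sketch dist;
    // false -> every step is bounds-checked against 'len'; pass 'true' only when len == MAX_LEN is guaranteed
    cv::cuda::device::vec_distance_detail::VecDiffCachedCalculator<THREAD_DIM, MAX_LEN, false>
        ::calc(cached, glob, len, dist, tid);
    return dist.sum; // per-thread partial sum; a warp/block reduction would combine these
}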

View File

@@ -0,0 +1,88 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef OPENCV_CUDA_DYNAMIC_SMEM_HPP
#define OPENCV_CUDA_DYNAMIC_SMEM_HPP
/** @file
* @deprecated Use @ref cudev instead.
*/
//! @cond IGNORED
namespace cv { namespace cuda { namespace device
{
template<class T> struct DynamicSharedMem
{
__device__ __forceinline__ operator T*()
{
extern __shared__ int __smem[];
return (T*)__smem;
}
__device__ __forceinline__ operator const T*() const
{
extern __shared__ int __smem[];
return (T*)__smem;
}
};
// specialize for double to avoid unaligned memory access compile errors
template<> struct DynamicSharedMem<double>
{
__device__ __forceinline__ operator double*()
{
extern __shared__ double __smem_d[];
return (double*)__smem_d;
}
__device__ __forceinline__ operator const double*() const
{
extern __shared__ double __smem_d[];
return (double*)__smem_d;
}
};
}}}
//! @endcond
#endif // OPENCV_CUDA_DYNAMIC_SMEM_HPP
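An illustrative kernel that views the dynamically sized shared-memory window through DynamicSharedMem<T>; the kernel name and launch line are assumptions, and the launch must pass the shared-memory size as the third <<<>>> argument:

__global__ void stageThroughSmem(const float* in, float* out, float gain, int n)
{
    cv::cuda::device::DynamicSharedMem<float> smem;
    float* buf = smem;                                   // operator T*() exposes the extern __shared__ block
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    buf[threadIdx.x] = (i < n) ? in[i] * gain : 0.f;     // stage the scaled value in shared memory
    __syncthreads();
    if (i < n)
        out[i] = buf[threadIdx.x];
}
// launch sketch: stageThroughSmem<<<grid, block, block.x * sizeof(float)>>>(d_in, d_out, 2.f, n);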

View File

@@ -0,0 +1,269 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef OPENCV_CUDA_EMULATION_HPP_
#define OPENCV_CUDA_EMULATION_HPP_
#include "common.hpp"
#include "warp_reduce.hpp"
/** @file
* @deprecated Use @ref cudev instead.
*/
//! @cond IGNORED
namespace cv { namespace cuda { namespace device
{
struct Emulation
{
static __device__ __forceinline__ int syncthreadsOr(int pred)
{
#if defined (__CUDA_ARCH__) && (__CUDA_ARCH__ < 200)
            // just a compilation stub
return 0;
#else
return __syncthreads_or(pred);
#endif
}
template<int CTA_SIZE>
static __forceinline__ __device__ int Ballot(int predicate)
{
#if defined (__CUDA_ARCH__) && (__CUDA_ARCH__ >= 200)
return __ballot(predicate);
#else
__shared__ volatile int cta_buffer[CTA_SIZE];
int tid = threadIdx.x;
cta_buffer[tid] = predicate ? (1 << (tid & 31)) : 0;
return warp_reduce(cta_buffer);
#endif
}
struct smem
{
enum { TAG_MASK = (1U << ( (sizeof(unsigned int) << 3) - 5U)) - 1U };
template<typename T>
static __device__ __forceinline__ T atomicInc(T* address, T val)
{
#if defined (__CUDA_ARCH__) && (__CUDA_ARCH__ < 120)
T count;
unsigned int tag = threadIdx.x << ( (sizeof(unsigned int) << 3) - 5U);
do
{
count = *address & TAG_MASK;
count = tag | (count + 1);
*address = count;
} while (*address != count);
return (count & TAG_MASK) - 1;
#else
return ::atomicInc(address, val);
#endif
}
template<typename T>
static __device__ __forceinline__ T atomicAdd(T* address, T val)
{
#if defined (__CUDA_ARCH__) && (__CUDA_ARCH__ < 120)
T count;
unsigned int tag = threadIdx.x << ( (sizeof(unsigned int) << 3) - 5U);
do
{
count = *address & TAG_MASK;
count = tag | (count + val);
*address = count;
} while (*address != count);
return (count & TAG_MASK) - val;
#else
return ::atomicAdd(address, val);
#endif
}
template<typename T>
static __device__ __forceinline__ T atomicMin(T* address, T val)
{
#if defined (__CUDA_ARCH__) && (__CUDA_ARCH__ < 120)
T count = ::min(*address, val);
do
{
*address = count;
} while (*address > count);
return count;
#else
return ::atomicMin(address, val);
#endif
}
        }; // struct smem
struct glob
{
static __device__ __forceinline__ int atomicAdd(int* address, int val)
{
return ::atomicAdd(address, val);
}
static __device__ __forceinline__ unsigned int atomicAdd(unsigned int* address, unsigned int val)
{
return ::atomicAdd(address, val);
}
static __device__ __forceinline__ float atomicAdd(float* address, float val)
{
#if __CUDA_ARCH__ >= 200
return ::atomicAdd(address, val);
#else
int* address_as_i = (int*) address;
int old = *address_as_i, assumed;
do {
assumed = old;
old = ::atomicCAS(address_as_i, assumed,
__float_as_int(val + __int_as_float(assumed)));
} while (assumed != old);
return __int_as_float(old);
#endif
}
static __device__ __forceinline__ double atomicAdd(double* address, double val)
{
#if __CUDA_ARCH__ >= 130
unsigned long long int* address_as_ull = (unsigned long long int*) address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = ::atomicCAS(address_as_ull, assumed,
__double_as_longlong(val + __longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
#else
CV_UNUSED(address);
CV_UNUSED(val);
return 0.0;
#endif
}
static __device__ __forceinline__ int atomicMin(int* address, int val)
{
return ::atomicMin(address, val);
}
static __device__ __forceinline__ float atomicMin(float* address, float val)
{
#if __CUDA_ARCH__ >= 120
int* address_as_i = (int*) address;
int old = *address_as_i, assumed;
do {
assumed = old;
old = ::atomicCAS(address_as_i, assumed,
__float_as_int(::fminf(val, __int_as_float(assumed))));
} while (assumed != old);
return __int_as_float(old);
#else
CV_UNUSED(address);
CV_UNUSED(val);
return 0.0f;
#endif
}
static __device__ __forceinline__ double atomicMin(double* address, double val)
{
#if __CUDA_ARCH__ >= 130
unsigned long long int* address_as_ull = (unsigned long long int*) address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = ::atomicCAS(address_as_ull, assumed,
__double_as_longlong(::fmin(val, __longlong_as_double(assumed))));
} while (assumed != old);
return __longlong_as_double(old);
#else
CV_UNUSED(address);
CV_UNUSED(val);
return 0.0;
#endif
}
static __device__ __forceinline__ int atomicMax(int* address, int val)
{
return ::atomicMax(address, val);
}
static __device__ __forceinline__ float atomicMax(float* address, float val)
{
#if __CUDA_ARCH__ >= 120
int* address_as_i = (int*) address;
int old = *address_as_i, assumed;
do {
assumed = old;
old = ::atomicCAS(address_as_i, assumed,
__float_as_int(::fmaxf(val, __int_as_float(assumed))));
} while (assumed != old);
return __int_as_float(old);
#else
CV_UNUSED(address);
CV_UNUSED(val);
return 0.0f;
#endif
}
static __device__ __forceinline__ double atomicMax(double* address, double val)
{
#if __CUDA_ARCH__ >= 130
unsigned long long int* address_as_ull = (unsigned long long int*) address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = ::atomicCAS(address_as_ull, assumed,
__double_as_longlong(::fmax(val, __longlong_as_double(assumed))));
} while (assumed != old);
return __longlong_as_double(old);
#else
CV_UNUSED(address);
CV_UNUSED(val);
return 0.0;
#endif
}
};
}; //struct Emulation
}}} // namespace cv { namespace cuda { namespace device
//! @endcond
#endif /* OPENCV_CUDA_EMULATION_HPP_ */
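An illustrative use of the dispatching atomics above; Emulation::glob::atomicAdd resolves to the native instruction where available and to the compare-and-swap loop otherwise, so the same call compiles for all targets (the kernel name is an assumption):

__global__ void accumulateSketch(const float* data, int n, float* total)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        cv::cuda::device::Emulation::glob::atomicAdd(total, data[i]); // native or CAS-emulated float add
}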

View File

@@ -0,0 +1,293 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef OPENCV_CUDA_FILTERS_HPP
#define OPENCV_CUDA_FILTERS_HPP
#include "saturate_cast.hpp"
#include "vec_traits.hpp"
#include "vec_math.hpp"
#include "type_traits.hpp"
#include "nppdefs.h"
/** @file
* @deprecated Use @ref cudev instead.
*/
//! @cond IGNORED
namespace cv { namespace cuda { namespace device
{
template <typename Ptr2D> struct PointFilter
{
typedef typename Ptr2D::elem_type elem_type;
typedef float index_type;
explicit __host__ __device__ __forceinline__ PointFilter(const Ptr2D& src_, float fx = 0.f, float fy = 0.f)
: src(src_)
{
CV_UNUSED(fx);
CV_UNUSED(fy);
}
__device__ __forceinline__ elem_type operator ()(float y, float x) const
{
return src(__float2int_rz(y), __float2int_rz(x));
}
Ptr2D src;
};
template <typename Ptr2D> struct LinearFilter
{
typedef typename Ptr2D::elem_type elem_type;
typedef float index_type;
explicit __host__ __device__ __forceinline__ LinearFilter(const Ptr2D& src_, float fx = 0.f, float fy = 0.f)
: src(src_)
{
CV_UNUSED(fx);
CV_UNUSED(fy);
}
__device__ __forceinline__ elem_type operator ()(float y, float x) const
{
typedef typename TypeVec<float, VecTraits<elem_type>::cn>::vec_type work_type;
work_type out = VecTraits<work_type>::all(0);
const int x1 = __float2int_rd(x);
const int y1 = __float2int_rd(y);
if (x1 <= NPP_MIN_32S || x1 >= NPP_MAX_32S || y1 <= NPP_MIN_32S || y1 >= NPP_MAX_32S)
{
elem_type src_reg = src(y1, x1);
out = out + src_reg * 1.0f;
return saturate_cast<elem_type>(out);
}
const int x2 = x1 + 1;
const int y2 = y1 + 1;
elem_type src_reg = src(y1, x1);
out = out + src_reg * ((x2 - x) * (y2 - y));
src_reg = src(y1, x2);
out = out + src_reg * ((x - x1) * (y2 - y));
src_reg = src(y2, x1);
out = out + src_reg * ((x2 - x) * (y - y1));
src_reg = src(y2, x2);
out = out + src_reg * ((x - x1) * (y - y1));
return saturate_cast<elem_type>(out);
}
Ptr2D src;
};
template <typename Ptr2D> struct CubicFilter
{
typedef typename Ptr2D::elem_type elem_type;
typedef float index_type;
typedef typename TypeVec<float, VecTraits<elem_type>::cn>::vec_type work_type;
explicit __host__ __device__ __forceinline__ CubicFilter(const Ptr2D& src_, float fx = 0.f, float fy = 0.f)
: src(src_)
{
CV_UNUSED(fx);
CV_UNUSED(fy);
}
static __device__ __forceinline__ float bicubicCoeff(float x_)
{
float x = fabsf(x_);
if (x <= 1.0f)
{
return x * x * (1.5f * x - 2.5f) + 1.0f;
}
else if (x < 2.0f)
{
return x * (x * (-0.5f * x + 2.5f) - 4.0f) + 2.0f;
}
else
{
return 0.0f;
}
}
__device__ elem_type operator ()(float y, float x) const
{
const float xmin = ::ceilf(x - 2.0f);
const float xmax = ::floorf(x + 2.0f);
const float ymin = ::ceilf(y - 2.0f);
const float ymax = ::floorf(y + 2.0f);
work_type sum = VecTraits<work_type>::all(0);
float wsum = 0.0f;
for (float cy = ymin; cy <= ymax; cy += 1.0f)
{
for (float cx = xmin; cx <= xmax; cx += 1.0f)
{
const float w = bicubicCoeff(x - cx) * bicubicCoeff(y - cy);
sum = sum + w * src(__float2int_rd(cy), __float2int_rd(cx));
wsum += w;
}
}
work_type res = (!wsum)? VecTraits<work_type>::all(0) : sum / wsum;
return saturate_cast<elem_type>(res);
}
Ptr2D src;
};
// for integer scaling
template <typename Ptr2D> struct IntegerAreaFilter
{
typedef typename Ptr2D::elem_type elem_type;
typedef float index_type;
explicit __host__ __device__ __forceinline__ IntegerAreaFilter(const Ptr2D& src_, float scale_x_, float scale_y_)
: src(src_), scale_x(scale_x_), scale_y(scale_y_), scale(1.f / (scale_x * scale_y)) {}
__device__ __forceinline__ elem_type operator ()(float y, float x) const
{
float fsx1 = x * scale_x;
float fsx2 = fsx1 + scale_x;
int sx1 = __float2int_ru(fsx1);
int sx2 = __float2int_rd(fsx2);
float fsy1 = y * scale_y;
float fsy2 = fsy1 + scale_y;
int sy1 = __float2int_ru(fsy1);
int sy2 = __float2int_rd(fsy2);
typedef typename TypeVec<float, VecTraits<elem_type>::cn>::vec_type work_type;
work_type out = VecTraits<work_type>::all(0.f);
for(int dy = sy1; dy < sy2; ++dy)
for(int dx = sx1; dx < sx2; ++dx)
{
out = out + src(dy, dx) * scale;
}
return saturate_cast<elem_type>(out);
}
Ptr2D src;
float scale_x, scale_y ,scale;
};
template <typename Ptr2D> struct AreaFilter
{
typedef typename Ptr2D::elem_type elem_type;
typedef float index_type;
explicit __host__ __device__ __forceinline__ AreaFilter(const Ptr2D& src_, float scale_x_, float scale_y_)
: src(src_), scale_x(scale_x_), scale_y(scale_y_){}
__device__ __forceinline__ elem_type operator ()(float y, float x) const
{
float fsx1 = x * scale_x;
float fsx2 = fsx1 + scale_x;
int sx1 = __float2int_ru(fsx1);
int sx2 = __float2int_rd(fsx2);
float fsy1 = y * scale_y;
float fsy2 = fsy1 + scale_y;
int sy1 = __float2int_ru(fsy1);
int sy2 = __float2int_rd(fsy2);
float scale = 1.f / (fminf(scale_x, src.width - fsx1) * fminf(scale_y, src.height - fsy1));
typedef typename TypeVec<float, VecTraits<elem_type>::cn>::vec_type work_type;
work_type out = VecTraits<work_type>::all(0.f);
for (int dy = sy1; dy < sy2; ++dy)
{
for (int dx = sx1; dx < sx2; ++dx)
out = out + src(dy, dx) * scale;
if (sx1 > fsx1)
out = out + src(dy, (sx1 -1) ) * ((sx1 - fsx1) * scale);
if (sx2 < fsx2)
out = out + src(dy, sx2) * ((fsx2 -sx2) * scale);
}
if (sy1 > fsy1)
for (int dx = sx1; dx < sx2; ++dx)
out = out + src( (sy1 - 1) , dx) * ((sy1 -fsy1) * scale);
if (sy2 < fsy2)
for (int dx = sx1; dx < sx2; ++dx)
out = out + src(sy2, dx) * ((fsy2 -sy2) * scale);
if ((sy1 > fsy1) && (sx1 > fsx1))
out = out + src( (sy1 - 1) , (sx1 - 1)) * ((sy1 -fsy1) * (sx1 -fsx1) * scale);
if ((sy1 > fsy1) && (sx2 < fsx2))
out = out + src( (sy1 - 1) , sx2) * ((sy1 -fsy1) * (fsx2 -sx2) * scale);
if ((sy2 < fsy2) && (sx2 < fsx2))
out = out + src(sy2, sx2) * ((fsy2 -sy2) * (fsx2 -sx2) * scale);
if ((sy2 < fsy2) && (sx1 > fsx1))
out = out + src(sy2, (sx1 - 1)) * ((fsy2 -sy2) * (sx1 -fsx1) * scale);
return saturate_cast<elem_type>(out);
}
Ptr2D src;
float scale_x, scale_y;
        int width, height;
};
}}} // namespace cv { namespace cuda { namespace device
//! @endcond
#endif // OPENCV_CUDA_FILTERS_HPP
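An illustrative bilinear-resize kernel built on LinearFilter; PlainPtr2D is a hypothetical row-major accessor standing in for the PtrStep-style wrappers real callers use (the filter only needs elem_type and operator()(y, x)):

struct PlainPtr2D
{
    typedef float elem_type;
    typedef int   index_type;
    const float* data;
    int step;                                            // row stride in elements
    __device__ __forceinline__ float operator ()(int y, int x) const { return data[y * step + x]; }
};

__global__ void resizeSketch(PlainPtr2D src, float* dst, int dcols, int drows, float fx, float fy)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x < dcols && y < drows)
    {
        cv::cuda::device::LinearFilter<PlainPtr2D> filter(src);
        dst[y * dcols + x] = filter(y * fy, x * fx);     // bilinear sample at the mapped source coordinate
    }
}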

View File

@@ -0,0 +1,79 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef OPENCV_CUDA_DEVICE_FUNCATTRIB_HPP
#define OPENCV_CUDA_DEVICE_FUNCATTRIB_HPP
#include <cstdio>
/** @file
* @deprecated Use @ref cudev instead.
*/
//! @cond IGNORED
namespace cv { namespace cuda { namespace device
{
template<class Func>
void printFuncAttrib(Func& func)
{
cudaFuncAttributes attrs;
cudaFuncGetAttributes(&attrs, func);
printf("=== Function stats ===\n");
printf("Name: \n");
printf("sharedSizeBytes = %d\n", attrs.sharedSizeBytes);
printf("constSizeBytes = %d\n", attrs.constSizeBytes);
printf("localSizeBytes = %d\n", attrs.localSizeBytes);
printf("maxThreadsPerBlock = %d\n", attrs.maxThreadsPerBlock);
printf("numRegs = %d\n", attrs.numRegs);
printf("ptxVersion = %d\n", attrs.ptxVersion);
printf("binaryVersion = %d\n", attrs.binaryVersion);
printf("\n");
fflush(stdout);
}
}}} // namespace cv { namespace cuda { namespace device
//! @endcond
#endif /* OPENCV_CUDA_DEVICE_FUNCATTRIB_HPP */
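An illustrative host-side call to printFuncAttrib; the kernel is a hypothetical placeholder:

__global__ void someKernelSketch(float* p) { if (p) p[threadIdx.x] = 0.f; }

void reportKernelStats()
{
    // dumps shared/constant/local memory footprints, register count and PTX/binary versions to stdout
    cv::cuda::device::printFuncAttrib(someKernelSketch);
}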

View File

@@ -0,0 +1,805 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef OPENCV_CUDA_FUNCTIONAL_HPP
#define OPENCV_CUDA_FUNCTIONAL_HPP
#include <functional>
#include "saturate_cast.hpp"
#include "vec_traits.hpp"
#include "type_traits.hpp"
/** @file
* @deprecated Use @ref cudev instead.
*/
//! @cond IGNORED
namespace cv { namespace cuda { namespace device
{
// Function Objects
template<typename Argument, typename Result> struct unary_function
{
typedef Argument argument_type;
typedef Result result_type;
};
template<typename Argument1, typename Argument2, typename Result> struct binary_function
{
typedef Argument1 first_argument_type;
typedef Argument2 second_argument_type;
typedef Result result_type;
};
// Arithmetic Operations
template <typename T> struct plus : binary_function<T, T, T>
{
__device__ __forceinline__ T operator ()(typename TypeTraits<T>::ParameterType a,
typename TypeTraits<T>::ParameterType b) const
{
return a + b;
}
__host__ __device__ __forceinline__ plus() {}
__host__ __device__ __forceinline__ plus(const plus&) {}
};
template <typename T> struct minus : binary_function<T, T, T>
{
__device__ __forceinline__ T operator ()(typename TypeTraits<T>::ParameterType a,
typename TypeTraits<T>::ParameterType b) const
{
return a - b;
}
__host__ __device__ __forceinline__ minus() {}
__host__ __device__ __forceinline__ minus(const minus&) {}
};
template <typename T> struct multiplies : binary_function<T, T, T>
{
__device__ __forceinline__ T operator ()(typename TypeTraits<T>::ParameterType a,
typename TypeTraits<T>::ParameterType b) const
{
return a * b;
}
__host__ __device__ __forceinline__ multiplies() {}
__host__ __device__ __forceinline__ multiplies(const multiplies&) {}
};
template <typename T> struct divides : binary_function<T, T, T>
{
__device__ __forceinline__ T operator ()(typename TypeTraits<T>::ParameterType a,
typename TypeTraits<T>::ParameterType b) const
{
return a / b;
}
__host__ __device__ __forceinline__ divides() {}
__host__ __device__ __forceinline__ divides(const divides&) {}
};
template <typename T> struct modulus : binary_function<T, T, T>
{
__device__ __forceinline__ T operator ()(typename TypeTraits<T>::ParameterType a,
typename TypeTraits<T>::ParameterType b) const
{
return a % b;
}
__host__ __device__ __forceinline__ modulus() {}
__host__ __device__ __forceinline__ modulus(const modulus&) {}
};
template <typename T> struct negate : unary_function<T, T>
{
__device__ __forceinline__ T operator ()(typename TypeTraits<T>::ParameterType a) const
{
return -a;
}
__host__ __device__ __forceinline__ negate() {}
__host__ __device__ __forceinline__ negate(const negate&) {}
};
// Comparison Operations
template <typename T> struct equal_to : binary_function<T, T, bool>
{
__device__ __forceinline__ bool operator ()(typename TypeTraits<T>::ParameterType a,
typename TypeTraits<T>::ParameterType b) const
{
return a == b;
}
__host__ __device__ __forceinline__ equal_to() {}
__host__ __device__ __forceinline__ equal_to(const equal_to&) {}
};
template <typename T> struct not_equal_to : binary_function<T, T, bool>
{
__device__ __forceinline__ bool operator ()(typename TypeTraits<T>::ParameterType a,
typename TypeTraits<T>::ParameterType b) const
{
return a != b;
}
__host__ __device__ __forceinline__ not_equal_to() {}
__host__ __device__ __forceinline__ not_equal_to(const not_equal_to&) {}
};
template <typename T> struct greater : binary_function<T, T, bool>
{
__device__ __forceinline__ bool operator ()(typename TypeTraits<T>::ParameterType a,
typename TypeTraits<T>::ParameterType b) const
{
return a > b;
}
__host__ __device__ __forceinline__ greater() {}
__host__ __device__ __forceinline__ greater(const greater&) {}
};
template <typename T> struct less : binary_function<T, T, bool>
{
__device__ __forceinline__ bool operator ()(typename TypeTraits<T>::ParameterType a,
typename TypeTraits<T>::ParameterType b) const
{
return a < b;
}
__host__ __device__ __forceinline__ less() {}
__host__ __device__ __forceinline__ less(const less&) {}
};
template <typename T> struct greater_equal : binary_function<T, T, bool>
{
__device__ __forceinline__ bool operator ()(typename TypeTraits<T>::ParameterType a,
typename TypeTraits<T>::ParameterType b) const
{
return a >= b;
}
__host__ __device__ __forceinline__ greater_equal() {}
__host__ __device__ __forceinline__ greater_equal(const greater_equal&) {}
};
template <typename T> struct less_equal : binary_function<T, T, bool>
{
__device__ __forceinline__ bool operator ()(typename TypeTraits<T>::ParameterType a,
typename TypeTraits<T>::ParameterType b) const
{
return a <= b;
}
__host__ __device__ __forceinline__ less_equal() {}
__host__ __device__ __forceinline__ less_equal(const less_equal&) {}
};
// Logical Operations
template <typename T> struct logical_and : binary_function<T, T, bool>
{
__device__ __forceinline__ bool operator ()(typename TypeTraits<T>::ParameterType a,
typename TypeTraits<T>::ParameterType b) const
{
return a && b;
}
__host__ __device__ __forceinline__ logical_and() {}
__host__ __device__ __forceinline__ logical_and(const logical_and&) {}
};
template <typename T> struct logical_or : binary_function<T, T, bool>
{
__device__ __forceinline__ bool operator ()(typename TypeTraits<T>::ParameterType a,
typename TypeTraits<T>::ParameterType b) const
{
return a || b;
}
__host__ __device__ __forceinline__ logical_or() {}
__host__ __device__ __forceinline__ logical_or(const logical_or&) {}
};
template <typename T> struct logical_not : unary_function<T, bool>
{
__device__ __forceinline__ bool operator ()(typename TypeTraits<T>::ParameterType a) const
{
return !a;
}
__host__ __device__ __forceinline__ logical_not() {}
__host__ __device__ __forceinline__ logical_not(const logical_not&) {}
};
// Bitwise Operations
template <typename T> struct bit_and : binary_function<T, T, T>
{
__device__ __forceinline__ T operator ()(typename TypeTraits<T>::ParameterType a,
typename TypeTraits<T>::ParameterType b) const
{
return a & b;
}
__host__ __device__ __forceinline__ bit_and() {}
__host__ __device__ __forceinline__ bit_and(const bit_and&) {}
};
template <typename T> struct bit_or : binary_function<T, T, T>
{
__device__ __forceinline__ T operator ()(typename TypeTraits<T>::ParameterType a,
typename TypeTraits<T>::ParameterType b) const
{
return a | b;
}
__host__ __device__ __forceinline__ bit_or() {}
__host__ __device__ __forceinline__ bit_or(const bit_or&) {}
};
template <typename T> struct bit_xor : binary_function<T, T, T>
{
__device__ __forceinline__ T operator ()(typename TypeTraits<T>::ParameterType a,
typename TypeTraits<T>::ParameterType b) const
{
return a ^ b;
}
__host__ __device__ __forceinline__ bit_xor() {}
__host__ __device__ __forceinline__ bit_xor(const bit_xor&) {}
};
template <typename T> struct bit_not : unary_function<T, T>
{
__device__ __forceinline__ T operator ()(typename TypeTraits<T>::ParameterType v) const
{
return ~v;
}
__host__ __device__ __forceinline__ bit_not() {}
__host__ __device__ __forceinline__ bit_not(const bit_not&) {}
};
// Generalized Identity Operations
template <typename T> struct identity : unary_function<T, T>
{
__device__ __forceinline__ typename TypeTraits<T>::ParameterType operator()(typename TypeTraits<T>::ParameterType x) const
{
return x;
}
__host__ __device__ __forceinline__ identity() {}
__host__ __device__ __forceinline__ identity(const identity&) {}
};
template <typename T1, typename T2> struct project1st : binary_function<T1, T2, T1>
{
__device__ __forceinline__ typename TypeTraits<T1>::ParameterType operator()(typename TypeTraits<T1>::ParameterType lhs, typename TypeTraits<T2>::ParameterType rhs) const
{
return lhs;
}
__host__ __device__ __forceinline__ project1st() {}
__host__ __device__ __forceinline__ project1st(const project1st&) {}
};
template <typename T1, typename T2> struct project2nd : binary_function<T1, T2, T2>
{
__device__ __forceinline__ typename TypeTraits<T2>::ParameterType operator()(typename TypeTraits<T1>::ParameterType lhs, typename TypeTraits<T2>::ParameterType rhs) const
{
return rhs;
}
__host__ __device__ __forceinline__ project2nd() {}
__host__ __device__ __forceinline__ project2nd(const project2nd&) {}
};
// Min/Max Operations
#define OPENCV_CUDA_IMPLEMENT_MINMAX(name, type, op) \
template <> struct name<type> : binary_function<type, type, type> \
{ \
__device__ __forceinline__ type operator()(type lhs, type rhs) const {return op(lhs, rhs);} \
__host__ __device__ __forceinline__ name() {}\
__host__ __device__ __forceinline__ name(const name&) {}\
};
template <typename T> struct maximum : binary_function<T, T, T>
{
__device__ __forceinline__ T operator()(typename TypeTraits<T>::ParameterType lhs, typename TypeTraits<T>::ParameterType rhs) const
{
return max(lhs, rhs);
}
__host__ __device__ __forceinline__ maximum() {}
__host__ __device__ __forceinline__ maximum(const maximum&) {}
};
OPENCV_CUDA_IMPLEMENT_MINMAX(maximum, uchar, ::max)
OPENCV_CUDA_IMPLEMENT_MINMAX(maximum, schar, ::max)
OPENCV_CUDA_IMPLEMENT_MINMAX(maximum, char, ::max)
OPENCV_CUDA_IMPLEMENT_MINMAX(maximum, ushort, ::max)
OPENCV_CUDA_IMPLEMENT_MINMAX(maximum, short, ::max)
OPENCV_CUDA_IMPLEMENT_MINMAX(maximum, int, ::max)
OPENCV_CUDA_IMPLEMENT_MINMAX(maximum, uint, ::max)
OPENCV_CUDA_IMPLEMENT_MINMAX(maximum, float, ::fmax)
OPENCV_CUDA_IMPLEMENT_MINMAX(maximum, double, ::fmax)
template <typename T> struct minimum : binary_function<T, T, T>
{
__device__ __forceinline__ T operator()(typename TypeTraits<T>::ParameterType lhs, typename TypeTraits<T>::ParameterType rhs) const
{
return min(lhs, rhs);
}
__host__ __device__ __forceinline__ minimum() {}
__host__ __device__ __forceinline__ minimum(const minimum&) {}
};
OPENCV_CUDA_IMPLEMENT_MINMAX(minimum, uchar, ::min)
OPENCV_CUDA_IMPLEMENT_MINMAX(minimum, schar, ::min)
OPENCV_CUDA_IMPLEMENT_MINMAX(minimum, char, ::min)
OPENCV_CUDA_IMPLEMENT_MINMAX(minimum, ushort, ::min)
OPENCV_CUDA_IMPLEMENT_MINMAX(minimum, short, ::min)
OPENCV_CUDA_IMPLEMENT_MINMAX(minimum, int, ::min)
OPENCV_CUDA_IMPLEMENT_MINMAX(minimum, uint, ::min)
OPENCV_CUDA_IMPLEMENT_MINMAX(minimum, float, ::fmin)
OPENCV_CUDA_IMPLEMENT_MINMAX(minimum, double, ::fmin)
#undef OPENCV_CUDA_IMPLEMENT_MINMAX
// Math functions
template <typename T> struct abs_func : unary_function<T, T>
{
__device__ __forceinline__ T operator ()(typename TypeTraits<T>::ParameterType x) const
{
return abs(x);
}
__host__ __device__ __forceinline__ abs_func() {}
__host__ __device__ __forceinline__ abs_func(const abs_func&) {}
};
template <> struct abs_func<unsigned char> : unary_function<unsigned char, unsigned char>
{
__device__ __forceinline__ unsigned char operator ()(unsigned char x) const
{
return x;
}
__host__ __device__ __forceinline__ abs_func() {}
__host__ __device__ __forceinline__ abs_func(const abs_func&) {}
};
template <> struct abs_func<signed char> : unary_function<signed char, signed char>
{
__device__ __forceinline__ signed char operator ()(signed char x) const
{
return ::abs((int)x);
}
__host__ __device__ __forceinline__ abs_func() {}
__host__ __device__ __forceinline__ abs_func(const abs_func&) {}
};
template <> struct abs_func<char> : unary_function<char, char>
{
__device__ __forceinline__ char operator ()(char x) const
{
return ::abs((int)x);
}
__host__ __device__ __forceinline__ abs_func() {}
__host__ __device__ __forceinline__ abs_func(const abs_func&) {}
};
template <> struct abs_func<unsigned short> : unary_function<unsigned short, unsigned short>
{
__device__ __forceinline__ unsigned short operator ()(unsigned short x) const
{
return x;
}
__host__ __device__ __forceinline__ abs_func() {}
__host__ __device__ __forceinline__ abs_func(const abs_func&) {}
};
template <> struct abs_func<short> : unary_function<short, short>
{
__device__ __forceinline__ short operator ()(short x) const
{
return ::abs((int)x);
}
__host__ __device__ __forceinline__ abs_func() {}
__host__ __device__ __forceinline__ abs_func(const abs_func&) {}
};
template <> struct abs_func<unsigned int> : unary_function<unsigned int, unsigned int>
{
__device__ __forceinline__ unsigned int operator ()(unsigned int x) const
{
return x;
}
__host__ __device__ __forceinline__ abs_func() {}
__host__ __device__ __forceinline__ abs_func(const abs_func&) {}
};
template <> struct abs_func<int> : unary_function<int, int>
{
__device__ __forceinline__ int operator ()(int x) const
{
return ::abs(x);
}
__host__ __device__ __forceinline__ abs_func() {}
__host__ __device__ __forceinline__ abs_func(const abs_func&) {}
};
template <> struct abs_func<float> : unary_function<float, float>
{
__device__ __forceinline__ float operator ()(float x) const
{
return ::fabsf(x);
}
__host__ __device__ __forceinline__ abs_func() {}
__host__ __device__ __forceinline__ abs_func(const abs_func&) {}
};
template <> struct abs_func<double> : unary_function<double, double>
{
__device__ __forceinline__ double operator ()(double x) const
{
return ::fabs(x);
}
__host__ __device__ __forceinline__ abs_func() {}
__host__ __device__ __forceinline__ abs_func(const abs_func&) {}
};
#define OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(name, func) \
template <typename T> struct name ## _func : unary_function<T, float> \
{ \
__device__ __forceinline__ float operator ()(typename TypeTraits<T>::ParameterType v) const \
{ \
return func ## f(v); \
} \
__host__ __device__ __forceinline__ name ## _func() {} \
__host__ __device__ __forceinline__ name ## _func(const name ## _func&) {} \
}; \
template <> struct name ## _func<double> : unary_function<double, double> \
{ \
__device__ __forceinline__ double operator ()(double v) const \
{ \
return func(v); \
} \
__host__ __device__ __forceinline__ name ## _func() {} \
__host__ __device__ __forceinline__ name ## _func(const name ## _func&) {} \
};
#define OPENCV_CUDA_IMPLEMENT_BIN_FUNCTOR(name, func) \
template <typename T> struct name ## _func : binary_function<T, T, float> \
{ \
__device__ __forceinline__ float operator ()(typename TypeTraits<T>::ParameterType v1, typename TypeTraits<T>::ParameterType v2) const \
{ \
return func ## f(v1, v2); \
} \
__host__ __device__ __forceinline__ name ## _func() {} \
__host__ __device__ __forceinline__ name ## _func(const name ## _func&) {} \
}; \
template <> struct name ## _func<double> : binary_function<double, double, double> \
{ \
__device__ __forceinline__ double operator ()(double v1, double v2) const \
{ \
return func(v1, v2); \
} \
__host__ __device__ __forceinline__ name ## _func() {} \
__host__ __device__ __forceinline__ name ## _func(const name ## _func&) {} \
};
OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(sqrt, ::sqrt)
OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(exp, ::exp)
OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(exp2, ::exp2)
OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(exp10, ::exp10)
OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(log, ::log)
OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(log2, ::log2)
OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(log10, ::log10)
OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(sin, ::sin)
OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(cos, ::cos)
OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(tan, ::tan)
OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(asin, ::asin)
OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(acos, ::acos)
OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(atan, ::atan)
OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(sinh, ::sinh)
OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(cosh, ::cosh)
OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(tanh, ::tanh)
OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(asinh, ::asinh)
OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(acosh, ::acosh)
OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(atanh, ::atanh)
OPENCV_CUDA_IMPLEMENT_BIN_FUNCTOR(hypot, ::hypot)
OPENCV_CUDA_IMPLEMENT_BIN_FUNCTOR(atan2, ::atan2)
OPENCV_CUDA_IMPLEMENT_BIN_FUNCTOR(pow, ::pow)
#undef OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR
#undef OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR_NO_DOUBLE
#undef OPENCV_CUDA_IMPLEMENT_BIN_FUNCTOR
template<typename T> struct hypot_sqr_func : binary_function<T, T, float>
{
__device__ __forceinline__ T operator ()(typename TypeTraits<T>::ParameterType src1, typename TypeTraits<T>::ParameterType src2) const
{
return src1 * src1 + src2 * src2;
}
__host__ __device__ __forceinline__ hypot_sqr_func() {}
__host__ __device__ __forceinline__ hypot_sqr_func(const hypot_sqr_func&) {}
};
// Saturate Cast Functor
template <typename T, typename D> struct saturate_cast_func : unary_function<T, D>
{
__device__ __forceinline__ D operator ()(typename TypeTraits<T>::ParameterType v) const
{
return saturate_cast<D>(v);
}
__host__ __device__ __forceinline__ saturate_cast_func() {}
__host__ __device__ __forceinline__ saturate_cast_func(const saturate_cast_func&) {}
};
// Threshold Functors
template <typename T> struct thresh_binary_func : unary_function<T, T>
{
__host__ __device__ __forceinline__ thresh_binary_func(T thresh_, T maxVal_) : thresh(thresh_), maxVal(maxVal_) {}
__device__ __forceinline__ T operator()(typename TypeTraits<T>::ParameterType src) const
{
return (src > thresh) * maxVal;
}
__host__ __device__ __forceinline__ thresh_binary_func() {}
__host__ __device__ __forceinline__ thresh_binary_func(const thresh_binary_func& other)
: thresh(other.thresh), maxVal(other.maxVal) {}
T thresh;
T maxVal;
};
template <typename T> struct thresh_binary_inv_func : unary_function<T, T>
{
__host__ __device__ __forceinline__ thresh_binary_inv_func(T thresh_, T maxVal_) : thresh(thresh_), maxVal(maxVal_) {}
__device__ __forceinline__ T operator()(typename TypeTraits<T>::ParameterType src) const
{
return (src <= thresh) * maxVal;
}
__host__ __device__ __forceinline__ thresh_binary_inv_func() {}
__host__ __device__ __forceinline__ thresh_binary_inv_func(const thresh_binary_inv_func& other)
: thresh(other.thresh), maxVal(other.maxVal) {}
T thresh;
T maxVal;
};
template <typename T> struct thresh_trunc_func : unary_function<T, T>
{
explicit __host__ __device__ __forceinline__ thresh_trunc_func(T thresh_, T maxVal_ = 0) : thresh(thresh_) {CV_UNUSED(maxVal_);}
__device__ __forceinline__ T operator()(typename TypeTraits<T>::ParameterType src) const
{
return minimum<T>()(src, thresh);
}
__host__ __device__ __forceinline__ thresh_trunc_func() {}
__host__ __device__ __forceinline__ thresh_trunc_func(const thresh_trunc_func& other)
: thresh(other.thresh) {}
T thresh;
};
template <typename T> struct thresh_to_zero_func : unary_function<T, T>
{
explicit __host__ __device__ __forceinline__ thresh_to_zero_func(T thresh_, T maxVal_ = 0) : thresh(thresh_) {CV_UNUSED(maxVal_);}
__device__ __forceinline__ T operator()(typename TypeTraits<T>::ParameterType src) const
{
return (src > thresh) * src;
}
__host__ __device__ __forceinline__ thresh_to_zero_func() {}
__host__ __device__ __forceinline__ thresh_to_zero_func(const thresh_to_zero_func& other)
: thresh(other.thresh) {}
T thresh;
};
template <typename T> struct thresh_to_zero_inv_func : unary_function<T, T>
{
explicit __host__ __device__ __forceinline__ thresh_to_zero_inv_func(T thresh_, T maxVal_ = 0) : thresh(thresh_) {CV_UNUSED(maxVal_);}
__device__ __forceinline__ T operator()(typename TypeTraits<T>::ParameterType src) const
{
return (src <= thresh) * src;
}
__host__ __device__ __forceinline__ thresh_to_zero_inv_func() {}
__host__ __device__ __forceinline__ thresh_to_zero_inv_func(const thresh_to_zero_inv_func& other)
: thresh(other.thresh) {}
T thresh;
};
// Function Object Adaptors
template <typename Predicate> struct unary_negate : unary_function<typename Predicate::argument_type, bool>
{
explicit __host__ __device__ __forceinline__ unary_negate(const Predicate& p) : pred(p) {}
__device__ __forceinline__ bool operator()(typename TypeTraits<typename Predicate::argument_type>::ParameterType x) const
{
return !pred(x);
}
__host__ __device__ __forceinline__ unary_negate() {}
__host__ __device__ __forceinline__ unary_negate(const unary_negate& other) : pred(other.pred) {}
Predicate pred;
};
template <typename Predicate> __host__ __device__ __forceinline__ unary_negate<Predicate> not1(const Predicate& pred)
{
return unary_negate<Predicate>(pred);
}
template <typename Predicate> struct binary_negate : binary_function<typename Predicate::first_argument_type, typename Predicate::second_argument_type, bool>
{
explicit __host__ __device__ __forceinline__ binary_negate(const Predicate& p) : pred(p) {}
__device__ __forceinline__ bool operator()(typename TypeTraits<typename Predicate::first_argument_type>::ParameterType x,
typename TypeTraits<typename Predicate::second_argument_type>::ParameterType y) const
{
return !pred(x,y);
}
__host__ __device__ __forceinline__ binary_negate() {}
__host__ __device__ __forceinline__ binary_negate(const binary_negate& other) : pred(other.pred) {}
Predicate pred;
};
template <typename BinaryPredicate> __host__ __device__ __forceinline__ binary_negate<BinaryPredicate> not2(const BinaryPredicate& pred)
{
return binary_negate<BinaryPredicate>(pred);
}
template <typename Op> struct binder1st : unary_function<typename Op::second_argument_type, typename Op::result_type>
{
__host__ __device__ __forceinline__ binder1st(const Op& op_, const typename Op::first_argument_type& arg1_) : op(op_), arg1(arg1_) {}
__device__ __forceinline__ typename Op::result_type operator ()(typename TypeTraits<typename Op::second_argument_type>::ParameterType a) const
{
return op(arg1, a);
}
__host__ __device__ __forceinline__ binder1st() {}
__host__ __device__ __forceinline__ binder1st(const binder1st& other) : op(other.op), arg1(other.arg1) {}
Op op;
typename Op::first_argument_type arg1;
};
template <typename Op, typename T> __host__ __device__ __forceinline__ binder1st<Op> bind1st(const Op& op, const T& x)
{
return binder1st<Op>(op, typename Op::first_argument_type(x));
}
template <typename Op> struct binder2nd : unary_function<typename Op::first_argument_type, typename Op::result_type>
{
__host__ __device__ __forceinline__ binder2nd(const Op& op_, const typename Op::second_argument_type& arg2_) : op(op_), arg2(arg2_) {}
__forceinline__ __device__ typename Op::result_type operator ()(typename TypeTraits<typename Op::first_argument_type>::ParameterType a) const
{
return op(a, arg2);
}
__host__ __device__ __forceinline__ binder2nd() {}
__host__ __device__ __forceinline__ binder2nd(const binder2nd& other) : op(other.op), arg2(other.arg2) {}
Op op;
typename Op::second_argument_type arg2;
};
template <typename Op, typename T> __host__ __device__ __forceinline__ binder2nd<Op> bind2nd(const Op& op, const T& x)
{
return binder2nd<Op>(op, typename Op::second_argument_type(x));
}
// Functor Traits
template <typename F> struct IsUnaryFunction
{
typedef char Yes;
struct No {Yes a[2];};
template <typename T, typename D> static Yes check(unary_function<T, D>);
static No check(...);
static F makeF();
enum { value = (sizeof(check(makeF())) == sizeof(Yes)) };
};
template <typename F> struct IsBinaryFunction
{
typedef char Yes;
struct No {Yes a[2];};
template <typename T1, typename T2, typename D> static Yes check(binary_function<T1, T2, D>);
static No check(...);
static F makeF();
enum { value = (sizeof(check(makeF())) == sizeof(Yes)) };
};
namespace functional_detail
{
template <size_t src_elem_size, size_t dst_elem_size> struct UnOpShift { enum { shift = 1 }; };
template <size_t src_elem_size> struct UnOpShift<src_elem_size, 1> { enum { shift = 4 }; };
template <size_t src_elem_size> struct UnOpShift<src_elem_size, 2> { enum { shift = 2 }; };
template <typename T, typename D> struct DefaultUnaryShift
{
enum { shift = UnOpShift<sizeof(T), sizeof(D)>::shift };
};
template <size_t src_elem_size1, size_t src_elem_size2, size_t dst_elem_size> struct BinOpShift { enum { shift = 1 }; };
template <size_t src_elem_size1, size_t src_elem_size2> struct BinOpShift<src_elem_size1, src_elem_size2, 1> { enum { shift = 4 }; };
template <size_t src_elem_size1, size_t src_elem_size2> struct BinOpShift<src_elem_size1, src_elem_size2, 2> { enum { shift = 2 }; };
template <typename T1, typename T2, typename D> struct DefaultBinaryShift
{
enum { shift = BinOpShift<sizeof(T1), sizeof(T2), sizeof(D)>::shift };
};
template <typename Func, bool unary = IsUnaryFunction<Func>::value> struct ShiftDispatcher;
template <typename Func> struct ShiftDispatcher<Func, true>
{
enum { shift = DefaultUnaryShift<typename Func::argument_type, typename Func::result_type>::shift };
};
template <typename Func> struct ShiftDispatcher<Func, false>
{
enum { shift = DefaultBinaryShift<typename Func::first_argument_type, typename Func::second_argument_type, typename Func::result_type>::shift };
};
}
template <typename Func> struct DefaultTransformShift
{
enum { shift = functional_detail::ShiftDispatcher<Func>::shift };
};
template <typename Func> struct DefaultTransformFunctorTraits
{
enum { simple_block_dim_x = 16 };
enum { simple_block_dim_y = 16 };
enum { smart_block_dim_x = 16 };
enum { smart_block_dim_y = 16 };
enum { smart_shift = DefaultTransformShift<Func>::shift };
};
template <typename Func> struct TransformFunctorTraits : DefaultTransformFunctorTraits<Func> {};
#define OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(type) \
template <> struct TransformFunctorTraits< type > : DefaultTransformFunctorTraits< type >
}}} // namespace cv { namespace cuda { namespace device
//! @endcond
#endif // OPENCV_CUDA_FUNCTIONAL_HPP
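An illustrative kernel consuming these functors directly (OpenCV's transform kernels receive them as template parameters instead; the kernel and names below are assumptions):

using namespace cv::cuda::device;

__global__ void thresholdSketch(const float* in, float* out, int n, float thresh, float maxVal)
{
    thresh_binary_func<float> op(thresh, maxVal);        // op(v) == (v > thresh) ? maxVal : 0
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        out[i] = op(in[i]);
}

// Composition works the same way, e.g. a unary "add ten" built from plus<int>:
//     binder2nd< plus<int> > addTen = bind2nd(plus<int>(), 10);   // addTen(x) == x + 10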

View File

@@ -0,0 +1,128 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef OPENCV_CUDA_LIMITS_HPP
#define OPENCV_CUDA_LIMITS_HPP
#include <limits.h>
#include <float.h>
#include "common.hpp"
/** @file
* @deprecated Use @ref cudev instead.
*/
//! @cond IGNORED
namespace cv { namespace cuda { namespace device
{
template <class T> struct numeric_limits;
template <> struct numeric_limits<bool>
{
__device__ __forceinline__ static bool min() { return false; }
__device__ __forceinline__ static bool max() { return true; }
static const bool is_signed = false;
};
template <> struct numeric_limits<signed char>
{
__device__ __forceinline__ static signed char min() { return SCHAR_MIN; }
__device__ __forceinline__ static signed char max() { return SCHAR_MAX; }
static const bool is_signed = true;
};
template <> struct numeric_limits<unsigned char>
{
__device__ __forceinline__ static unsigned char min() { return 0; }
__device__ __forceinline__ static unsigned char max() { return UCHAR_MAX; }
static const bool is_signed = false;
};
template <> struct numeric_limits<short>
{
__device__ __forceinline__ static short min() { return SHRT_MIN; }
__device__ __forceinline__ static short max() { return SHRT_MAX; }
static const bool is_signed = true;
};
template <> struct numeric_limits<unsigned short>
{
__device__ __forceinline__ static unsigned short min() { return 0; }
__device__ __forceinline__ static unsigned short max() { return USHRT_MAX; }
static const bool is_signed = false;
};
template <> struct numeric_limits<int>
{
__device__ __forceinline__ static int min() { return INT_MIN; }
__device__ __forceinline__ static int max() { return INT_MAX; }
static const bool is_signed = true;
};
template <> struct numeric_limits<unsigned int>
{
__device__ __forceinline__ static unsigned int min() { return 0; }
__device__ __forceinline__ static unsigned int max() { return UINT_MAX; }
static const bool is_signed = false;
};
template <> struct numeric_limits<float>
{
__device__ __forceinline__ static float min() { return FLT_MIN; }
__device__ __forceinline__ static float max() { return FLT_MAX; }
__device__ __forceinline__ static float epsilon() { return FLT_EPSILON; }
static const bool is_signed = true;
};
template <> struct numeric_limits<double>
{
__device__ __forceinline__ static double min() { return DBL_MIN; }
__device__ __forceinline__ static double max() { return DBL_MAX; }
__device__ __forceinline__ static double epsilon() { return DBL_EPSILON; }
static const bool is_signed = true;
};
}}} // namespace cv { namespace cuda { namespace device
//! @endcond
#endif // OPENCV_CUDA_LIMITS_HPP
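A minimal usage sketch, assuming the kernel name and the surrounding min/max scan are illustrative: the per-thread accumulators are seeded with the neutral elements exposed by numeric_limits above.
__global__ void minMaxInitKernel(float* partialMin, float* partialMax)
{
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    // Seed with +FLT_MAX / -FLT_MAX so that any real value replaces the initial one.
    partialMin[tid] =  cv::cuda::device::numeric_limits<float>::max();
    partialMax[tid] = -cv::cuda::device::numeric_limits<float>::max();
}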


@@ -0,0 +1,209 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef OPENCV_CUDA_REDUCE_HPP
#define OPENCV_CUDA_REDUCE_HPP
#ifndef THRUST_DEBUG // eliminate -Wundef warning
#define THRUST_DEBUG 0
#endif
#include <thrust/tuple.h>
#include "detail/reduce.hpp"
#include "detail/reduce_key_val.hpp"
/** @file
* @deprecated Use @ref cudev instead.
*/
//! @cond IGNORED
namespace cv { namespace cuda { namespace device
{
template <int N, typename T, class Op>
__device__ __forceinline__ void reduce(volatile T* smem, T& val, unsigned int tid, const Op& op)
{
reduce_detail::Dispatcher<N>::reductor::template reduce<volatile T*, T&, const Op&>(smem, val, tid, op);
}
template <int N,
typename P0, typename P1, typename P2, typename P3, typename P4, typename P5, typename P6, typename P7, typename P8, typename P9,
typename R0, typename R1, typename R2, typename R3, typename R4, typename R5, typename R6, typename R7, typename R8, typename R9,
class Op0, class Op1, class Op2, class Op3, class Op4, class Op5, class Op6, class Op7, class Op8, class Op9>
__device__ __forceinline__ void reduce(const thrust::tuple<P0, P1, P2, P3, P4, P5, P6, P7, P8, P9>& smem,
const thrust::tuple<R0, R1, R2, R3, R4, R5, R6, R7, R8, R9>& val,
unsigned int tid,
const thrust::tuple<Op0, Op1, Op2, Op3, Op4, Op5, Op6, Op7, Op8, Op9>& op)
{
reduce_detail::Dispatcher<N>::reductor::template reduce<
const thrust::tuple<P0, P1, P2, P3, P4, P5, P6, P7, P8, P9>&,
const thrust::tuple<R0, R1, R2, R3, R4, R5, R6, R7, R8, R9>&,
const thrust::tuple<Op0, Op1, Op2, Op3, Op4, Op5, Op6, Op7, Op8, Op9>&>(smem, val, tid, op);
}
template <unsigned int N, typename K, typename V, class Cmp>
__device__ __forceinline__ void reduceKeyVal(volatile K* skeys, K& key, volatile V* svals, V& val, unsigned int tid, const Cmp& cmp)
{
reduce_key_val_detail::Dispatcher<N>::reductor::template reduce<volatile K*, K&, volatile V*, V&, const Cmp&>(skeys, key, svals, val, tid, cmp);
}
template <unsigned int N,
typename K,
typename VP0, typename VP1, typename VP2, typename VP3, typename VP4, typename VP5, typename VP6, typename VP7, typename VP8, typename VP9,
typename VR0, typename VR1, typename VR2, typename VR3, typename VR4, typename VR5, typename VR6, typename VR7, typename VR8, typename VR9,
class Cmp>
__device__ __forceinline__ void reduceKeyVal(volatile K* skeys, K& key,
const thrust::tuple<VP0, VP1, VP2, VP3, VP4, VP5, VP6, VP7, VP8, VP9>& svals,
const thrust::tuple<VR0, VR1, VR2, VR3, VR4, VR5, VR6, VR7, VR8, VR9>& val,
unsigned int tid, const Cmp& cmp)
{
reduce_key_val_detail::Dispatcher<N>::reductor::template reduce<volatile K*, K&,
const thrust::tuple<VP0, VP1, VP2, VP3, VP4, VP5, VP6, VP7, VP8, VP9>&,
const thrust::tuple<VR0, VR1, VR2, VR3, VR4, VR5, VR6, VR7, VR8, VR9>&,
const Cmp&>(skeys, key, svals, val, tid, cmp);
}
template <unsigned int N,
typename KP0, typename KP1, typename KP2, typename KP3, typename KP4, typename KP5, typename KP6, typename KP7, typename KP8, typename KP9,
typename KR0, typename KR1, typename KR2, typename KR3, typename KR4, typename KR5, typename KR6, typename KR7, typename KR8, typename KR9,
typename VP0, typename VP1, typename VP2, typename VP3, typename VP4, typename VP5, typename VP6, typename VP7, typename VP8, typename VP9,
typename VR0, typename VR1, typename VR2, typename VR3, typename VR4, typename VR5, typename VR6, typename VR7, typename VR8, typename VR9,
class Cmp0, class Cmp1, class Cmp2, class Cmp3, class Cmp4, class Cmp5, class Cmp6, class Cmp7, class Cmp8, class Cmp9>
__device__ __forceinline__ void reduceKeyVal(const thrust::tuple<KP0, KP1, KP2, KP3, KP4, KP5, KP6, KP7, KP8, KP9>& skeys,
const thrust::tuple<KR0, KR1, KR2, KR3, KR4, KR5, KR6, KR7, KR8, KR9>& key,
const thrust::tuple<VP0, VP1, VP2, VP3, VP4, VP5, VP6, VP7, VP8, VP9>& svals,
const thrust::tuple<VR0, VR1, VR2, VR3, VR4, VR5, VR6, VR7, VR8, VR9>& val,
unsigned int tid,
const thrust::tuple<Cmp0, Cmp1, Cmp2, Cmp3, Cmp4, Cmp5, Cmp6, Cmp7, Cmp8, Cmp9>& cmp)
{
reduce_key_val_detail::Dispatcher<N>::reductor::template reduce<
const thrust::tuple<KP0, KP1, KP2, KP3, KP4, KP5, KP6, KP7, KP8, KP9>&,
const thrust::tuple<KR0, KR1, KR2, KR3, KR4, KR5, KR6, KR7, KR8, KR9>&,
const thrust::tuple<VP0, VP1, VP2, VP3, VP4, VP5, VP6, VP7, VP8, VP9>&,
const thrust::tuple<VR0, VR1, VR2, VR3, VR4, VR5, VR6, VR7, VR8, VR9>&,
const thrust::tuple<Cmp0, Cmp1, Cmp2, Cmp3, Cmp4, Cmp5, Cmp6, Cmp7, Cmp8, Cmp9>&
>(skeys, key, svals, val, tid, cmp);
}
// smem_tuple
template <typename T0>
__device__ __forceinline__
thrust::tuple<volatile T0*>
smem_tuple(T0* t0)
{
return thrust::make_tuple((volatile T0*) t0);
}
template <typename T0, typename T1>
__device__ __forceinline__
thrust::tuple<volatile T0*, volatile T1*>
smem_tuple(T0* t0, T1* t1)
{
return thrust::make_tuple((volatile T0*) t0, (volatile T1*) t1);
}
template <typename T0, typename T1, typename T2>
__device__ __forceinline__
thrust::tuple<volatile T0*, volatile T1*, volatile T2*>
smem_tuple(T0* t0, T1* t1, T2* t2)
{
return thrust::make_tuple((volatile T0*) t0, (volatile T1*) t1, (volatile T2*) t2);
}
template <typename T0, typename T1, typename T2, typename T3>
__device__ __forceinline__
thrust::tuple<volatile T0*, volatile T1*, volatile T2*, volatile T3*>
smem_tuple(T0* t0, T1* t1, T2* t2, T3* t3)
{
return thrust::make_tuple((volatile T0*) t0, (volatile T1*) t1, (volatile T2*) t2, (volatile T3*) t3);
}
template <typename T0, typename T1, typename T2, typename T3, typename T4>
__device__ __forceinline__
thrust::tuple<volatile T0*, volatile T1*, volatile T2*, volatile T3*, volatile T4*>
smem_tuple(T0* t0, T1* t1, T2* t2, T3* t3, T4* t4)
{
return thrust::make_tuple((volatile T0*) t0, (volatile T1*) t1, (volatile T2*) t2, (volatile T3*) t3, (volatile T4*) t4);
}
template <typename T0, typename T1, typename T2, typename T3, typename T4, typename T5>
__device__ __forceinline__
thrust::tuple<volatile T0*, volatile T1*, volatile T2*, volatile T3*, volatile T4*, volatile T5*>
smem_tuple(T0* t0, T1* t1, T2* t2, T3* t3, T4* t4, T5* t5)
{
return thrust::make_tuple((volatile T0*) t0, (volatile T1*) t1, (volatile T2*) t2, (volatile T3*) t3, (volatile T4*) t4, (volatile T5*) t5);
}
template <typename T0, typename T1, typename T2, typename T3, typename T4, typename T5, typename T6>
__device__ __forceinline__
thrust::tuple<volatile T0*, volatile T1*, volatile T2*, volatile T3*, volatile T4*, volatile T5*, volatile T6*>
smem_tuple(T0* t0, T1* t1, T2* t2, T3* t3, T4* t4, T5* t5, T6* t6)
{
return thrust::make_tuple((volatile T0*) t0, (volatile T1*) t1, (volatile T2*) t2, (volatile T3*) t3, (volatile T4*) t4, (volatile T5*) t5, (volatile T6*) t6);
}
template <typename T0, typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7>
__device__ __forceinline__
thrust::tuple<volatile T0*, volatile T1*, volatile T2*, volatile T3*, volatile T4*, volatile T5*, volatile T6*, volatile T7*>
smem_tuple(T0* t0, T1* t1, T2* t2, T3* t3, T4* t4, T5* t5, T6* t6, T7* t7)
{
return thrust::make_tuple((volatile T0*) t0, (volatile T1*) t1, (volatile T2*) t2, (volatile T3*) t3, (volatile T4*) t4, (volatile T5*) t5, (volatile T6*) t6, (volatile T7*) t7);
}
template <typename T0, typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8>
__device__ __forceinline__
thrust::tuple<volatile T0*, volatile T1*, volatile T2*, volatile T3*, volatile T4*, volatile T5*, volatile T6*, volatile T7*, volatile T8*>
smem_tuple(T0* t0, T1* t1, T2* t2, T3* t3, T4* t4, T5* t5, T6* t6, T7* t7, T8* t8)
{
return thrust::make_tuple((volatile T0*) t0, (volatile T1*) t1, (volatile T2*) t2, (volatile T3*) t3, (volatile T4*) t4, (volatile T5*) t5, (volatile T6*) t6, (volatile T7*) t7, (volatile T8*) t8);
}
template <typename T0, typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9>
__device__ __forceinline__
thrust::tuple<volatile T0*, volatile T1*, volatile T2*, volatile T3*, volatile T4*, volatile T5*, volatile T6*, volatile T7*, volatile T8*, volatile T9*>
smem_tuple(T0* t0, T1* t1, T2* t2, T3* t3, T4* t4, T5* t5, T6* t6, T7* t7, T8* t8, T9* t9)
{
return thrust::make_tuple((volatile T0*) t0, (volatile T1*) t1, (volatile T2*) t2, (volatile T3*) t3, (volatile T4*) t4, (volatile T5*) t5, (volatile T6*) t6, (volatile T7*) t7, (volatile T8*) t8, (volatile T9*) t9);
}
}}} // namespace cv { namespace cuda { namespace device
//! @endcond
#endif // OPENCV_CUDA_REDUCE_HPP
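A minimal block-sum sketch, assuming BLOCK_SIZE is a power of two (e.g. 256), the kernel name is illustrative, and plus<> is the addition functor from functional.hpp. reduce() stores each thread's value in shared memory, synchronizes, and leaves the block total in thread 0's val.
template <int BLOCK_SIZE>
__global__ void blockSumKernel(const float* data, float* out)
{
    __shared__ float smem[BLOCK_SIZE];
    const unsigned int tid = threadIdx.x;
    float val = data[blockIdx.x * BLOCK_SIZE + tid];
    // Tree reduction across the block; smem_tuple() would replace the plain pointer
    // when several values per thread are reduced together.
    cv::cuda::device::reduce<BLOCK_SIZE>(smem, val, tid, cv::cuda::device::plus<float>());
    if (tid == 0)
        out[blockIdx.x] = val;
}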


@@ -0,0 +1,292 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef OPENCV_CUDA_SATURATE_CAST_HPP
#define OPENCV_CUDA_SATURATE_CAST_HPP
#include "common.hpp"
/** @file
* @deprecated Use @ref cudev instead.
*/
//! @cond IGNORED
namespace cv { namespace cuda { namespace device
{
template<typename _Tp> __device__ __forceinline__ _Tp saturate_cast(uchar v) { return _Tp(v); }
template<typename _Tp> __device__ __forceinline__ _Tp saturate_cast(schar v) { return _Tp(v); }
template<typename _Tp> __device__ __forceinline__ _Tp saturate_cast(ushort v) { return _Tp(v); }
template<typename _Tp> __device__ __forceinline__ _Tp saturate_cast(short v) { return _Tp(v); }
template<typename _Tp> __device__ __forceinline__ _Tp saturate_cast(uint v) { return _Tp(v); }
template<typename _Tp> __device__ __forceinline__ _Tp saturate_cast(int v) { return _Tp(v); }
template<typename _Tp> __device__ __forceinline__ _Tp saturate_cast(float v) { return _Tp(v); }
template<typename _Tp> __device__ __forceinline__ _Tp saturate_cast(double v) { return _Tp(v); }
template<> __device__ __forceinline__ uchar saturate_cast<uchar>(schar v)
{
uint res = 0;
int vi = v;
asm("cvt.sat.u8.s8 %0, %1;" : "=r"(res) : "r"(vi));
return res;
}
template<> __device__ __forceinline__ uchar saturate_cast<uchar>(short v)
{
uint res = 0;
asm("cvt.sat.u8.s16 %0, %1;" : "=r"(res) : "h"(v));
return res;
}
template<> __device__ __forceinline__ uchar saturate_cast<uchar>(ushort v)
{
uint res = 0;
asm("cvt.sat.u8.u16 %0, %1;" : "=r"(res) : "h"(v));
return res;
}
template<> __device__ __forceinline__ uchar saturate_cast<uchar>(int v)
{
uint res = 0;
asm("cvt.sat.u8.s32 %0, %1;" : "=r"(res) : "r"(v));
return res;
}
template<> __device__ __forceinline__ uchar saturate_cast<uchar>(uint v)
{
uint res = 0;
asm("cvt.sat.u8.u32 %0, %1;" : "=r"(res) : "r"(v));
return res;
}
template<> __device__ __forceinline__ uchar saturate_cast<uchar>(float v)
{
uint res = 0;
asm("cvt.rni.sat.u8.f32 %0, %1;" : "=r"(res) : "f"(v));
return res;
}
template<> __device__ __forceinline__ uchar saturate_cast<uchar>(double v)
{
#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 130
uint res = 0;
asm("cvt.rni.sat.u8.f64 %0, %1;" : "=r"(res) : "d"(v));
return res;
#else
return saturate_cast<uchar>((float)v);
#endif
}
template<> __device__ __forceinline__ schar saturate_cast<schar>(uchar v)
{
uint res = 0;
uint vi = v;
asm("cvt.sat.s8.u8 %0, %1;" : "=r"(res) : "r"(vi));
return res;
}
template<> __device__ __forceinline__ schar saturate_cast<schar>(short v)
{
uint res = 0;
asm("cvt.sat.s8.s16 %0, %1;" : "=r"(res) : "h"(v));
return res;
}
template<> __device__ __forceinline__ schar saturate_cast<schar>(ushort v)
{
uint res = 0;
asm("cvt.sat.s8.u16 %0, %1;" : "=r"(res) : "h"(v));
return res;
}
template<> __device__ __forceinline__ schar saturate_cast<schar>(int v)
{
uint res = 0;
asm("cvt.sat.s8.s32 %0, %1;" : "=r"(res) : "r"(v));
return res;
}
template<> __device__ __forceinline__ schar saturate_cast<schar>(uint v)
{
uint res = 0;
asm("cvt.sat.s8.u32 %0, %1;" : "=r"(res) : "r"(v));
return res;
}
template<> __device__ __forceinline__ schar saturate_cast<schar>(float v)
{
uint res = 0;
asm("cvt.rni.sat.s8.f32 %0, %1;" : "=r"(res) : "f"(v));
return res;
}
template<> __device__ __forceinline__ schar saturate_cast<schar>(double v)
{
#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 130
uint res = 0;
asm("cvt.rni.sat.s8.f64 %0, %1;" : "=r"(res) : "d"(v));
return res;
#else
return saturate_cast<schar>((float)v);
#endif
}
template<> __device__ __forceinline__ ushort saturate_cast<ushort>(schar v)
{
ushort res = 0;
int vi = v;
asm("cvt.sat.u16.s8 %0, %1;" : "=h"(res) : "r"(vi));
return res;
}
template<> __device__ __forceinline__ ushort saturate_cast<ushort>(short v)
{
ushort res = 0;
asm("cvt.sat.u16.s16 %0, %1;" : "=h"(res) : "h"(v));
return res;
}
template<> __device__ __forceinline__ ushort saturate_cast<ushort>(int v)
{
ushort res = 0;
asm("cvt.sat.u16.s32 %0, %1;" : "=h"(res) : "r"(v));
return res;
}
template<> __device__ __forceinline__ ushort saturate_cast<ushort>(uint v)
{
ushort res = 0;
asm("cvt.sat.u16.u32 %0, %1;" : "=h"(res) : "r"(v));
return res;
}
template<> __device__ __forceinline__ ushort saturate_cast<ushort>(float v)
{
ushort res = 0;
asm("cvt.rni.sat.u16.f32 %0, %1;" : "=h"(res) : "f"(v));
return res;
}
template<> __device__ __forceinline__ ushort saturate_cast<ushort>(double v)
{
#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 130
ushort res = 0;
asm("cvt.rni.sat.u16.f64 %0, %1;" : "=h"(res) : "d"(v));
return res;
#else
return saturate_cast<ushort>((float)v);
#endif
}
template<> __device__ __forceinline__ short saturate_cast<short>(ushort v)
{
short res = 0;
asm("cvt.sat.s16.u16 %0, %1;" : "=h"(res) : "h"(v));
return res;
}
template<> __device__ __forceinline__ short saturate_cast<short>(int v)
{
short res = 0;
asm("cvt.sat.s16.s32 %0, %1;" : "=h"(res) : "r"(v));
return res;
}
template<> __device__ __forceinline__ short saturate_cast<short>(uint v)
{
short res = 0;
asm("cvt.sat.s16.u32 %0, %1;" : "=h"(res) : "r"(v));
return res;
}
template<> __device__ __forceinline__ short saturate_cast<short>(float v)
{
short res = 0;
asm("cvt.rni.sat.s16.f32 %0, %1;" : "=h"(res) : "f"(v));
return res;
}
template<> __device__ __forceinline__ short saturate_cast<short>(double v)
{
#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 130
short res = 0;
asm("cvt.rni.sat.s16.f64 %0, %1;" : "=h"(res) : "d"(v));
return res;
#else
return saturate_cast<short>((float)v);
#endif
}
template<> __device__ __forceinline__ int saturate_cast<int>(uint v)
{
int res = 0;
asm("cvt.sat.s32.u32 %0, %1;" : "=r"(res) : "r"(v));
return res;
}
template<> __device__ __forceinline__ int saturate_cast<int>(float v)
{
return __float2int_rn(v);
}
template<> __device__ __forceinline__ int saturate_cast<int>(double v)
{
#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 130
return __double2int_rn(v);
#else
return saturate_cast<int>((float)v);
#endif
}
template<> __device__ __forceinline__ uint saturate_cast<uint>(schar v)
{
uint res = 0;
int vi = v;
asm("cvt.sat.u32.s8 %0, %1;" : "=r"(res) : "r"(vi));
return res;
}
template<> __device__ __forceinline__ uint saturate_cast<uint>(short v)
{
uint res = 0;
asm("cvt.sat.u32.s16 %0, %1;" : "=r"(res) : "h"(v));
return res;
}
template<> __device__ __forceinline__ uint saturate_cast<uint>(int v)
{
uint res = 0;
asm("cvt.sat.u32.s32 %0, %1;" : "=r"(res) : "r"(v));
return res;
}
template<> __device__ __forceinline__ uint saturate_cast<uint>(float v)
{
return __float2uint_rn(v);
}
template<> __device__ __forceinline__ uint saturate_cast<uint>(double v)
{
#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 130
return __double2uint_rn(v);
#else
return saturate_cast<uint>((float)v);
#endif
}
}}} // namespace cv { namespace cuda { namespace device
//! @endcond
#endif /* OPENCV_CUDA_SATURATE_CAST_HPP */
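A minimal sketch of the intended use, with the kernel name and offset parameter being illustrative: arithmetic is done in a wider signed type and saturate_cast clamps the result back into the destination range instead of letting it wrap.
__global__ void addOffsetKernel(const uchar* src, uchar* dst, int n, int offset)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        // e.g. src[i] = 250 and offset = 20 give 270, which is clamped to 255.
        dst[i] = cv::cuda::device::saturate_cast<uchar>((int)src[i] + offset);
}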
