Add project files.

CaiXiang
2025-01-20 10:30:01 +08:00
parent 77371da5d7
commit 752be79e06
1010 changed files with 610100 additions and 0 deletions

3rdparty/opencv/inc/opencv2/mcc/ccm.hpp vendored Normal file

@@ -0,0 +1,519 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright(C) 2020, Huawei Technologies Co.,Ltd. All rights reserved.
// Third party copyrights are property of their respective owners.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Author: Longbu Wang <wanglongbu@huawei.com.com>
// Jinheng Zhang <zhangjinheng1@huawei.com>
// Chenqi Shan <shanchenqi@huawei.com>
#ifndef __OPENCV_MCC_CCM_HPP__
#define __OPENCV_MCC_CCM_HPP__
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
namespace cv
{
namespace ccm
{
/** @addtogroup color_correction
@{
Introduction
------------
The purpose of color correction is to adjust the color response of input
and output devices to a known state. The device being calibrated is sometimes
called the calibration source; the color space used as the standard is sometimes
called the calibration target. Color calibration has been used in many industries,
such as television production, games, photography, engineering, chemistry,
medicine, etc. Due to the manufacturing process of the input and output equipment,
the channel response has nonlinear distortion. In order to correct the picture output
of the equipment, it is necessary to calibrate the captured color against the actual color.
*/
/** @brief Enum of the possible types of ccm.
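As an illustration (the subscript \f$l\f$ denotes a linearized value, as in the linearization section below),
for a linearized color \f$[R_l, G_l, B_l]\f$ the two shapes are applied as
\f$[R_l, G_l, B_l]\,M_{3\times3}\f$ and \f$[R_l, G_l, B_l, 1]\,M_{4\times3}\f$ respectively;
the appended constant 1 is what makes the \f$4\times3\f$ form affine.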
*/
enum CCM_TYPE
{
CCM_3x3, ///< The CCM with the shape \f$3\times3\f$ performs linear transformation on color values.
CCM_4x3, ///< The CCM with the shape \f$4\times3\f$ performs affine transformation.
};
/** @brief Enum of the possible types of initial method.
*/
enum INITIAL_METHOD_TYPE
{
INITIAL_METHOD_WHITE_BALANCE, ///< The white balance method. The initial value is:\n
/// \f$
/// M_{CCM}=
/// \begin{bmatrix}
/// k_R & 0 & 0\\
/// 0 & k_G & 0\\
/// 0 & 0 & k_B\\
/// \end{bmatrix}
/// \f$\n
/// where\n
/// \f$
/// k_R=mean(R_{li}')/mean(R_{li})\\
/// k_G=mean(G_{li}')/mean(G_{li})\\
/// k_B=mean(B_{li}')/mean(B_{li})
/// \f$
INITIAL_METHOD_LEAST_SQUARE, ///< the least-squares method, an optimal solution under the linear RGB distance function
};
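/* A minimal sketch of the white-balance initialization above (illustrative only;
 * it assumes `srcl` and `dstl` are CV_64FC3 matrices holding the linearized
 * detected and reference colors in RGB order):
 *
 *   cv::Scalar ms = cv::mean(srcl), md = cv::mean(dstl);
 *   cv::Mat ccm0 = (cv::Mat_<double>(3, 3) <<
 *       md[0] / ms[0], 0, 0,
 *       0, md[1] / ms[1], 0,
 *       0, 0, md[2] / ms[2]);  // diag(k_R, k_G, k_B)
 */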
/** @brief Built-in ColorChecker charts (Macbeth, Vinyl and DigitalSG) with 2deg D50
*/
enum CONST_COLOR {
COLORCHECKER_Macbeth, ///< Macbeth ColorChecker
COLORCHECKER_Vinyl, ///< DKK ColorChecker
COLORCHECKER_DigitalSG, ///< DigitalSG ColorChecker with 140 squares
};
enum COLOR_SPACE {
COLOR_SPACE_sRGB, ///< https://en.wikipedia.org/wiki/SRGB , RGB color space
COLOR_SPACE_sRGBL, ///< https://en.wikipedia.org/wiki/SRGB , linear RGB color space
COLOR_SPACE_AdobeRGB, ///< https://en.wikipedia.org/wiki/Adobe_RGB_color_space , RGB color space
COLOR_SPACE_AdobeRGBL, ///< https://en.wikipedia.org/wiki/Adobe_RGB_color_space , linear RGB color space
COLOR_SPACE_WideGamutRGB, ///< https://en.wikipedia.org/wiki/Wide-gamut_RGB_color_space , RGB color space
COLOR_SPACE_WideGamutRGBL, ///< https://en.wikipedia.org/wiki/Wide-gamut_RGB_color_space , linear RGB color space
COLOR_SPACE_ProPhotoRGB, ///< https://en.wikipedia.org/wiki/ProPhoto_RGB_color_space , RGB color space
COLOR_SPACE_ProPhotoRGBL, ///< https://en.wikipedia.org/wiki/ProPhoto_RGB_color_space , linear RGB color space
COLOR_SPACE_DCI_P3_RGB, ///< https://en.wikipedia.org/wiki/DCI-P3 , RGB color space
COLOR_SPACE_DCI_P3_RGBL, ///< https://en.wikipedia.org/wiki/DCI-P3 , linear RGB color space
COLOR_SPACE_AppleRGB, ///< https://en.wikipedia.org/wiki/RGB_color_space , RGB color space
COLOR_SPACE_AppleRGBL, ///< https://en.wikipedia.org/wiki/RGB_color_space , linear RGB color space
COLOR_SPACE_REC_709_RGB, ///< https://en.wikipedia.org/wiki/Rec._709 , RGB color space
COLOR_SPACE_REC_709_RGBL, ///< https://en.wikipedia.org/wiki/Rec._709 , linear RGB color space
COLOR_SPACE_REC_2020_RGB, ///< https://en.wikipedia.org/wiki/Rec._2020 , RGB color space
COLOR_SPACE_REC_2020_RGBL, ///< https://en.wikipedia.org/wiki/Rec._2020 , linear RGB color space
COLOR_SPACE_XYZ_D65_2, ///< https://en.wikipedia.org/wiki/CIE_1931_color_space , non-RGB color space
COLOR_SPACE_XYZ_D65_10, ///< non-RGB color space
COLOR_SPACE_XYZ_D50_2, ///< non-RGB color space
COLOR_SPACE_XYZ_D50_10, ///< non-RGB color space
COLOR_SPACE_XYZ_A_2, ///< non-RGB color space
COLOR_SPACE_XYZ_A_10, ///< non-RGB color space
COLOR_SPACE_XYZ_D55_2, ///< non-RGB color space
COLOR_SPACE_XYZ_D55_10, ///< non-RGB color space
COLOR_SPACE_XYZ_D75_2, ///< non-RGB color space
COLOR_SPACE_XYZ_D75_10, ///< non-RGB color space
COLOR_SPACE_XYZ_E_2, ///< non-RGB color space
COLOR_SPACE_XYZ_E_10, ///< non-RGB color space
COLOR_SPACE_Lab_D65_2, ///< https://en.wikipedia.org/wiki/CIELAB_color_space , non-RGB color space
COLOR_SPACE_Lab_D65_10, ///< non-RGB color space
COLOR_SPACE_Lab_D50_2, ///< non-RGB color space
COLOR_SPACE_Lab_D50_10, ///< non-RGB color space
COLOR_SPACE_Lab_A_2, ///< non-RGB color space
COLOR_SPACE_Lab_A_10, ///< non-RGB color space
COLOR_SPACE_Lab_D55_2, ///< non-RGB color space
COLOR_SPACE_Lab_D55_10, ///< non-RGB color space
COLOR_SPACE_Lab_D75_2, ///< non-RGB color space
COLOR_SPACE_Lab_D75_10, ///< non-RGB color space
COLOR_SPACE_Lab_E_2, ///< non-RGB color space
COLOR_SPACE_Lab_E_10, ///< non-RGB color space
};
/** @brief Linearization transformation type
The first step in color correction is to linearize the detected colors.
Because the input color space has not been calibrated, we usually use some empirical methods to linearize.
There are several common linearization methods.
The first is identical transformation, the second is gamma correction, and the third is polynomial fitting.
Linearization is generally an elementwise function. The mathematical symbols are as follows:
\f$C\f$: any channel of a color, could be \f$R, G\f$ or \f$B\f$.
\f$R, G, B\f$: \f$R, G, B\f$ channels respectively.
\f$G\f$: grayscale;
\f$s,sl\f$: subscript, which represents the detected data and its linearized value, the former is the input and the latter is the output;
\f$d,dl\f$: subscript, which represents the reference data and its linearized value
### Identical Transformation
No change is made during identical-transformation linearization, usually because the tristimulus values of the input RGB image are already proportional to the luminance.
For example, if the input measurement data is in RAW format, the measurement data is already linear, so no linearization is required.
The identity transformation formula is as follows:
\f[
C_{sl}=C_s
\f]
### Gamma Correction
Gamma correction is a means of applying a nonlinear mapping in RGB space; see the Color Space documentation for details.
In the linearization part, the value of \f$\gamma\f$ is usually set to 2.2.
You can also customize the value.
The formula for gamma correction linearization is as follows:
\f[
C_{sl}=C_s^{\gamma},\qquad C_s\ge0\\
C_{sl}=-(-C_s)^{\gamma},\qquad C_s<0
\f]
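A minimal elementwise sketch of this formula for a CV_64FC3 image (the helper
name `linearizeGamma` is illustrative and not part of this API; `<cmath>` is assumed):
@code{.cpp}
cv::Mat linearizeGamma(const cv::Mat& src, double gamma = 2.2)
{
    CV_Assert(src.type() == CV_64FC3);
    cv::Mat dst = src.clone();
    dst.forEach<cv::Vec3d>([gamma](cv::Vec3d& px, const int*) {
        for (int c = 0; c < 3; c++)  // C_sl = sign(C_s) * |C_s|^gamma
            px[c] = px[c] >= 0 ? std::pow(px[c], gamma) : -std::pow(-px[c], gamma);
    });
    return dst;
}
@endcode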
### Polynomial Fitting
Polynomial fitting uses polynomials to linearize.
Provided the polynomial is:
\f[
f(x)=a_nx^n+a_{n-1}x^{n-1}+... +a_0
\f]
Then:
\f[
C_{sl}=f(C_s)
\f]
In practice, \f$n\le3\f$ is used to prevent overfitting.
There are many variants of polynomial fitting, the difference lies in the way of generating \f$f(x)\f$.
It is usually necessary to use linearized reference colors and corresponding detected colors to calculate the polynomial parameters.
However, not all colors can participate in the calculation; saturated detected colors need to be removed. See the algorithm introduction document for details.
#### Fitting Channels Respectively
Use three polynomials, \f$r(x), g(x), b(x)\f$, to linearize each channel of the RGB color space[1-3]:
\f[
R_{sl}=r(R_s)\\
G_{sl}=g(G_s)\\
B_{sl}=b(B_s)\\
\f]
The polynomial is generated by minimizing the residual sum of squares between the detected data and the linearized reference data.
Take the R-channel as an example:
\f[
R=\arg\min_{f}\Sigma(R_{dl}-f(R_s))^2
\f]
It's equivalent to finding the least square regression for below equations:
\f[
f(R_{s1})=R_{dl1}\\
f(R_{s2})=R_{dl2}\\
...
\f]
With the polynomial written out, the above equations become:
\f[
\begin{bmatrix}
R_{s1}^{n} & R_{s1}^{n-1} & ... & 1\\
R_{s2}^{n} & R_{s2}^{n-1} & ... & 1\\
... & ... & ... & ...
\end{bmatrix}
\begin{bmatrix}
a_{n}\\
a_{n-1}\\
... \\
a_0
\end{bmatrix}
=
\begin{bmatrix}
R_{dl1}\\
R_{dl2}\\
...
\end{bmatrix}
\f]
It can be expressed as a system of linear equations:
\f[
AX=B
\f]
When the number of reference colors is not less than the number of polynomial coefficients (degree plus one), the linear system has a least-squares solution:
\f[
X=(A^TA)^{-1}A^TB
\f]
Once we have the coefficients, we have the polynomial r.
This way of finding polynomial coefficients corresponds to numpy.polyfit, expressed here as:
\f[
R=polyfit(R_s, R_{dl})
\f]
Note that, in general, we would like the resulting polynomial to be monotonically increasing on the interval [0,1],
but enforcing this requires a nonlinear method to generate the polynomial (see [4] for details),
which would greatly increase the complexity of the program.
Since monotonicity does not affect the correct operation of the color correction program, polyfit is still used to implement the program.
Parameters for other channels can also be derived in a similar way.
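A minimal sketch of this least-squares fit with OpenCV (the helper name
`polyfit1d` is illustrative; cv::solve with DECOMP_SVD plays the role of numpy.polyfit here):
@code{.cpp}
// Fit coefficients a_n..a_0 so that f(xs[i]) approximates ys[i] in the least-squares sense.
cv::Mat polyfit1d(const std::vector<double>& xs, const std::vector<double>& ys, int deg)
{
    cv::Mat A((int)xs.size(), deg + 1, CV_64F), X;
    cv::Mat B(ys, /*copyData=*/true);
    for (int i = 0; i < A.rows; i++)
        for (int j = 0; j <= deg; j++)
            A.at<double>(i, j) = std::pow(xs[i], deg - j);  // Vandermonde row
    cv::solve(A, B, X, cv::DECOMP_SVD);                     // X = argmin ||AX - B||^2
    return X;  // highest-degree coefficient first
}
@endcode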
#### Grayscale Polynomial Fitting
In this method[2], a single polynomial is used for all channels.
The polynomial is still a polyfit result from the detected colors to the linear reference colors.
However, only the gray patches of the reference colors can participate in the calculation.
Since the detected colors corresponding to the gray reference patches are not necessarily gray, they need to be converted to grayscale first.
Grayscale here refers to the Y channel of the XYZ color space.
The color space of the detected data is unknown, so it cannot be converted into XYZ exactly;
therefore, the sRGB luminance formula is used as an approximation[5]:
\f[
G_{s}=0.2126R_{s}+0.7152G_{s}+0.0722B_{s}
\f]
Then the polynomial parameters can be obtained by using the polyfit.
\f[
f=polyfit(G_{s}, G_{dl})
\f]
After \f$f\f$ is obtained, linearization can be performed.
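A minimal sketch of the grayscale approximation above (assuming `srcRGB` is a
CV_64FC3 matrix of detected colors in RGB order):
@code{.cpp}
cv::Mat graySrc;
cv::transform(srcRGB, graySrc, cv::Matx13d(0.2126, 0.7152, 0.0722));  // CV_64FC3 -> CV_64FC1
@endcode
The resulting gray values can then be fed to the polynomial fit together with
the gray values of the linearized reference colors.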
#### Logarithmic Polynomial Fitting
Taking the logarithm of the gamma correction formula gives:
\f[
ln(C_{sl})={\gamma}ln(C_s),\qquad C_s>0
\f]
It can be seen that there is a linear relationship between \f$ln(C_s)\f$ and \f$ln(C_{sl})\f$. It can be considered that the formula is an approximation of a polynomial relationship, that is, there exists a polynomial \f$f\f$, which makes[2]:
\f[
ln(C_{sl})=f(ln(C_s)), \qquad C_s>0\\
C_{sl}=0, \qquad C_s=0
\f]
Because \f$ln(0)\f$ is undefined (\f$ln(C_s)\to-\infty\f$ as \f$C_s\to0^+\f$), channels whose component is 0 are mapped directly to 0 in the formula above.
For fitting channels respectively, we have:
\f[
r=polyfit(ln(R_s),ln(R_{dl}))\\
g=polyfit(ln(G_s),ln(G_{dl}))\\
b=polyfit(ln(B_s),ln(B_{dl}))\\
\f]
Note that the argument of \f$ln(\cdot)\f$ cannot be 0.
Therefore, we need to remove the entries whose values are 0 from \f$R_s \f$ and \f$R_{dl} \f$, \f$G_s\f$ and \f$G_{dl}\f$, \f$B_s\f$ and \f$B_{dl}\f$ before fitting.
This gives:
\f[
ln(R_{sl})=r(ln(R_s)), \qquad R_s>0\\
R_{sl}=0, \qquad R_s=0\\
ln(G_{sl})=g(ln(G_s)),\qquad G_s>0\\
G_{sl}=0, \qquad G_s=0\\
ln(B_{sl})=b(ln(B_s)),\qquad B_s>0\\
B_{sl}=0, \qquad B_s=0\\
\f]
For grayscale polynomials, there are also:
\f[
f=polyfit(ln(G_{s}),ln(G_{dl}))
\f]
and:
\f[
ln(C_{sl})=f(ln(C_s)), \qquad C_s>0\\
C_{sl}=0, \qquad C_s=0
\f]
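A minimal sketch of the log-domain fit for one channel, reusing the
illustrative `polyfit1d` helper sketched above (`Rs`, `Rdl` and `deg` are
assumed inputs; only strictly positive pairs take part in the fit):
@code{.cpp}
std::vector<double> lx, ly;
for (size_t i = 0; i < Rs.size(); i++)
    if (Rs[i] > 0 && Rdl[i] > 0)
    {
        lx.push_back(std::log(Rs[i]));
        ly.push_back(std::log(Rdl[i]));
    }
cv::Mat r = polyfit1d(lx, ly, deg);  // then R_sl = exp(r(ln(R_s))) for R_s > 0, else 0
@endcode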
*/
enum LINEAR_TYPE
{
LINEARIZATION_IDENTITY, ///< no change is made
LINEARIZATION_GAMMA, ///< gamma correction; a value for gamma must be set as well
LINEARIZATION_COLORPOLYFIT, ///< polynomial fitting of each channel; a value for deg must be set as well
LINEARIZATION_COLORLOGPOLYFIT, ///< logarithmic polynomial fitting of each channel; a value for deg must be set as well
LINEARIZATION_GRAYPOLYFIT, ///< grayscale polynomial fitting; values for deg and dst_whites must be set as well
LINEARIZATION_GRAYLOGPOLYFIT ///< grayscale logarithmic polynomial fitting; values for deg and dst_whites must be set as well
};
/** @brief Enum of possible functions to calculate the distance between colors.
See https://en.wikipedia.org/wiki/Color_difference for details
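For reference, the simplest of these, @ref DISTANCE_CIE76, is the Euclidean distance between two CIELAB colors:
\f[
\Delta E_{76}=\sqrt{(L_2-L_1)^2+(a_2-a_1)^2+(b_2-b_1)^2}
\f]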
*/
enum DISTANCE_TYPE
{
DISTANCE_CIE76, ///<The 1976 formula is the first formula that related a measured color difference to a known set of CIELAB coordinates.
DISTANCE_CIE94_GRAPHIC_ARTS, ///<The 1976 definition was extended to address perceptual non-uniformities.
DISTANCE_CIE94_TEXTILES,
DISTANCE_CIE2000,
DISTANCE_CMC_1TO1, ///<In 1984, the Colour Measurement Committee of the Society of Dyers and Colourists defined a difference measure, also based on the L*C*h color model.
DISTANCE_CMC_2TO1,
DISTANCE_RGB, ///< Euclidean distance in RGB color space
DISTANCE_RGBL ///< Euclidean distance in linear RGB color space
};
/** @brief Core class of ccm model
Produce a ColorCorrectionModel instance for inference
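A minimal usage sketch (assuming `src` is a CV_64FC3 Mat of detected
ColorChecker patch colors in RGB order with values in [0, 1], and `img64` is
the image to correct in the same format; variable names are illustrative):
@code{.cpp}
cv::ccm::ColorCorrectionModel model(src, cv::ccm::COLORCHECKER_Macbeth);
model.setCCM_TYPE(cv::ccm::CCM_3x3);
model.setDistance(cv::ccm::DISTANCE_CIE2000);
model.setLinear(cv::ccm::LINEARIZATION_GAMMA);
model.setLinearGamma(2.2);
model.run();
cv::Mat ccm = model.getCCM();            // fitted color correction matrix
double loss = model.getLoss();           // residual loss after fitting
cv::Mat calibrated = model.infer(img64); // apply the model to an image
@endcode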
*/
class CV_EXPORTS_W ColorCorrectionModel
{
public:
/** @brief Color Correction Model
Supported list of color cards:
- @ref COLORCHECKER_Macbeth, the Macbeth ColorChecker
- @ref COLORCHECKER_Vinyl, the DKK ColorChecker
- @ref COLORCHECKER_DigitalSG, the DigitalSG ColorChecker with 140 squares
@param src detected colors of ColorChecker patches;\n
the color type is RGB not BGR, and the color values are in [0, 1];
@param constcolor the built-in color card
*/
CV_WRAP ColorCorrectionModel(const Mat& src, CONST_COLOR constcolor);
/** @brief Color Correction Model
@param src detected colors of ColorChecker patches;\n
the color type is RGB not BGR, and the color values are in [0, 1];
@param colors the reference color values, with values in [0, 1];\n
@param ref_cs the color space of the reference colors;\n
if it is an RGB color space, the channel order is RGB, not BGR
*/
CV_WRAP ColorCorrectionModel(const Mat& src, Mat colors, COLOR_SPACE ref_cs);
/** @brief Color Correction Model
@param src detected colors of ColorChecker patches;\n
the color type is RGB not BGR, and the color values are in [0, 1];
@param colors the reference color values, with values in [0, 1].
@param ref_cs the color space of the reference colors;
if it is an RGB color space, the channel order is RGB, not BGR
@param colored mask of the colored patches
*/
CV_WRAP ColorCorrectionModel(const Mat& src, Mat colors, COLOR_SPACE ref_cs, Mat colored);
/** @brief set ColorSpace
@note It should be some RGB color space;
Supported list of color spaces:
- @ref COLOR_SPACE_sRGB
- @ref COLOR_SPACE_AdobeRGB
- @ref COLOR_SPACE_WideGamutRGB
- @ref COLOR_SPACE_ProPhotoRGB
- @ref COLOR_SPACE_DCI_P3_RGB
- @ref COLOR_SPACE_AppleRGB
- @ref COLOR_SPACE_REC_709_RGB
- @ref COLOR_SPACE_REC_2020_RGB
@param cs the absolute color space that detected colors convert to;\n
default: @ref COLOR_SPACE_sRGB
*/
CV_WRAP void setColorSpace(COLOR_SPACE cs);
/** @brief set ccm_type
@param ccm_type the shape of the color correction matrix (CCM);\n
default: @ref CCM_3x3
*/
CV_WRAP void setCCM_TYPE(CCM_TYPE ccm_type);
/** @brief set Distance
@param distance the type of color distance;\n
default: @ref DISTANCE_CIE2000
*/
CV_WRAP void setDistance(DISTANCE_TYPE distance);
/** @brief set Linear
@param linear_type the method of linearization;\n
default: @ref LINEARIZATION_GAMMA
*/
CV_WRAP void setLinear(LINEAR_TYPE linear_type);
/** @brief set Gamma
@note only valid when linear is set to "gamma";\n
@param gamma the gamma value of gamma correction;\n
default: 2.2;
*/
CV_WRAP void setLinearGamma(const double& gamma);
/** @brief set degree
@note only valid when linear is set to
- @ref LINEARIZATION_COLORPOLYFIT
- @ref LINEARIZATION_GRAYPOLYFIT
- @ref LINEARIZATION_COLORLOGPOLYFIT
- @ref LINEARIZATION_GRAYLOGPOLYFIT
@param deg the degree of linearization polynomial;\n
default: 3
*/
CV_WRAP void setLinearDegree(const int& deg);
/** @brief set SaturatedThreshold.
The colors in the closed interval [lower, upper] are reserved to participate
in the calculation of the loss function and initialization parameters
@param lower the lower threshold to determine saturation;\n
default: 0;
@param upper the upper threshold to determine saturation;\n
default: 0
*/
CV_WRAP void setSaturatedThreshold(const double& lower, const double& upper);
/** @brief set WeightsList
@param weights_list the list of weight of each color;\n
default: empty array
*/
CV_WRAP void setWeightsList(const Mat& weights_list);
/** @brief set WeightCoeff
@param weights_coeff the exponent applied to the L* component of the reference color in CIE Lab color space;\n
default: 0
*/
CV_WRAP void setWeightCoeff(const double& weights_coeff);
/** @brief set InitialMethod
@param initial_method_type the method of calculating CCM initial value;\n
default: INITIAL_METHOD_LEAST_SQUARE
*/
CV_WRAP void setInitialMethod(INITIAL_METHOD_TYPE initial_method_type);
/** @brief set MaxCount
@param max_count used by the MinProblemSolver (DownhillSolver);\n
termination criterion of the algorithm;\n
default: 5000;
*/
CV_WRAP void setMaxCount(const int& max_count);
/** @brief set Epsilon
@param epsilon used by the MinProblemSolver (DownhillSolver);\n
termination criterion of the algorithm;\n
default: 1e-4;
*/
CV_WRAP void setEpsilon(const double& epsilon);
/** @brief make color correction */
CV_WRAP void run();
CV_WRAP Mat getCCM() const;
CV_WRAP double getLoss() const;
CV_WRAP Mat get_src_rgbl() const;
CV_WRAP Mat get_dst_rgbl() const;
CV_WRAP Mat getMask() const;
CV_WRAP Mat getWeights() const;
/** @brief Infer using the fitted CCM.
@param img the input image.
@param islinear default false.
@return the output array.
*/
CV_WRAP Mat infer(const Mat& img, bool islinear = false);
class Impl;
private:
std::shared_ptr<Impl> p;
};
//! @} ccm
} // namespace ccm
} // namespace cv
#endif


@@ -0,0 +1,222 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
/*
* MIT License
*
* Copyright (c) 2018 Pedro Diamel Marrero Fernández
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __OPENCV_MCC_CHECKER_DETECTOR_HPP__
#define __OPENCV_MCC_CHECKER_DETECTOR_HPP__
#include <opencv2/core.hpp>
#include "checker_model.hpp"
#include <opencv2/dnn.hpp>
//----------To view debugging output-----------------------------
//Read the tutorial on how to use debugging in this module
//It can be found in the documentation of 'mcc' modules,
//Then uncomment the following line to view debugging output
//---------------------------------------------------------------
// #define MCC_DEBUG
//---------------------------------------------------------------
namespace cv
{
namespace mcc
{
//! @addtogroup mcc
//! @{
/**
* @brief Parameters for the detectMarker process:
* - int adaptiveThreshWinSizeMin : minimum window size for adaptive
* thresholding before finding contours
* (default 23).
* - int adaptiveThreshWinSizeMax : maximum window size for adaptive
* thresholding before finding contours
* (default 153).
* - int adaptiveThreshWinSizeStep : increments from adaptiveThreshWinSizeMin to
* adaptiveThreshWinSizeMax during the
* thresholding (default 16).
* - double adaptiveThreshConstant : constant for adaptive thresholding before
* finding contours (default 7)
 * - double minContoursAreaRate : determines the minimum area for a marker
 *                                contour to be detected. This is defined as a
 *                                rate with respect to the area of the input
 *                                image. Used only if the neural network is
 *                                used (default 0.003).
 * - double minContoursArea : determines the minimum area for a marker contour
 *                            to be detected. This is defined as the actual
 *                            area. Used only if the neural network is not
 *                            used (default 100).
 * - double confidenceThreshold : minimum confidence for a bounding box
 *                                detected by the neural network to be
 *                                classified as a detection (default 0.5,
 *                                0 <= confidenceThreshold <= 1).
 * - double minContourSolidity : minimum solidity of a contour for it to be
 *                               detected as a square in the chart (default
 *                               0.9).
 * - double findCandidatesApproxPolyDPEpsMultiplier : multiplier to be used in
 *                                                    the cv::approxPolyDP
 *                                                    function (default 0.05).
 * - int borderWidth : width of the padding used to pass the initial neural
 *                     network detection to the succeeding system (default 0).
 * - float B0factor : distance between two neighbouring squares of the same
 *                    chart, defined as the ratio between the distance and the
 *                    larger dimension of a square (default 1.25).
* - float maxError : maximum allowed error in the detection of a chart.
* default(0.1)
 * - int minContourPointsAllowed : minimum number of points in a detected
 *                                 contour. default(4)
 * - int minContourLengthAllowed : minimum length of a contour. default(100)
* - int minInterContourDistance : minimum distance between two contours.
* default(100)
* - int minInterCheckerDistance : minimum distance between two checkers.
* default(10000)
* - int minImageSize : minimum size of the smaller dimension of the image.
* default(1000)
 * - unsigned minGroupSize : minimum number of squares of a chart that must be
 *                           detected. default(4)
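 *
 * A minimal configuration sketch (the values below are illustrative, not
 * recommendations):
 * @code{.cpp}
 * cv::Ptr<cv::mcc::DetectorParameters> params = cv::mcc::DetectorParameters::create();
 * params->adaptiveThreshConstant = 9.0; // loosen adaptive thresholding
 * params->minGroupSize = 6;             // require more squares per chart
 * @endcode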
*/
struct CV_EXPORTS_W DetectorParameters
{
DetectorParameters();
CV_WRAP static Ptr<DetectorParameters> create();
CV_PROP_RW int adaptiveThreshWinSizeMin;
CV_PROP_RW int adaptiveThreshWinSizeMax;
CV_PROP_RW int adaptiveThreshWinSizeStep;
CV_PROP_RW double adaptiveThreshConstant;
CV_PROP_RW double minContoursAreaRate;
CV_PROP_RW double minContoursArea;
CV_PROP_RW double confidenceThreshold;
CV_PROP_RW double minContourSolidity;
CV_PROP_RW double findCandidatesApproxPolyDPEpsMultiplier;
CV_PROP_RW int borderWidth;
CV_PROP_RW float B0factor;
CV_PROP_RW float maxError;
CV_PROP_RW int minContourPointsAllowed;
CV_PROP_RW int minContourLengthAllowed;
CV_PROP_RW int minInterContourDistance;
CV_PROP_RW int minInterCheckerDistance;
CV_PROP_RW int minImageSize;
CV_PROP_RW unsigned minGroupSize;
};
/** @brief A class to find the positions of the ColorCharts in the image.
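 *
 * A minimal usage sketch (assumes the opencv2/imgcodecs.hpp header is
 * included and "chart.jpg" exists; names are illustrative):
 * @code{.cpp}
 * cv::Mat image = cv::imread("chart.jpg");  // BGR input
 * cv::Ptr<cv::mcc::CCheckerDetector> detector = cv::mcc::CCheckerDetector::create();
 * if (detector->process(image, cv::mcc::MCC24))
 * {
 *     cv::Ptr<cv::mcc::CChecker> checker = detector->getBestColorChecker();
 * }
 * @endcode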
*/
class CV_EXPORTS_W CCheckerDetector : public Algorithm
{
public:
/** \brief Set the net which will be used to find the approximate
* bounding boxes for the color charts.
*
 * It is not necessary to use this, but it usually results in a
 * better detection rate.
 *
 * \param net the neural network; if the network is empty, the
 * function will return false.
* \return true if it was able to set the detector's network,
* false otherwise.
*/
CV_WRAP virtual bool setNet(dnn::Net net) = 0;
/** \brief Find the ColorCharts in the given image.
*
 * The found charts are not returned but instead stored in the
 * detector; they can be accessed later using getBestColorChecker()
 * and getListColorChecker().
* \param image image in color space BGR
* \param chartType type of the chart to detect
* \param regionsOfInterest regions of image to look for the chart, if
* it is empty, charts are looked for in the
* entire image
 * \param nc number of charts in the image; if you don't know the exact
 *           number, keeping this value high helps.
 * \param useNet if true, the network provided using setNet()
 *               is used for a preliminary search for regions where a chart
 *               could be present, inside the regionsOfInterest provided.
 * \param params parameters of the detection system. More information
 *               about them can be found in the struct DetectorParameters.
 * \return true if at least one chart is detected, false otherwise
*/
CV_WRAP_AS(processWithROI) virtual bool
process(InputArray image, const TYPECHART chartType,
const std::vector<Rect> &regionsOfInterest,
const int nc = 1, bool useNet = false,
const Ptr<DetectorParameters> &params = DetectorParameters::create()) = 0;
/** \brief Find the ColorCharts in the given image.
*
* Differs from the above one only in the arguments.
*
* This version searches for the chart in the full image.
*
 * The found charts are not returned but instead stored in the
 * detector; they can be accessed later using getBestColorChecker()
 * and getListColorChecker().
* \param image image in color space BGR
* \param chartType type of the chart to detect
 * \param nc number of charts in the image; if you don't know the exact
 *           number, keeping this value high helps.
 * \param useNet if true, the network provided using setNet()
 *               is used for a preliminary search for regions where a chart
 *               could be present.
 * \param params parameters of the detection system. More information
 *               about them can be found in the struct DetectorParameters.
 * \return true if at least one chart is detected, false otherwise
*/
CV_WRAP virtual bool
process(InputArray image, const TYPECHART chartType,
const int nc = 1, bool useNet = false,
const Ptr<DetectorParameters> &params = DetectorParameters::create()) = 0;
/** \brief Get the best color checker. "Best" means the one
 * detected with the highest confidence.
 * \return A single colorchecker if at least one colorchecker
 * was detected, 'nullptr' otherwise.
*/
CV_WRAP virtual Ptr<mcc::CChecker> getBestColorChecker() = 0;
/** \brief Get the list of all detected colorcheckers
* \return checkers vector of colorcheckers
*/
CV_WRAP virtual std::vector<Ptr<CChecker>> getListColorChecker() = 0;
/** \brief Returns the implementation of the CCheckerDetector.
*
*/
CV_WRAP static Ptr<CCheckerDetector> create();
};
//! @} mcc
} // namespace mcc
} // namespace cv
#endif


@@ -0,0 +1,139 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
/*
* MIT License
*
* Copyright (c) 2018 Pedro Diamel Marrero Fernández
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __OPENCV_MCC_CHECKER_MODEL_HPP__
#define __OPENCV_MCC_CHECKER_MODEL_HPP__
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
namespace cv
{
namespace mcc
{
//! @addtogroup mcc
//! @{
/** TYPECHART
*
* \brief enum to hold the type of the checker
*
*/
enum TYPECHART
{
MCC24 = 0, ///< Standard Macbeth Chart with 24 squares
SG140, ///< DigitalSG with 140 squares
VINYL18, ///< DKK color chart with 12 squares and 6 rectangles
};
/** CChecker
*
* \brief checker object
*
 * This class contains the information about a detected checker, i.e., its
 * type, the corners of the chart, the color profile, the cost, the center
 * of the chart, etc.
*
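 * A minimal read-out sketch (assuming `checker` was obtained from
 * CCheckerDetector::getBestColorChecker()):
 * @code{.cpp}
 * std::vector<cv::Point2f> corners = checker->getBox();
 * cv::Mat chartsRGB = checker->getChartsRGB();
 * float cost = checker->getCost();
 * @endcode
 *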
*/
class CV_EXPORTS_W CChecker
{
public:
CChecker() {}
virtual ~CChecker() {}
/** \brief Create a new CChecker object.
* \return A pointer to the implementation of the CChecker
*/
CV_WRAP static Ptr<CChecker> create();
public:
// CV_PROP_RW TYPECHART target; ///< type of checkercolor
// CV_PROP_RW std::vector<cv::Point2f> box; ///< positions of the corners
// CV_PROP_RW cv::Mat charts_rgb; ///< charts profile in rgb color space
// CV_PROP_RW cv::Mat charts_ycbcr; ///< charts profile in YCbCr color space
// CV_PROP_RW float cost; ///< cost to approximate
// CV_PROP_RW cv::Point2f center; ///< center of the chart.
CV_WRAP virtual void setTarget(TYPECHART _target) = 0;
CV_WRAP virtual void setBox(std::vector<Point2f> _box) = 0;
CV_WRAP virtual void setChartsRGB(Mat _chartsRGB) = 0;
CV_WRAP virtual void setChartsYCbCr(Mat _chartsYCbCr) = 0;
CV_WRAP virtual void setCost(float _cost) = 0;
CV_WRAP virtual void setCenter(Point2f _center) = 0;
CV_WRAP virtual TYPECHART getTarget() = 0;
CV_WRAP virtual std::vector<Point2f> getBox() = 0;
CV_WRAP virtual Mat getChartsRGB() = 0;
CV_WRAP virtual Mat getChartsYCbCr() = 0;
CV_WRAP virtual float getCost() = 0;
CV_WRAP virtual Point2f getCenter() = 0;
};
/** \brief checker draw
*
* This class contains the functions for drawing a detected chart. This class
* expects a pointer to the checker which will be drawn by this object in the
* constructor and then later on whenever the draw function is called the
* checker will be drawn. Remember that it is not possible to change the
 * checker which will be drawn by a given object, as it is decided in the
 * constructor itself. If you want to draw some other checker you can create a
* new CCheckerDraw instance.
*
* The reason for this type of design is that in some videos we can assume that
* the checker is always in the same position, even if the image changes, so
* the drawing will always take place at the same position.
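 *
 * A minimal usage sketch (assumes `checker` came from a CCheckerDetector and
 * `image` is the BGR frame it was detected in):
 * @code{.cpp}
 * cv::Ptr<cv::mcc::CCheckerDraw> drawer = cv::mcc::CCheckerDraw::create(checker);
 * drawer->draw(image);  // overlays the detected chart in place
 * @endcode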
*/
class CV_EXPORTS_W CCheckerDraw
{
public:
virtual ~CCheckerDraw() {}
/** \brief Draws the checker to the given image.
* \param img image in color space BGR
* \return void
*/
CV_WRAP virtual void draw(InputOutputArray img) = 0;
/** \brief Create a new CCheckerDraw object.
* \param pChecker The checker which will be drawn by this object.
 * \param color The color with which the squares of the checker
 * will be drawn
 * \param thickness The thickness with which the squares will be
* drawn
* \return A pointer to the implementation of the CCheckerDraw
*/
CV_WRAP static Ptr<CCheckerDraw> create(Ptr<CChecker> pChecker,
cv::Scalar color = CV_RGB(0, 250, 0),
int thickness = 2);
};
//! @} mcc
} // namespace mcc
} // namespace cv
#endif