Add project files.

CaiXiang
2025-06-09 09:09:25 +08:00
parent 75b909652e
commit 88acb23465
1054 changed files with 615623 additions and 0 deletions


@@ -0,0 +1,186 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_BM3D_IMAGE_DENOISING_HPP__
#define __OPENCV_BM3D_IMAGE_DENOISING_HPP__
/** @file
@date Jul 19, 2016
@author Bartek Pawlik
*/
#include <opencv2/core.hpp>
namespace cv
{
namespace xphoto
{
//! @addtogroup xphoto
//! @{
//! BM3D transform types
enum TransformTypes
{
/** Un-normalized Haar transform */
HAAR = 0
};
//! BM3D algorithm steps
enum Bm3dSteps
{
/** Execute all steps of the algorithm */
BM3D_STEPALL = 0,
/** Execute only first step of the algorithm */
BM3D_STEP1 = 1,
/** Execute only second step of the algorithm */
BM3D_STEP2 = 2
};
/** @brief Performs image denoising using the Block-Matching and 3D-filtering algorithm
<http://www.cs.tut.fi/~foi/GCF-BM3D/BM3D_TIP_2007.pdf> with several computational
optimizations. The noise is expected to be Gaussian white noise.
@param src Input 8-bit or 16-bit 1-channel image.
@param dstStep1 Output image of the first step of BM3D with the same size and type as src.
@param dstStep2 Output image of the second step of BM3D with the same size and type as src.
@param h Parameter regulating filter strength. A big h value perfectly removes noise but also
removes image details; a smaller h value preserves details but also preserves some noise.
@param templateWindowSize Size in pixels of the template patch that is used for block-matching.
Should be a power of 2.
@param searchWindowSize Size in pixels of the window that is used to perform block-matching.
Affects performance linearly: the greater the searchWindowSize, the greater the denoising time.
Must be larger than templateWindowSize.
@param blockMatchingStep1 Block matching threshold for the first step of BM3D (hard thresholding),
i.e. the maximum distance for which two blocks are considered similar.
The value is expressed in Euclidean distance.
@param blockMatchingStep2 Block matching threshold for the second step of BM3D (Wiener filtering),
i.e. the maximum distance for which two blocks are considered similar.
The value is expressed in Euclidean distance.
@param groupSize Maximum size of the 3D group for collaborative filtering.
@param slidingStep Sliding step to process every next reference block.
@param beta Kaiser window parameter that affects the sidelobe attenuation of the transform of the
window. A Kaiser window is used in order to reduce border effects. To disable the window,
set beta to zero.
@param normType Norm used to calculate distance between blocks. L2 is slower than L1
but yields more accurate results.
@param step Step of BM3D to be executed. Possible variants are: step 1, step 2, both steps.
@param transformType Type of the orthogonal transform used in collaborative filtering step.
Currently only Haar transform is supported.
This function is expected to be applied to grayscale images. Advanced usage of this function
includes manual denoising of a color image in different color spaces.
@sa
fastNlMeansDenoising
*/
CV_EXPORTS_W void bm3dDenoising(
InputArray src,
InputOutputArray dstStep1,
OutputArray dstStep2,
float h = 1,
int templateWindowSize = 4,
int searchWindowSize = 16,
int blockMatchingStep1 = 2500,
int blockMatchingStep2 = 400,
int groupSize = 8,
int slidingStep = 1,
float beta = 2.0f,
int normType = cv::NORM_L2,
int step = cv::xphoto::BM3D_STEPALL,
int transformType = cv::xphoto::HAAR);
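/* A minimal usage sketch for the two-output overload above, assuming the opencv2/xphoto.hpp umbrella
   header and the imgcodecs module are available; the file names and the value of h are illustrative
   placeholders, not values prescribed by the library.

    #include <opencv2/imgcodecs.hpp>
    #include <opencv2/xphoto.hpp>

    int main()
    {
        // "noisy.png" is a placeholder for an 8-bit single-channel input image.
        cv::Mat noisy = cv::imread("noisy.png", cv::IMREAD_GRAYSCALE);
        cv::Mat basicEstimate, finalEstimate;
        // With the default BM3D_STEPALL, basicEstimate receives the step-1 (hard-thresholding)
        // result and finalEstimate receives the step-2 (Wiener-filtered) result.
        cv::xphoto::bm3dDenoising(noisy, basicEstimate, finalEstimate, 10.0f);
        cv::imwrite("denoised_step1.png", basicEstimate);
        cv::imwrite("denoised_step2.png", finalEstimate);
        return 0;
    }
*/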
/** @brief Performs image denoising using the Block-Matching and 3D-filtering algorithm
<http://www.cs.tut.fi/~foi/GCF-BM3D/BM3D_TIP_2007.pdf> with several computational
optimizations. The noise is expected to be Gaussian white noise.
@param src Input 8-bit or 16-bit 1-channel image.
@param dst Output image with the same size and type as src.
@param h Parameter regulating filter strength. A big h value perfectly removes noise but also
removes image details; a smaller h value preserves details but also preserves some noise.
@param templateWindowSize Size in pixels of the template patch that is used for block-matching.
Should be a power of 2.
@param searchWindowSize Size in pixels of the window that is used to perform block-matching.
Affects performance linearly: the greater the searchWindowSize, the greater the denoising time.
Must be larger than templateWindowSize.
@param blockMatchingStep1 Block matching threshold for the first step of BM3D (hard thresholding),
i.e. the maximum distance for which two blocks are considered similar.
The value is expressed in Euclidean distance.
@param blockMatchingStep2 Block matching threshold for the second step of BM3D (Wiener filtering),
i.e. the maximum distance for which two blocks are considered similar.
The value is expressed in Euclidean distance.
@param groupSize Maximum size of the 3D group for collaborative filtering.
@param slidingStep Sliding step to process every next reference block.
@param beta Kaiser window parameter that affects the sidelobe attenuation of the transform of the
window. A Kaiser window is used in order to reduce border effects. To disable the window,
set beta to zero.
@param normType Norm used to calculate distance between blocks. L2 is slower than L1
but yields more accurate results.
@param step Step of BM3D to be executed. Only BM3D_STEP1 and BM3D_STEPALL are allowed;
BM3D_STEP2 is not allowed, as it requires the basic estimate to be present.
@param transformType Type of the orthogonal transform used in collaborative filtering step.
Currently only Haar transform is supported.
This function is expected to be applied to grayscale images. Advanced usage of this function
includes manual denoising of a color image in different color spaces.
@sa
fastNlMeansDenoising
*/
CV_EXPORTS_W void bm3dDenoising(
InputArray src,
OutputArray dst,
float h = 1,
int templateWindowSize = 4,
int searchWindowSize = 16,
int blockMatchingStep1 = 2500,
int blockMatchingStep2 = 400,
int groupSize = 8,
int slidingStep = 1,
float beta = 2.0f,
int normType = cv::NORM_L2,
int step = cv::xphoto::BM3D_STEPALL,
int transformType = cv::xphoto::HAAR);
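/* A minimal usage sketch for the single-output overload above, under the same assumptions as the
   previous example (opencv2/xphoto.hpp umbrella header, imgcodecs module, placeholder file names).

    #include <opencv2/imgcodecs.hpp>
    #include <opencv2/xphoto.hpp>

    int main()
    {
        cv::Mat noisy = cv::imread("noisy.png", cv::IMREAD_GRAYSCALE); // 8-bit, 1-channel
        cv::Mat denoised;
        // Both steps run internally (default BM3D_STEPALL); only the final estimate is returned.
        cv::xphoto::bm3dDenoising(noisy, denoised, 10.0f);
        cv::imwrite("denoised.png", denoised);
        return 0;
    }
*/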
//! @}
}
}
#endif // __OPENCV_BM3D_IMAGE_DENOISING_HPP__


@@ -0,0 +1,79 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_DCT_IMAGE_DENOISING_HPP__
#define __OPENCV_DCT_IMAGE_DENOISING_HPP__
/** @file
@date Jun 26, 2014
@author Yury Gitman
*/
#include <opencv2/core.hpp>
namespace cv
{
namespace xphoto
{
//! @addtogroup xphoto
//! @{
/** @brief The function implements simple DCT-based denoising
<http://www.ipol.im/pub/art/2011/ys-dct/>.
@param src source image
@param dst destination image
@param sigma expected noise standard deviation
@param psize size of the block side where the DCT is computed
@sa
fastNlMeansDenoising
*/
CV_EXPORTS_W void dctDenoising(const Mat &src, Mat &dst, const double sigma, const int psize = 16);
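/* A minimal usage sketch, assuming the opencv2/xphoto.hpp umbrella header and the imgcodecs module;
   the file name and sigma value are illustrative placeholders.

    #include <opencv2/imgcodecs.hpp>
    #include <opencv2/xphoto.hpp>

    int main()
    {
        cv::Mat src = cv::imread("noisy.png"); // 1- or 3-channel input image
        cv::Mat dst;
        // sigma is the assumed noise standard deviation; psize keeps its default of 16.
        cv::xphoto::dctDenoising(src, dst, 15.0);
        cv::imwrite("denoised_dct.png", dst);
        return 0;
    }
*/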
//! @}
}
}
#endif // __OPENCV_DCT_IMAGE_DENOISING_HPP__


@@ -0,0 +1,120 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
// (3-clause BSD License)
//
// Copyright (C) 2000-2019, Intel Corporation, all rights reserved.
// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
// Copyright (C) 2009-2016, NVIDIA Corporation, all rights reserved.
// Copyright (C) 2010-2013, Advanced Micro Devices, Inc., all rights reserved.
// Copyright (C) 2015-2016, OpenCV Foundation, all rights reserved.
// Copyright (C) 2015-2016, Itseez Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * Neither the names of the copyright holders nor the names of the contributors
// may be used to endorse or promote products derived from this software
// without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_INPAINTING_HPP__
#define __OPENCV_INPAINTING_HPP__
/** @file
@date Jul 22, 2014
@author Yury Gitman
*/
#include <opencv2/core.hpp>
namespace cv
{
namespace xphoto
{
//! @addtogroup xphoto
//! @{
//! @brief Various inpainting algorithms
//! @sa inpaint
enum InpaintTypes
{
/** This algorithm searches for dominant correspondences (transformations) of
image patches and tries to seamlessly fill in the area to be inpainted using these
transformations */
INPAINT_SHIFTMAP = 0,
/** Performs Frequency Selective Reconstruction (FSR).
One of the two quality profiles BEST and FAST can be chosen, depending on the time available for reconstruction.
See @cite GenserPCS2018 and @cite SeilerTIP2015 for details.
The algorithm may be utilized for the following areas of application:
1. %Error Concealment (Inpainting).
The sampling mask indicates the missing pixels of the distorted input
image to be reconstructed.
2. Non-Regular Sampling.
For more information on how to choose a good sampling mask, please review
@cite GroscheICIP2018 and @cite GroscheIST2018.
1-channel grayscale or 3-channel BGR images are accepted.
Conventional accepted ranges:
- 0-255 for CV_8U
- 0-65535 for CV_16U
- 0-1 for CV_32F/CV_64F.
*/
INPAINT_FSR_BEST = 1,
INPAINT_FSR_FAST = 2 //!< See #INPAINT_FSR_BEST
};
/** @brief The function implements different single-image inpainting algorithms.
See the original papers @cite He2012 (Shiftmap) or @cite GenserPCS2018 and @cite SeilerTIP2015 (FSR) for details.
@param src source image
- #INPAINT_SHIFTMAP: it can be of any type and have any number of channels from 1 to 4. In the case of
3- and 4-channel images the function expects them to be in the CIELab color space or a similar one,
where the first component represents intensity while the second and third represent color.
Nonetheless, you can try any color space.
- #INPAINT_FSR_BEST or #INPAINT_FSR_FAST: 1-channel grayscale or 3-channel BGR image.
@param mask mask (#CV_8UC1), where non-zero pixels indicate the valid image area, while zero pixels
indicate the area to be inpainted
@param dst destination image
@param algorithmType see xphoto::InpaintTypes
*/
CV_EXPORTS_W void inpaint(const Mat &src, const Mat &mask, Mat &dst, const int algorithmType);
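/* A minimal usage sketch of the FSR variant, assuming the opencv2/xphoto.hpp umbrella header and
   the imgcodecs module are available; the file names are illustrative placeholders.

    #include <opencv2/imgcodecs.hpp>
    #include <opencv2/xphoto.hpp>

    int main()
    {
        cv::Mat distorted = cv::imread("distorted.png");             // 3-channel BGR image
        cv::Mat mask = cv::imread("mask.png", cv::IMREAD_GRAYSCALE); // CV_8UC1
        // Mask convention of this function: non-zero = valid pixels, zero = pixels to reconstruct.
        cv::Mat restored;
        cv::xphoto::inpaint(distorted, mask, restored, cv::xphoto::INPAINT_FSR_FAST);
        cv::imwrite("restored.png", restored);
        return 0;
    }
*/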
//! @}
}
}
#endif // __OPENCV_INPAINTING_HPP__


@@ -0,0 +1,41 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#ifndef __OPENCV_OIL_PAINTING_HPP__
#define __OPENCV_OIL_PAINTING_HPP__
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
namespace cv
{
namespace xphoto
{
//! @addtogroup xphoto
//! @{
/** @brief oilPainting: applies an oil painting effect to the image.
See the book @cite Holzmann1988 for details.
@param src Input three-channel or one-channel image (either CV_8UC3 or CV_8UC1)
@param dst Output image of the same size and type as src.
@param size the neighbouring size is 2*size+1
@param dynRatio the image is divided by dynRatio before histogram processing
@param code color space conversion code (see ColorConversionCodes). The histogram will use only the first plane
*/
CV_EXPORTS_W void oilPainting(InputArray src, OutputArray dst, int size, int dynRatio, int code);
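/* A minimal usage sketch, assuming the opencv2/xphoto.hpp umbrella header and the imgcodecs module;
   the file name, size, and dynRatio values are illustrative placeholders.

    #include <opencv2/imgcodecs.hpp>
    #include <opencv2/imgproc.hpp>
    #include <opencv2/xphoto.hpp>

    int main()
    {
        cv::Mat img = cv::imread("photo.jpg"); // CV_8UC3
        cv::Mat painted;
        // size controls the neighbourhood, dynRatio quantizes intensities before the histogram,
        // and the histogram is built on the first plane of the Lab-converted image.
        cv::xphoto::oilPainting(img, painted, 5, 1, cv::COLOR_BGR2Lab);
        cv::imwrite("painted.jpg", painted);
        return 0;
    }
*/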
/** @brief oilPainting: applies an oil painting effect to the image.
See the book @cite Holzmann1988 for details.
@param src Input three-channel or one-channel image (either CV_8UC3 or CV_8UC1)
@param dst Output image of the same size and type as src.
@param size the neighbouring size is 2*size+1
@param dynRatio the image is divided by dynRatio before histogram processing
*/
CV_EXPORTS_W void oilPainting(InputArray src, OutputArray dst, int size, int dynRatio);
//! @}
}
}
#endif // __OPENCV_OIL_PAINTING_HPP__


@@ -0,0 +1,56 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#ifndef OPENCV_XPHOTO_TONEMAP_HPP
#define OPENCV_XPHOTO_TONEMAP_HPP
#include "opencv2/photo.hpp"
namespace cv { namespace xphoto {
//! @addtogroup xphoto
//! @{
/** @brief This algorithm decomposes the image into two layers, a base layer and a detail layer, using a bilateral filter,
and compresses the contrast of the base layer, thus preserving all the details.
This implementation uses the regular bilateral filter from OpenCV.
Saturation enhancement is possible as in cv::TonemapDrago.
For more information see @cite DD02 .
*/
class CV_EXPORTS_W TonemapDurand : public Tonemap
{
public:
CV_WRAP virtual float getSaturation() const = 0;
CV_WRAP virtual void setSaturation(float saturation) = 0;
CV_WRAP virtual float getContrast() const = 0;
CV_WRAP virtual void setContrast(float contrast) = 0;
CV_WRAP virtual float getSigmaSpace() const = 0;
CV_WRAP virtual void setSigmaSpace(float sigma_space) = 0;
CV_WRAP virtual float getSigmaColor() const = 0;
CV_WRAP virtual void setSigmaColor(float sigma_color) = 0;
};
/** @brief Creates TonemapDurand object
You need to set the OPENCV_ENABLE_NONFREE option in CMake to use this class. Use it at your own risk.
@param gamma gamma value for gamma correction. See createTonemap
@param contrast resulting contrast on a logarithmic scale, i.e. log(max / min), where max and min
are the maximum and minimum luminance values of the resulting image.
@param saturation saturation enhancement value. See createTonemapDrago
@param sigma_color bilateral filter sigma in color space
@param sigma_space bilateral filter sigma in coordinate space
*/
CV_EXPORTS_W Ptr<TonemapDurand>
createTonemapDurand(float gamma = 1.0f, float contrast = 4.0f, float saturation = 1.0f, float sigma_color = 2.0f, float sigma_space = 2.0f);
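/* A minimal usage sketch, assuming OpenCV was built with OPENCV_ENABLE_NONFREE and that the
   imgcodecs module is available; the HDR file name is an illustrative placeholder.

    #include <opencv2/imgcodecs.hpp>
    #include <opencv2/xphoto.hpp>

    int main()
    {
        // Load a 32-bit floating-point HDR image in its native format.
        cv::Mat hdr = cv::imread("scene.hdr", cv::IMREAD_UNCHANGED);
        cv::Ptr<cv::xphoto::TonemapDurand> durand = cv::xphoto::createTonemapDurand(2.2f); // gamma only
        cv::Mat ldr;
        durand->process(hdr, ldr);          // output is roughly in [0, 1]
        ldr.convertTo(ldr, CV_8U, 255.0);   // scale to an 8-bit displayable image
        cv::imwrite("scene_ldr.png", ldr);
        return 0;
    }
*/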
}} // namespace
#endif // OPENCV_XPHOTO_TONEMAP_HPP


@@ -0,0 +1,230 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_SIMPLE_COLOR_BALANCE_HPP__
#define __OPENCV_SIMPLE_COLOR_BALANCE_HPP__
/** @file
@date Jun 26, 2014
@author Yury Gitman
*/
#include <opencv2/core.hpp>
namespace cv
{
namespace xphoto
{
//! @addtogroup xphoto
//! @{
/** @brief The base class for auto white balance algorithms.
*/
class CV_EXPORTS_W WhiteBalancer : public Algorithm
{
public:
/** @brief Applies white balancing to the input image
@param src Input image
@param dst White balancing result
@sa cvtColor, equalizeHist
*/
CV_WRAP virtual void balanceWhite(InputArray src, OutputArray dst) = 0;
};
/** @brief A simple white balance algorithm that works by independently stretching
each of the input image channels to the specified range. For increased robustness
it ignores the top and bottom \f$p\%\f$ of pixel values.
*/
class CV_EXPORTS_W SimpleWB : public WhiteBalancer
{
public:
/** @brief Input image range minimum value
@see setInputMin */
CV_WRAP virtual float getInputMin() const = 0;
/** @copybrief getInputMin @see getInputMin */
CV_WRAP virtual void setInputMin(float val) = 0;
/** @brief Input image range maximum value
@see setInputMax */
CV_WRAP virtual float getInputMax() const = 0;
/** @copybrief getInputMax @see getInputMax */
CV_WRAP virtual void setInputMax(float val) = 0;
/** @brief Output image range minimum value
@see setOutputMin */
CV_WRAP virtual float getOutputMin() const = 0;
/** @copybrief getOutputMin @see getOutputMin */
CV_WRAP virtual void setOutputMin(float val) = 0;
/** @brief Output image range maximum value
@see setOutputMax */
CV_WRAP virtual float getOutputMax() const = 0;
/** @copybrief getOutputMax @see getOutputMax */
CV_WRAP virtual void setOutputMax(float val) = 0;
/** @brief Percent of top/bottom values to ignore
@see setP */
CV_WRAP virtual float getP() const = 0;
/** @copybrief getP @see getP */
CV_WRAP virtual void setP(float val) = 0;
};
/** @brief Creates an instance of SimpleWB
*/
CV_EXPORTS_W Ptr<SimpleWB> createSimpleWB();
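/* A minimal usage sketch, assuming the opencv2/xphoto.hpp umbrella header and the imgcodecs module;
   the file name and the value passed to setP are illustrative placeholders.

    #include <opencv2/imgcodecs.hpp>
    #include <opencv2/xphoto.hpp>

    int main()
    {
        cv::Mat src = cv::imread("photo.jpg");
        cv::Ptr<cv::xphoto::SimpleWB> wb = cv::xphoto::createSimpleWB();
        wb->setP(2.0f); // ignore the top and bottom 2% of pixel values
        cv::Mat balanced;
        wb->balanceWhite(src, balanced);
        cv::imwrite("balanced_simple.jpg", balanced);
        return 0;
    }
*/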
/** @brief Gray-world white balance algorithm
This algorithm scales the values of pixels based on a
gray-world assumption which states that the average of all channels
should result in a gray image.
It adds a modification which thresholds pixels based on their
saturation value and only uses pixels below the provided threshold in
finding average pixel values.
Saturation is calculated for a 3-channel RGB image per pixel I as follows, and lies in the range [0, 1]:
\f[ \texttt{Saturation} [I] = \frac{\textrm{max}(R,G,B) - \textrm{min}(R,G,B)}{\textrm{max}(R,G,B)} \f]
A threshold of 1 means that all pixels are used to white-balance, while a
threshold of 0 means no pixels are used. Lower thresholds are useful in
white-balancing saturated images.
Currently supports images of type @ref CV_8UC3 and @ref CV_16UC3.
*/
class CV_EXPORTS_W GrayworldWB : public WhiteBalancer
{
public:
/** @brief Maximum saturation for a pixel to be included in the
gray-world assumption
@see setSaturationThreshold */
CV_WRAP virtual float getSaturationThreshold() const = 0;
/** @copybrief getSaturationThreshold @see getSaturationThreshold */
CV_WRAP virtual void setSaturationThreshold(float val) = 0;
};
/** @brief Creates an instance of GrayworldWB
*/
CV_EXPORTS_W Ptr<GrayworldWB> createGrayworldWB();
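/* A minimal usage sketch, assuming the opencv2/xphoto.hpp umbrella header and the imgcodecs module;
   the file name and threshold value are illustrative placeholders.

    #include <opencv2/imgcodecs.hpp>
    #include <opencv2/xphoto.hpp>

    int main()
    {
        cv::Mat src = cv::imread("photo.jpg"); // CV_8UC3
        cv::Ptr<cv::xphoto::GrayworldWB> wb = cv::xphoto::createGrayworldWB();
        wb->setSaturationThreshold(0.95f); // exclude nearly saturated pixels from the average
        cv::Mat balanced;
        wb->balanceWhite(src, balanced);
        cv::imwrite("balanced_grayworld.jpg", balanced);
        return 0;
    }
*/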
/** @brief A more sophisticated learning-based automatic white balance algorithm.
Like @ref GrayworldWB, this algorithm works by applying different gains to the input
image channels, but their computation is a bit more involved compared to the
simple gray-world assumption. More details about the algorithm can be found in
@cite Cheng2015 .
To mask out saturated pixels this function uses only pixels that satisfy the
following condition:
\f[ \frac{\textrm{max}(R,G,B)}{\texttt{range_max_val}} < \texttt{saturation_thresh} \f]
Currently supports images of type @ref CV_8UC3 and @ref CV_16UC3.
*/
class CV_EXPORTS_W LearningBasedWB : public WhiteBalancer
{
public:
/** @brief Implements the feature extraction part of the algorithm.
In accordance with @cite Cheng2015 , computes the following features for the input image:
1. Chromaticity of an average (R,G,B) tuple
2. Chromaticity of the brightest (R,G,B) tuple (while ignoring saturated pixels)
3. Chromaticity of the dominant (R,G,B) tuple (the one that has the highest value in the RGB histogram)
4. Mode of the chromaticity palette, which is constructed by taking the 300 most common colors according to
the RGB histogram and projecting them onto the chromaticity plane. The mode is the highest-density point
of the palette, computed by a straightforward fixed-bandwidth kernel density estimator with
an Epanechnikov kernel function.
@param src Input three-channel image (BGR color space is assumed).
@param dst An array of four (r,g) chromaticity tuples corresponding to the features listed above.
*/
CV_WRAP virtual void extractSimpleFeatures(InputArray src, OutputArray dst) = 0;
/** @brief Maximum possible value of the input image (e.g. 255 for 8 bit images,
4095 for 12 bit images)
@see setRangeMaxVal */
CV_WRAP virtual int getRangeMaxVal() const = 0;
/** @copybrief getRangeMaxVal @see getRangeMaxVal */
CV_WRAP virtual void setRangeMaxVal(int val) = 0;
/** @brief Threshold that is used to determine saturated pixels: pixels where at least one of the
channels exceeds \f$\texttt{saturation_threshold}\times\texttt{range_max_val}\f$ are ignored.
@see setSaturationThreshold */
CV_WRAP virtual float getSaturationThreshold() const = 0;
/** @copybrief getSaturationThreshold @see getSaturationThreshold */
CV_WRAP virtual void setSaturationThreshold(float val) = 0;
/** @brief Defines the size of one dimension of a three-dimensional RGB histogram that is used internally
by the algorithm. It often makes sense to increase the number of bins for images with higher bit depth
(e.g. 256 bins for a 12 bit image).
@see setHistBinNum */
CV_WRAP virtual int getHistBinNum() const = 0;
/** @copybrief getHistBinNum @see getHistBinNum */
CV_WRAP virtual void setHistBinNum(int val) = 0;
};
/** @brief Creates an instance of LearningBasedWB
@param path_to_model Path to a .yml file with the model. If not specified, the default model is used
*/
CV_EXPORTS_W Ptr<LearningBasedWB> createLearningBasedWB(const String& path_to_model = String());
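/* A minimal usage sketch, assuming the opencv2/xphoto.hpp umbrella header and the imgcodecs module;
   the file name and threshold value are illustrative placeholders.

    #include <opencv2/imgcodecs.hpp>
    #include <opencv2/xphoto.hpp>

    int main()
    {
        cv::Mat src = cv::imread("photo.jpg"); // CV_8UC3
        // An empty path selects the default built-in model.
        cv::Ptr<cv::xphoto::LearningBasedWB> wb = cv::xphoto::createLearningBasedWB();
        wb->setSaturationThreshold(0.97f);
        cv::Mat balanced;
        wb->balanceWhite(src, balanced);

        // Optionally inspect the features the algorithm extracts from the image.
        cv::Mat features;
        wb->extractSimpleFeatures(src, features);
        return 0;
    }
*/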
/** @brief Implements an efficient fixed-point approximation for applying channel gains, which is
the last step of multiple white balance algorithms.
@param src Input three-channel image in the BGR color space (either CV_8UC3 or CV_16UC3)
@param dst Output image of the same size and type as src.
@param gainB gain for the B channel
@param gainG gain for the G channel
@param gainR gain for the R channel
*/
CV_EXPORTS_W void applyChannelGains(InputArray src, OutputArray dst, float gainB, float gainG, float gainR);
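/* A minimal usage sketch, assuming the opencv2/xphoto.hpp umbrella header and the imgcodecs module;
   the file name and gain values are illustrative placeholders.

    #include <opencv2/imgcodecs.hpp>
    #include <opencv2/xphoto.hpp>

    int main()
    {
        cv::Mat src = cv::imread("photo.jpg"); // CV_8UC3, BGR
        cv::Mat dst;
        // Gains are passed in B, G, R order.
        cv::xphoto::applyChannelGains(src, dst, 1.1f, 1.0f, 0.9f);
        cv::imwrite("gains_applied.jpg", dst);
        return 0;
    }
*/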
//! @}
}
}
#endif // __OPENCV_SIMPLE_COLOR_BALANCE_HPP__