Add project files.

CaiXiang
2025-06-09 09:09:25 +08:00
parent 75b909652e
commit 88acb23465
1054 changed files with 615623 additions and 0 deletions


@@ -0,0 +1,375 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_TEXT_ERFILTER_HPP__
#define __OPENCV_TEXT_ERFILTER_HPP__
#include "opencv2/core.hpp"
#include <vector>
#include <deque>
#include <string>
namespace cv
{
namespace text
{
//! @addtogroup text_detect
//! @{
/** @brief The ERStat structure represents a class-specific Extremal Region (ER).
An ER is a 4-connected set of pixels with all its grey-level values smaller than the values in its
outer boundary. A class-specific ER is selected (using a classifier) from all the ER's in the
component tree of the image.
*/
struct CV_EXPORTS ERStat
{
public:
//! Constructor
explicit ERStat(int level = 256, int pixel = 0, int x = 0, int y = 0);
//! Destructor
~ERStat() { }
//! seed point and the threshold (max grey-level value)
int pixel;
int level;
//! incrementally computable features
int area;
int perimeter;
int euler; //!< Euler number of the region
Rect rect;
double raw_moments[2]; //!< order 1 raw moments to derive the centroid
double central_moments[3]; //!< order 2 central moments to construct the covariance matrix
Ptr<std::deque<int> > crossings;//!< horizontal crossings
float med_crossings; //!< median of the crossings at three different height levels
//! 2nd stage features
float hole_area_ratio;
float convex_hull_ratio;
float num_inflexion_points;
// TODO Other features can be added (average color, standard deviation, and such)
// TODO shall we include the pixel list whenever available (i.e. after 2nd stage) ?
std::vector<int> *pixels;
//! probability that the ER belongs to the class we are looking for
double probability;
//! pointers preserving the tree structure of the component tree
ERStat* parent;
ERStat* child;
ERStat* next;
ERStat* prev;
//! whether the region is a local maximum of the probability
bool local_maxima;
ERStat* max_probability_ancestor;
ERStat* min_probability_ancestor;
};
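/** Illustrative sketch (not part of the upstream header): the parent/child/next pointers
    above encode the component tree, so a subtree can be traversed with a simple recursion.
@code
    // Count all regions in the subtree rooted at er (er itself, its children, their children, ...)
    static int countSubtree(const ERStat* er)
    {
        int n = 1;
        for (const ERStat* c = er->child; c != 0; c = c->next)
            n += countSubtree(c);
        return n;
    }
@endcode
*/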
/** @brief Base class for 1st and 2nd stages of Neumann and Matas scene text detection algorithm @cite Neumann12.
Extracts the component tree (if needed) and filters the extremal regions (ER's) by using a given classifier.
*/
class CV_EXPORTS_W ERFilter : public Algorithm
{
public:
/** @brief The classifier callback is wrapped in a class.
This hides the concrete classifier (SVM, Boost, etc.), so developers can provide their own classifiers to the
ERFilter algorithm.
*/
class CV_EXPORTS_W Callback
{
public:
virtual ~Callback() { }
/** @brief The classifier must return a probability measure for the region.
@param stat : The region to be classified
*/
virtual double eval(const ERStat& stat) = 0; //const = 0; //TODO why cannot use const = 0 here?
};
/** @brief The key method of ERFilter algorithm.
Takes an image as input and returns the selected regions in a vector of ERStat; only distinctive
ERs which correspond to characters are selected by a sequential classifier.
@param image Single channel image CV_8UC1
@param regions Output for the 1st stage and Input/Output for the 2nd. The selected Extremal Regions
are stored here.
Extracts the component tree (if needed) and filters the extremal regions (ER's) by using a given
classifier.
*/
virtual void run( InputArray image, std::vector<ERStat>& regions ) = 0;
//! set/get methods for the algorithm properties
virtual void setCallback(const Ptr<ERFilter::Callback>& cb) = 0;
virtual void setThresholdDelta(int thresholdDelta) = 0;
virtual void setMinArea(float minArea) = 0;
virtual void setMaxArea(float maxArea) = 0;
virtual void setMinProbability(float minProbability) = 0;
virtual void setMinProbabilityDiff(float minProbabilityDiff) = 0;
virtual void setNonMaxSuppression(bool nonMaxSuppression) = 0;
virtual int getNumRejected() const = 0;
};
/** @brief Create an Extremal Region Filter for the 1st stage classifier of N&M algorithm @cite Neumann12.
@param cb : Callback with the classifier. The default classifier can be implicitly loaded with the function
loadClassifierNM1, e.g. from the file samples/cpp/trained_classifierNM1.xml
@param thresholdDelta : Threshold step in subsequent thresholds when extracting the component tree
@param minArea : The minimum area (% of image size) allowed for retrieved ER's
@param maxArea : The maximum area (% of image size) allowed for retrieved ER's
@param minProbability : The minimum probability P(er|character) allowed for retrieved ER's
@param nonMaxSuppression : Whether non-maximum suppression is done over the branch probabilities
@param minProbabilityDiff : The minimum probability difference between local maxima and local minima ERs
The component tree of the image is extracted by a threshold increased step by step from 0 to 255,
incrementally computable descriptors (aspect_ratio, compactness, number of holes, and number of
horizontal crossings) are computed for each ER and used as features for a classifier which estimates
the class-conditional probability P(er|character). The value of P(er|character) is tracked using the
inclusion relation of ER across all thresholds and only the ERs which correspond to local maximum of
the probability P(er|character) are selected (if the local maximum of the probability is above a
global limit pmin and the difference between local maximum and local minimum is greater than
minProbabilityDiff).
*/
CV_EXPORTS_W Ptr<ERFilter> createERFilterNM1(const Ptr<ERFilter::Callback>& cb,
int thresholdDelta = 1, float minArea = (float)0.00025,
float maxArea = (float)0.13, float minProbability = (float)0.4,
bool nonMaxSuppression = true,
float minProbabilityDiff = (float)0.1);
/** @brief Create an Extremal Region Filter for the 2nd stage classifier of N&M algorithm @cite Neumann12.
@param cb : Callback with the classifier. The default classifier can be implicitly loaded with the function
loadClassifierNM2, e.g. from the file samples/cpp/trained_classifierNM2.xml
@param minProbability : The minimum probability P(er|character) allowed for retrieved ER's
In the second stage, the ERs that passed the first stage are classified into character and
non-character classes using more informative but also more computationally expensive features. The
classifier uses all the features calculated in the first stage and the following additional
features: hole area ratio, convex hull ratio, and number of outer inflexion points.
*/
CV_EXPORTS_W Ptr<ERFilter> createERFilterNM2(const Ptr<ERFilter::Callback>& cb,
float minProbability = (float)0.3);
/** @brief Reads an Extremal Region Filter for the 1st stage classifier of N&M algorithm
from the provided path e.g. /path/to/cpp/trained_classifierNM1.xml
@overload
*/
CV_EXPORTS_W Ptr<ERFilter> createERFilterNM1(const String& filename,
int thresholdDelta = 1, float minArea = (float)0.00025,
float maxArea = (float)0.13, float minProbability = (float)0.4,
bool nonMaxSuppression = true,
float minProbabilityDiff = (float)0.1);
/** @brief Reads an Extremal Region Filter for the 2nd stage classifier of N&M algorithm
from the provided path e.g. /path/to/cpp/trained_classifierNM2.xml
@overload
*/
CV_EXPORTS_W Ptr<ERFilter> createERFilterNM2(const String& filename,
float minProbability = (float)0.3);
/** @brief Allows implicitly loading the default classifier when creating an ERFilter object.
@param filename The XML or YAML file with the classifier model (e.g. trained_classifierNM1.xml)
Returns a pointer to ERFilter::Callback.
*/
CV_EXPORTS_W Ptr<ERFilter::Callback> loadClassifierNM1(const String& filename);
/** @brief Allows implicitly loading the default classifier when creating an ERFilter object.
@param filename The XML or YAML file with the classifier model (e.g. trained_classifierNM2.xml)
Returns a pointer to ERFilter::Callback.
*/
CV_EXPORTS_W Ptr<ERFilter::Callback> loadClassifierNM2(const String& filename);
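/** Illustrative usage sketch (not part of the upstream header): the typical two-stage
    filtering of a single grey-level channel. The image and classifier file names are
    assumptions taken from the OpenCV samples.
@code
    Mat channel = imread("scene.jpg", IMREAD_GRAYSCALE);   // hypothetical CV_8UC1 input
    Ptr<ERFilter> er_filter1 = createERFilterNM1(loadClassifierNM1("trained_classifierNM1.xml"),
                                                 16, 0.00015f, 0.13f, 0.2f, true, 0.1f);
    Ptr<ERFilter> er_filter2 = createERFilterNM2(loadClassifierNM2("trained_classifierNM2.xml"), 0.5f);
    std::vector<ERStat> regions;
    er_filter1->run(channel, regions);   // 1st stage: selects candidate ERs
    er_filter2->run(channel, regions);   // 2nd stage: refines them in place
@endcode
*/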
//! computeNMChannels operation modes
enum { ERFILTER_NM_RGBLGrad,
ERFILTER_NM_IHSGrad
};
/** @brief Compute the different channels to be processed independently in the N&M algorithm @cite Neumann12.
@param _src Source image. Must be RGB CV_8UC3.
@param _channels Output vector\<Mat\> where computed channels are stored.
@param _mode Mode of operation. Currently the only available options are:
**ERFILTER_NM_RGBLGrad** (used by default) and **ERFILTER_NM_IHSGrad**.
In N&M algorithm, the combination of intensity (I), hue (H), saturation (S), and gradient magnitude
channels (Grad) is used to obtain high localization recall. This implementation also
provides an alternative combination of red (R), green (G), blue (B), lightness (L), and gradient
magnitude (Grad).
*/
CV_EXPORTS_W void computeNMChannels(InputArray _src, CV_OUT OutputArrayOfArrays _channels, int _mode = ERFILTER_NM_RGBLGrad);
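/** Illustrative sketch (not part of the upstream header): computing the N&M channels of a
    hypothetical RGB image and filtering each channel independently with the er_filter1 /
    er_filter2 objects from the sketch above.
@code
    std::vector<Mat> channels;
    computeNMChannels(rgb_image, channels);   // default mode ERFILTER_NM_RGBLGrad
    std::vector<std::vector<ERStat> > regions(channels.size());
    for (size_t c = 0; c < channels.size(); c++)
    {
        er_filter1->run(channels[c], regions[c]);
        er_filter2->run(channels[c], regions[c]);
    }
@endcode
*/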
//! text::erGrouping operation modes
enum erGrouping_Modes {
/** Exhaustive Search algorithm proposed in @cite Neumann11 for grouping horizontally aligned text.
The algorithm models a verification function for all the possible ER sequences. The
verification function for ER pairs consists of a set of threshold-based pairwise rules which
compare measurements of two regions (height ratio, centroid angle, and region distance). The
verification function for ER triplets creates a word text line estimate using Least
Median-Squares fitting for a given triplet and then verifies that the estimate is valid (based
on thresholds created during training). Verification functions for sequences larger than 3 are
approximated by verifying that the text line parameters of all (sub)sequences of length 3 are
consistent.
*/
ERGROUPING_ORIENTATION_HORIZ,
/** Text grouping method proposed in @cite Gomez13 @cite Gomez14 for grouping arbitrary oriented text. Regions
are agglomerated by Single Linkage Clustering in a weighted feature space that combines proximity
(x,y coordinates) and similarity measures (color, size, gradient magnitude, stroke width, etc.).
SLC provides a dendrogram where each node represents a text group hypothesis. Then the algorithm
finds the branches corresponding to text groups by traversing this dendrogram with a stopping rule
that combines the output of a rotation invariant text group classifier and a probabilistic measure
for hierarchical clustering validity assessment.
@note This mode is not supported due to NFA code removal ( https://github.com/opencv/opencv_contrib/issues/2235 )
*/
ERGROUPING_ORIENTATION_ANY
};
/** @brief Find groups of Extremal Regions that are organized as text blocks.
@param img Original RGB or Greyscale image from which the regions were extracted.
@param channels Vector of single channel images CV_8UC1 from which the regions were extracted.
@param regions Vector of ER's retrieved from the ERFilter algorithm from each channel.
@param groups The output of the algorithm is stored in this parameter as set of lists of indexes to
provided regions.
@param groups_rects The output of the algorithm is stored in this parameter as a list of rectangles.
@param method Grouping method (see text::erGrouping_Modes). Can be one of ERGROUPING_ORIENTATION_HORIZ,
ERGROUPING_ORIENTATION_ANY.
@param filename The XML or YAML file with the classifier model (e.g.
samples/trained_classifier_erGrouping.xml). Only to use when grouping method is
ERGROUPING_ORIENTATION_ANY.
@param minProbablity The minimum probability for accepting a group. Only to use when grouping
method is ERGROUPING_ORIENTATION_ANY.
*/
CV_EXPORTS void erGrouping(InputArray img, InputArrayOfArrays channels,
std::vector<std::vector<ERStat> > &regions,
std::vector<std::vector<Vec2i> > &groups,
std::vector<Rect> &groups_rects,
int method = ERGROUPING_ORIENTATION_HORIZ,
const std::string& filename = std::string(),
float minProbablity = 0.5);
CV_EXPORTS_W void erGrouping(InputArray image, InputArray channel,
std::vector<std::vector<Point> > regions,
CV_OUT std::vector<Rect> &groups_rects,
int method = ERGROUPING_ORIENTATION_HORIZ,
const String& filename = String(),
float minProbablity = (float)0.5);
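/** Illustrative sketch (not part of the upstream header): grouping the per-channel regions
    obtained in the sketch above into horizontally aligned text blocks.
@code
    std::vector<std::vector<Vec2i> > groups;
    std::vector<Rect> groups_rects;
    erGrouping(rgb_image, channels, regions, groups, groups_rects,
               ERGROUPING_ORIENTATION_HORIZ);
@endcode
*/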
/** @brief Converts MSER contours (vector\<Point\>) to ERStat regions.
@param image Source image CV_8UC1 from which the MSERs were extracted.
@param contours Input vector with all the contours (vector\<Point\>).
@param regions Output where the ERStat regions are stored.
It takes as input the contours provided by the OpenCV MSER feature detector and returns as output
two vectors of ERStats. This is because MSER() output contains both MSER+ and MSER- regions in a
single vector\<Point\>; the function separates them into two different vectors (this is as if the
ERStats were extracted from two different channels).
An example of MSERsToERStats in use can be found in the text detection webcam_demo:
<https://github.com/opencv/opencv_contrib/blob/master/modules/text/samples/webcam_demo.cpp>
*/
CV_EXPORTS void MSERsToERStats(InputArray image, std::vector<std::vector<Point> > &contours,
std::vector<std::vector<ERStat> > &regions);
// Utility function for scripting
CV_EXPORTS_W void detectRegions(InputArray image, const Ptr<ERFilter>& er_filter1, const Ptr<ERFilter>& er_filter2, CV_OUT std::vector< std::vector<Point> >& regions);
/** @brief Extracts text regions from image.
@param image Source image from which text blocks need to be extracted. Should be CV_8UC3 (color).
@param er_filter1 Extremal Region Filter for the 1st stage classifier of N&M algorithm @cite Neumann12
@param er_filter2 Extremal Region Filter for the 2nd stage classifier of N&M algorithm @cite Neumann12
@param groups_rects Output list of rectangle blocks with text
@param method Grouping method (see text::erGrouping_Modes). Can be one of ERGROUPING_ORIENTATION_HORIZ, ERGROUPING_ORIENTATION_ANY.
@param filename The XML or YAML file with the classifier model (e.g. samples/trained_classifier_erGrouping.xml). Only to use when grouping method is ERGROUPING_ORIENTATION_ANY.
@param minProbability The minimum probability for accepting a group. Only to use when grouping method is ERGROUPING_ORIENTATION_ANY.
*/
CV_EXPORTS_W void detectRegions(InputArray image, const Ptr<ERFilter>& er_filter1, const Ptr<ERFilter>& er_filter2, CV_OUT std::vector<Rect> &groups_rects,
int method = ERGROUPING_ORIENTATION_HORIZ,
const String& filename = String(),
float minProbability = (float)0.5);
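/** Illustrative sketch (not part of the upstream header): the single-call convenience
    variant, reusing the er_filter1 / er_filter2 objects created earlier.
@code
    std::vector<Rect> text_boxes;
    detectRegions(rgb_image, er_filter1, er_filter2, text_boxes,
                  ERGROUPING_ORIENTATION_HORIZ);
@endcode
*/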
//! @}
}
}
#endif // __OPENCV_TEXT_ERFILTER_HPP__

3rdparty/opencv/inc/opencv2/text/ocr.hpp (new vendored file)

@@ -0,0 +1,589 @@
/*M//////////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_TEXT_OCR_HPP__
#define __OPENCV_TEXT_OCR_HPP__
#include <opencv2/core.hpp>
#include <vector>
#include <string>
namespace cv
{
namespace text
{
//! @addtogroup text_recognize
//! @{
enum
{
OCR_LEVEL_WORD,
OCR_LEVEL_TEXTLINE
};
//! Tesseract.PageSegMode Enumeration
enum page_seg_mode
{
PSM_OSD_ONLY,
PSM_AUTO_OSD,
PSM_AUTO_ONLY,
PSM_AUTO,
PSM_SINGLE_COLUMN,
PSM_SINGLE_BLOCK_VERT_TEXT,
PSM_SINGLE_BLOCK,
PSM_SINGLE_LINE,
PSM_SINGLE_WORD,
PSM_CIRCLE_WORD,
PSM_SINGLE_CHAR
};
//! Tesseract.OcrEngineMode Enumeration
enum ocr_engine_mode
{
OEM_TESSERACT_ONLY,
OEM_CUBE_ONLY,
OEM_TESSERACT_CUBE_COMBINED,
OEM_DEFAULT
};
// Base class BaseOCR declares a common API that would be used in a typical text recognition scenario
class CV_EXPORTS_W BaseOCR
{
public:
virtual ~BaseOCR() {};
virtual void run(Mat& image, std::string& output_text, std::vector<Rect>* component_rects=NULL,
std::vector<std::string>* component_texts=NULL, std::vector<float>* component_confidences=NULL,
int component_level=0) = 0;
virtual void run(Mat& image, Mat& mask, std::string& output_text, std::vector<Rect>* component_rects=NULL,
std::vector<std::string>* component_texts=NULL, std::vector<float>* component_confidences=NULL,
int component_level=0) = 0;
};
/** @brief OCRTesseract class provides an interface with the tesseract-ocr API (v3.02.02) in C++.
Notice that it is compiled only when tesseract-ocr is correctly installed.
@note
- (C++) An example of OCRTesseract recognition combined with scene text detection can be found
at the end_to_end_recognition demo:
<https://github.com/opencv/opencv_contrib/blob/master/modules/text/samples/end_to_end_recognition.cpp>
- (C++) Another example of OCRTesseract recognition combined with scene text detection can be
found at the webcam_demo:
<https://github.com/opencv/opencv_contrib/blob/master/modules/text/samples/webcam_demo.cpp>
*/
class CV_EXPORTS_W OCRTesseract : public BaseOCR
{
public:
/** @brief Recognize text using the tesseract-ocr API.
Takes an image as input and returns recognized text in the output_text parameter. Optionally
provides also the Rects for individual text elements found (e.g. words), and the list of those
text elements with their confidence values.
@param image Input image CV_8UC1 or CV_8UC3
@param output_text Output text of the tesseract-ocr.
@param component_rects If provided the method will output a list of Rects for the individual
text elements found (e.g. words or text lines).
@param component_texts If provided the method will output a list of text strings for the
recognition of individual text elements found (e.g. words or text lines).
@param component_confidences If provided the method will output a list of confidence values
for the recognition of individual text elements found (e.g. words or text lines).
@param component_level OCR_LEVEL_WORD (by default), or OCR_LEVEL_TEXTLINE.
*/
virtual void run(Mat& image, std::string& output_text, std::vector<Rect>* component_rects=NULL,
std::vector<std::string>* component_texts=NULL, std::vector<float>* component_confidences=NULL,
int component_level=0) CV_OVERRIDE;
virtual void run(Mat& image, Mat& mask, std::string& output_text, std::vector<Rect>* component_rects=NULL,
std::vector<std::string>* component_texts=NULL, std::vector<float>* component_confidences=NULL,
int component_level=0) CV_OVERRIDE;
// aliases for scripting
CV_WRAP String run(InputArray image, int min_confidence, int component_level=0);
CV_WRAP String run(InputArray image, InputArray mask, int min_confidence, int component_level=0);
CV_WRAP virtual void setWhiteList(const String& char_whitelist) = 0;
/** @brief Creates an instance of the OCRTesseract class. Initializes Tesseract.
@param datapath the name of the parent directory of tessdata ending with "/", or NULL to use the
system's default directory.
@param language an ISO 639-3 code; NULL defaults to "eng".
@param char_whitelist specifies the list of characters used for recognition. NULL defaults to
"0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ".
@param oem tesseract-ocr offers different OCR Engine Modes (OEM), by default
tesseract::OEM_DEFAULT is used. See the tesseract-ocr API documentation for other possible
values.
@param psmode tesseract-ocr offers different Page Segmentation Modes (PSM); by default tesseract::PSM_AUTO
(fully automatic layout analysis) is used. See the tesseract-ocr API documentation for other
possible values.
*/
CV_WRAP static Ptr<OCRTesseract> create(const char* datapath=NULL, const char* language=NULL,
const char* char_whitelist=NULL, int oem=OEM_DEFAULT, int psmode=PSM_AUTO);
};
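/** Illustrative sketch (not part of the upstream header): recognizing the text inside one
    of the boxes produced by the detection sketch in erfilter.hpp. Requires a working
    tesseract-ocr installation; the crop is a hypothetical example.
@code
    Ptr<OCRTesseract> ocr = OCRTesseract::create();   // defaults: "eng", OEM_DEFAULT, PSM_AUTO
    Mat word_img = rgb_image(text_boxes[0]);          // hypothetical CV_8UC3 crop
    std::string text;
    std::vector<Rect> word_rects;
    std::vector<std::string> words;
    std::vector<float> confidences;
    ocr->run(word_img, text, &word_rects, &words, &confidences, OCR_LEVEL_WORD);
@endcode
*/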
/* OCR HMM Decoder */
enum decoder_mode
{
OCR_DECODER_VITERBI = 0 // Other algorithms may be added
};
/* OCR classifier type*/
enum classifier_type
{
OCR_KNN_CLASSIFIER = 0,
OCR_CNN_CLASSIFIER = 1
};
/** @brief OCRHMMDecoder class provides an interface for OCR using Hidden Markov Models.
@note
- (C++) An example on using OCRHMMDecoder recognition combined with scene text detection can
be found at the webcam_demo sample:
<https://github.com/opencv/opencv_contrib/blob/master/modules/text/samples/webcam_demo.cpp>
*/
class CV_EXPORTS_W OCRHMMDecoder : public BaseOCR
{
public:
/** @brief The character classifier callback is wrapped in a class.
This way it hides the feature extractor and the classifier itself, so developers can write
their own OCR code.
The default character classifier and feature extractor can be loaded using the utility function
loadOCRHMMClassifierNM with the KNN model provided in
<https://github.com/opencv/opencv_contrib/blob/master/modules/text/samples/OCRHMM_knn_model_data.xml.gz>.
*/
class CV_EXPORTS_W ClassifierCallback
{
public:
virtual ~ClassifierCallback() { }
/** @brief The character classifier must return a (ranked list of) class id(s).
@param image Input image CV_8UC1 or CV_8UC3 with a single letter.
@param out_class The classifier returns the character class categorical label, or list of
class labels, to which the input image corresponds.
@param out_confidence The classifier returns the probability of the input image
corresponding to each class in out_class.
*/
virtual void eval( InputArray image, std::vector<int>& out_class, std::vector<double>& out_confidence);
};
public:
/** @brief Recognize text using HMM.
Takes a binary image as input and returns recognized text in the output_text parameter. Optionally
provides also the Rects for individual text elements found (e.g. words), and the list of those
text elements with their confidence values.
@param image Input binary image CV_8UC1 with a single text line (or word).
@param output_text Output text. Most likely character sequence found by the HMM decoder.
@param component_rects If provided the method will output a list of Rects for the individual
text elements found (e.g. words).
@param component_texts If provided the method will output a list of text strings for the
recognition of individual text elements found (e.g. words).
@param component_confidences If provided the method will output a list of confidence values
for the recognition of individual text elements found (e.g. words).
@param component_level Only OCR_LEVEL_WORD is supported.
*/
virtual void run(Mat& image, std::string& output_text, std::vector<Rect>* component_rects=NULL,
std::vector<std::string>* component_texts=NULL, std::vector<float>* component_confidences=NULL,
int component_level=0) CV_OVERRIDE;
/** @brief Recognize text using HMM.
Takes an image and a mask (where each connected component corresponds to a segmented character)
as input and returns recognized text in the output_text parameter. Optionally
provides also the Rects for individual text elements found (e.g. words), and the list of those
text elements with their confidence values.
@param image Input image CV_8UC1 or CV_8UC3 with a single text line (or word).
@param mask Input binary image CV_8UC1 same size as input image. Each connected component in mask corresponds to a segmented character in the input image.
@param output_text Output text. Most likely character sequence found by the HMM decoder.
@param component_rects If provided the method will output a list of Rects for the individual
text elements found (e.g. words).
@param component_texts If provided the method will output a list of text strings for the
recognition of individual text elements found (e.g. words).
@param component_confidences If provided the method will output a list of confidence values
for the recognition of individual text elements found (e.g. words).
@param component_level Only OCR_LEVEL_WORD is supported.
*/
virtual void run(Mat& image, Mat& mask, std::string& output_text, std::vector<Rect>* component_rects=NULL,
std::vector<std::string>* component_texts=NULL, std::vector<float>* component_confidences=NULL,
int component_level=0) CV_OVERRIDE;
// aliases for scripting
CV_WRAP String run(InputArray image, int min_confidence, int component_level=0);
CV_WRAP String run(InputArray image, InputArray mask, int min_confidence, int component_level=0);
/** @brief Creates an instance of the OCRHMMDecoder class. Initializes HMMDecoder.
@param classifier The character classifier with built in feature extractor.
@param vocabulary The language vocabulary (chars when ASCII English text). vocabulary.size()
must be equal to the number of classes of the classifier.
@param transition_probabilities_table Table with transition probabilities between character
pairs. cols == rows == vocabulary.size().
@param emission_probabilities_table Table with observation emission probabilities. cols ==
rows == vocabulary.size().
@param mode HMM Decoding algorithm. Only OCR_DECODER_VITERBI is available for the moment
(<http://en.wikipedia.org/wiki/Viterbi_algorithm>).
*/
CV_WRAP static Ptr<OCRHMMDecoder> create(const Ptr<OCRHMMDecoder::ClassifierCallback> classifier,// The character classifier with built in feature extractor
const String& vocabulary, // The language vocabulary (chars when ASCII English text)
// size() must be equal to the number of classes
InputArray transition_probabilities_table, // Table with transition probabilities between character pairs
// cols == rows == vocabulary.size()
InputArray emission_probabilities_table, // Table with observation emission probabilities
// cols == rows == vocabulary.size()
int mode = OCR_DECODER_VITERBI); // HMM Decoding algorithm (only Viterbi for the moment)
/** @brief Creates an instance of the OCRHMMDecoder class. Loads and initializes HMMDecoder from the specified path
@overload
*/
CV_WRAP static Ptr<OCRHMMDecoder> create(const String& filename,
const String& vocabulary, // The language vocabulary (chars when ASCII English text)
// size() must be equal to the number of classes
InputArray transition_probabilities_table, // Table with transition probabilities between character pairs
// cols == rows == vocabulary.size()
InputArray emission_probabilities_table, // Table with observation emission probabilities
// cols == rows == vocabulary.size()
int mode = OCR_DECODER_VITERBI, // HMM Decoding algorithm (only Viterbi for the moment)
int classifier = OCR_KNN_CLASSIFIER); // The character classifier type
protected:
Ptr<OCRHMMDecoder::ClassifierCallback> classifier;
std::string vocabulary;
Mat transition_p;
Mat emission_p;
decoder_mode mode;
};
/** @brief Allows implicitly loading the default character classifier when creating an OCRHMMDecoder object.
@param filename The XML or YAML file with the classifier model (e.g. OCRHMM_knn_model_data.xml)
The KNN default classifier is based on the scene text recognition method proposed by Lukás Neumann &
Jiri Matas in [Neumann11b]. Basically, the region (contour) in the input image is normalized to a
fixed size, while retaining the centroid and aspect ratio, in order to extract a feature vector
based on gradient orientations along the chain-code of its perimeter. Then, the region is classified
using a KNN model trained with synthetic data of rendered characters with different standard font
types.
@deprecated use loadOCRHMMClassifier instead
*/
CV_EXPORTS_W Ptr<OCRHMMDecoder::ClassifierCallback> loadOCRHMMClassifierNM(const String& filename);
/** @brief Allows implicitly loading the default character classifier when creating an OCRHMMDecoder object.
@param filename The XML or YAML file with the classifier model (e.g. OCRBeamSearch_CNN_model_data.xml.gz)
The CNN default classifier is based on the scene text recognition method proposed by Adam Coates &
Andrew Ng in [Coates11a]. The character classifier consists of a Single Layer Convolutional Neural Network and
a linear classifier. It is applied to the input image in a sliding window fashion, providing a set of recognitions
at each window location.
@deprecated use loadOCRHMMClassifier instead
*/
CV_EXPORTS_W Ptr<OCRHMMDecoder::ClassifierCallback> loadOCRHMMClassifierCNN(const String& filename);
/** @brief Allows implicitly loading the default character classifier when creating an OCRHMMDecoder object.
@param filename The XML or YAML file with the classifier model (e.g. OCRBeamSearch_CNN_model_data.xml.gz)
@param classifier Can be one of classifier_type enum values.
*/
CV_EXPORTS_W Ptr<OCRHMMDecoder::ClassifierCallback> loadOCRHMMClassifier(const String& filename, int classifier);
//! @}
/** @brief Utility function to create a tailored language model transitions table from a given list of words (lexicon).
*
* @param vocabulary The language vocabulary (chars when ASCII English text).
*
* @param lexicon The list of words that are expected to be found in a particular image.
*
* @param transition_probabilities_table Output table with transition probabilities between character pairs. cols == rows == vocabulary.size().
*
* The function calculates frequency statistics of character pairs from the given lexicon and fills the output transition_probabilities_table with them. The transition_probabilities_table can be used as input in the OCRHMMDecoder::create() and OCRBeamSearchDecoder::create() methods.
* @note
* - (C++) An alternative would be to load the default generic language transition table provided in the text module samples folder (created from the ispell 42869 English words list):
* <https://github.com/opencv/opencv_contrib/blob/master/modules/text/samples/OCRHMM_transitions_table.xml>
**/
CV_EXPORTS void createOCRHMMTransitionsTable(std::string& vocabulary, std::vector<std::string>& lexicon, OutputArray transition_probabilities_table);
CV_EXPORTS_W Mat createOCRHMMTransitionsTable(const String& vocabulary, std::vector<cv::String>& lexicon);
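/** Illustrative sketch (not part of the upstream header): building a tailored transition
    table from a small lexicon and decoding a hypothetical segmented word image. The
    vocabulary, the identity emission table and the classifier file name are assumptions;
    the vocabulary must match the number of classes of the loaded classifier.
@code
    String vocabulary = "abcdefghijklmnopqrstuvwxyz";               // assumed 26-class model
    std::vector<cv::String> lexicon;
    lexicon.push_back("open");
    lexicon.push_back("text");
    Mat transitions = createOCRHMMTransitionsTable(vocabulary, lexicon);
    Ptr<OCRHMMDecoder> decoder = OCRHMMDecoder::create(
        loadOCRHMMClassifier("OCRHMM_knn_model_data.xml.gz", OCR_KNN_CLASSIFIER),
        vocabulary, transitions,
        Mat::eye((int)vocabulary.size(), (int)vocabulary.size(), CV_64FC1));  // placeholder emission table
    std::string recognized;
    decoder->run(word_img, recognized);   // word_img: hypothetical CV_8UC1 binary word image
@endcode
*/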
/* OCR BeamSearch Decoder */
/** @brief OCRBeamSearchDecoder class provides an interface for OCR using Beam Search algorithm.
@note
- (C++) An example on using OCRBeamSearchDecoder recognition combined with scene text detection can
be found at the demo sample:
<https://github.com/opencv/opencv_contrib/blob/master/modules/text/samples/word_recognition.cpp>
*/
class CV_EXPORTS_W OCRBeamSearchDecoder : public BaseOCR
{
public:
/** @brief The character classifier callback is wrapped in a class.
This way it hides the feature extractor and the classifier itself, so developers can write
their own OCR code.
The default character classifier and feature extractor can be loaded using the utility function
loadOCRBeamSearchClassifierCNN with all its parameters provided in
<https://github.com/opencv/opencv_contrib/blob/master/modules/text/samples/OCRBeamSearch_CNN_model_data.xml.gz>.
*/
class CV_EXPORTS_W ClassifierCallback
{
public:
virtual ~ClassifierCallback() { }
/** @brief The character classifier must return a (ranked list of) class id(s).
@param image Input image CV_8UC1 or CV_8UC3 with a single letter.
@param recognition_probabilities For each of the N characters found the classifier returns a list with
class probabilities for each class.
@param oversegmentation The classifier returns a list of N+1 character locations' x-coordinates,
including 0 as start-sequence location.
*/
virtual void eval( InputArray image, std::vector< std::vector<double> >& recognition_probabilities, std::vector<int>& oversegmentation );
int getWindowSize() {return 0;}
int getStepSize() {return 0;}
};
public:
/** @brief Recognize text using Beam Search.
Takes an image as input and returns recognized text in the output_text parameter. Optionally
provides also the Rects for individual text elements found (e.g. words), and the list of those
text elements with their confidence values.
@param image Input binary image CV_8UC1 with a single text line (or word).
@param output_text Output text. Most likely character sequence found by the decoder.
@param component_rects If provided the method will output a list of Rects for the individual
text elements found (e.g. words).
@param component_texts If provided the method will output a list of text strings for the
recognition of individual text elements found (e.g. words).
@param component_confidences If provided the method will output a list of confidence values
for the recognition of individual text elements found (e.g. words).
@param component_level Only OCR_LEVEL_WORD is supported.
*/
virtual void run(Mat& image, std::string& output_text, std::vector<Rect>* component_rects=NULL,
std::vector<std::string>* component_texts=NULL, std::vector<float>* component_confidences=NULL,
int component_level=0) CV_OVERRIDE;
virtual void run(Mat& image, Mat& mask, std::string& output_text, std::vector<Rect>* component_rects=NULL,
std::vector<std::string>* component_texts=NULL, std::vector<float>* component_confidences=NULL,
int component_level=0) CV_OVERRIDE;
// aliases for scripting
CV_WRAP String run(InputArray image, int min_confidence, int component_level=0);
CV_WRAP String run(InputArray image, InputArray mask, int min_confidence, int component_level=0);
/** @brief Creates an instance of the OCRBeamSearchDecoder class. Initializes the decoder.
@param classifier The character classifier with built in feature extractor.
@param vocabulary The language vocabulary (chars when ASCII English text). vocabulary.size()
must be equal to the number of classes of the classifier.
@param transition_probabilities_table Table with transition probabilities between character
pairs. cols == rows == vocabulary.size().
@param emission_probabilities_table Table with observation emission probabilities. cols ==
rows == vocabulary.size().
@param mode HMM Decoding algorithm. Only OCR_DECODER_VITERBI is available for the moment
(<http://en.wikipedia.org/wiki/Viterbi_algorithm>).
@param beam_size Size of the beam in Beam Search algorithm.
*/
static CV_WRAP
Ptr<OCRBeamSearchDecoder> create(const Ptr<OCRBeamSearchDecoder::ClassifierCallback> classifier,// The character classifier with built in feature extractor
const std::string& vocabulary, // The language vocabulary (chars when ASCII English text)
// size() must be equal to the number of classes
InputArray transition_probabilities_table, // Table with transition probabilities between character pairs
// cols == rows == vocabulary.size()
InputArray emission_probabilities_table, // Table with observation emission probabilities
// cols == rows == vocabulary.size()
text::decoder_mode mode = OCR_DECODER_VITERBI, // HMM Decoding algorithm (only Viterbi for the moment)
int beam_size = 500 // Size of the beam in Beam Search algorithm
);
/** @brief Creates an instance of the OCRBeamSearchDecoder class. Initializes the decoder from the specified path.
@overload
*/
static //CV_WRAP FIXIT bug in handling of Java overloads
Ptr<OCRBeamSearchDecoder> create(const String& filename, // The character classifier file
const String& vocabulary, // The language vocabulary (chars when ASCII English text)
// size() must be equal to the number of classes
InputArray transition_probabilities_table, // Table with transition probabilities between character pairs
// cols == rows == vocabulary.size()
InputArray emission_probabilities_table, // Table with observation emission probabilities
// cols == rows == vocabulary.size()
text::decoder_mode mode = OCR_DECODER_VITERBI, // HMM Decoding algorithm (only Viterbi for the moment)
int beam_size = 500 // Size of the beam in Beam Search algorithm
);
protected:
Ptr<OCRBeamSearchDecoder::ClassifierCallback> classifier;
std::string vocabulary;
Mat transition_p;
Mat emission_p;
decoder_mode mode;
int beam_size;
};
/** @brief Allows implicitly loading the default character classifier when creating an OCRBeamSearchDecoder object.
@param filename The XML or YAML file with the classifier model (e.g. OCRBeamSearch_CNN_model_data.xml.gz)
The CNN default classifier is based on the scene text recognition method proposed by Adam Coates &
Andrew Ng in [Coates11a]. The character classifier consists of a Single Layer Convolutional Neural Network and
a linear classifier. It is applied to the input image in a sliding window fashion, providing a set of recognitions
at each window location.
*/
CV_EXPORTS_W Ptr<OCRBeamSearchDecoder::ClassifierCallback> loadOCRBeamSearchClassifierCNN(const String& filename);
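/** Illustrative sketch (not part of the upstream header): a beam-search decoder built from
    the default CNN character classifier. The vocabulary and the transition/emission tables
    are assumptions; in practice the transition table can be produced with createOCRHMMTransitionsTable.
@code
    String voc = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ";
    Mat transitions = Mat::eye((int)voc.size(), (int)voc.size(), CV_64FC1);  // placeholder table
    Mat emissions = Mat::eye((int)voc.size(), (int)voc.size(), CV_64FC1);
    Ptr<OCRBeamSearchDecoder> bsd = OCRBeamSearchDecoder::create(
        loadOCRBeamSearchClassifierCNN("OCRBeamSearch_CNN_model_data.xml.gz"),
        voc, transitions, emissions, OCR_DECODER_VITERBI, 50);
    std::string recognized;
    bsd->run(word_img, recognized);   // word_img: hypothetical CV_8UC1 word image
@endcode
*/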
/** @brief OCRHolisticWordRecognizer class provides the functionality of segmented word spotting.
* Given a predefined vocabulary, a DictNet is employed to select the most probable
* word given an input image.
*
* DictNet is described in detail in:
* Max Jaderberg et al.: Reading Text in the Wild with Convolutional Neural Networks, IJCV 2015
* http://arxiv.org/abs/1412.1842
*/
class CV_EXPORTS OCRHolisticWordRecognizer : public BaseOCR
{
public:
virtual void run(Mat& image,
std::string& output_text,
std::vector<Rect>* component_rects = NULL,
std::vector<std::string>* component_texts = NULL,
std::vector<float>* component_confidences = NULL,
int component_level = OCR_LEVEL_WORD) CV_OVERRIDE = 0;
/** @brief Recognize text using a segmentation-based word-spotting/classifier CNN.
Takes an image as input and returns recognized text in the output_text parameter. Optionally
provides also the Rects for individual text elements found (e.g. words), and the list of those
text elements with their confidence values.
@param image Input image CV_8UC1 or CV_8UC3
@param mask is totally ignored and is only available for compatibility reasons
@param output_text Output text of the word spotting, always one that exists in the dictionary.
@param component_rects Not applicable for word spotting; can be NULL. If not NULL, a single element will
be put in the vector.
@param component_texts Not applicable for word spotting; can be NULL. If not NULL, a single element will
be put in the vector.
@param component_confidences Not applicable for word spotting; can be NULL. If not NULL, a single element will
be put in the vector.
@param component_level must be OCR_LEVEL_WORD.
*/
virtual void run(Mat& image,
Mat& mask,
std::string& output_text,
std::vector<Rect>* component_rects = NULL,
std::vector<std::string>* component_texts = NULL,
std::vector<float>* component_confidences = NULL,
int component_level = OCR_LEVEL_WORD) CV_OVERRIDE = 0;
/** @brief Creates an instance of the OCRHolisticWordRecognizer class.
*/
static Ptr<OCRHolisticWordRecognizer> create(const std::string &archFilename,
const std::string &weightsFilename,
const std::string &wordsFilename);
};
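/** Illustrative sketch (not part of the upstream header): word spotting with a pre-trained
    DictNet model. The three file names are assumptions following the sample naming; the
    actual model files must be obtained separately.
@code
    Ptr<OCRHolisticWordRecognizer> spotter = OCRHolisticWordRecognizer::create(
        "dictnet_vgg_deploy.prototxt", "dictnet_vgg.caffemodel", "dictnet_vgg_labels.txt");
    std::string word;
    std::vector<float> confidence;
    spotter->run(word_img, word, 0, 0, &confidence, OCR_LEVEL_WORD);  // word_img: hypothetical crop
@endcode
*/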
//! @}
}} // cv::text::
#endif // __OPENCV_TEXT_OCR_HPP__


@@ -0,0 +1,24 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#ifndef __OPENCV_TEXT_SWTTEXTDETECTOR_HPP__
#define __OPENCV_TEXT_SWTTEXTDETECTOR_HPP__
#include <opencv2/core.hpp>
#include <vector>
namespace cv {
namespace text {
/** @brief Applies the Stroke Width Transform operator followed by filtering of connected components of similar Stroke Widths to return letter candidates. It also chains them by proximity and size, saving the result in chainBBs.
@param input the input image with 3 channels.
@param result a vector of resulting bounding boxes where the probability of finding text is high
@param dark_on_light a boolean value signifying whether the text is darker or lighter than the background; it reverses the direction of the gradient obtained from the Scharr operator and significantly affects the result.
@param draw an optional Mat of type CV_8UC3 which visualises the detected letters using bounding boxes.
@param chainBBs an optional parameter which chains the letter candidates according to heuristics in the paper and returns all possible regions where text is likely to occur.
*/
CV_EXPORTS_W void detectTextSWT(InputArray input, CV_OUT std::vector<cv::Rect>& result, bool dark_on_light, OutputArray& draw = noArray(), OutputArray& chainBBs = noArray());
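/** Illustrative sketch (not part of the upstream header): detecting dark-on-light letter
    candidates with the Stroke Width Transform and obtaining a visualisation image.
@code
    std::vector<cv::Rect> letters;
    Mat visualisation;
    detectTextSWT(bgr_image, letters, true, visualisation);   // bgr_image: hypothetical CV_8UC3 input
@endcode
*/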
}
}
#endif


@@ -0,0 +1,73 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#ifndef __OPENCV_TEXT_TEXTDETECTOR_HPP__
#define __OPENCV_TEXT_TEXTDETECTOR_HPP__
#include "ocr.hpp"
namespace cv
{
namespace text
{
//! @addtogroup text_detect
//! @{
/** @brief An abstract class providing interface for text detection algorithms
*/
class CV_EXPORTS_W TextDetector
{
public:
/**
@brief Method that provides a quick and simple interface to detect text inside an image
@param inputImage an image to process
@param Bbox a vector of Rect that will store the detected word bounding box
@param confidence a vector of float that will be updated with the confidence the classifier has for the selected bounding box
*/
CV_WRAP virtual void detect(InputArray inputImage, CV_OUT std::vector<Rect>& Bbox, CV_OUT std::vector<float>& confidence) = 0;
virtual ~TextDetector() {}
};
/** @brief TextDetectorCNN class provides the functionality of text bounding box detection.
This class finds bounding boxes of text words in an input image.
This class uses OpenCV dnn module to load pre-trained model described in @cite LiaoSBWL17.
The original repository with the modified SSD Caffe version: https://github.com/MhLiao/TextBoxes.
Model can be downloaded from [DropBox](https://www.dropbox.com/s/g8pjzv2de9gty8g/TextBoxes_icdar13.caffemodel?dl=0).
Modified .prototxt file with the model description can be found in `opencv_contrib/modules/text/samples/textbox.prototxt`.
*/
class CV_EXPORTS_W TextDetectorCNN : public TextDetector
{
public:
/**
@overload
@param inputImage an image expected to be a CV_8UC3 of any size
@param Bbox a vector of Rect that will store the detected word bounding box
@param confidence a vector of float that will be updated with the confidence the classifier has for the selected bounding box
*/
CV_WRAP virtual void detect(InputArray inputImage, CV_OUT std::vector<Rect>& Bbox, CV_OUT std::vector<float>& confidence) CV_OVERRIDE = 0;
/** @brief Creates an instance of the TextDetectorCNN class using the provided parameters.
@param modelArchFilename the relative or absolute path to the prototxt file describing the classifier's architecture.
@param modelWeightsFilename the relative or absolute path to the file containing the pretrained weights of the model in caffe-binary form.
@param detectionSizes a list of sizes for multiscale detection. The values `[(300,300),(700,500),(700,300),(700,700),(1600,1600)]` are
recommended in @cite LiaoSBWL17 to achieve the best quality.
*/
static Ptr<TextDetectorCNN> create(const String& modelArchFilename, const String& modelWeightsFilename,
std::vector<Size> detectionSizes);
/**
@overload
*/
CV_WRAP static Ptr<TextDetectorCNN> create(const String& modelArchFilename, const String& modelWeightsFilename);
};
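/** Illustrative sketch (not part of the upstream header): running the pre-trained TextBoxes
    model referenced above; the file names are assumptions matching the links in the class
    documentation.
@code
    Ptr<TextDetectorCNN> detector = TextDetectorCNN::create("textbox.prototxt",
                                                            "TextBoxes_icdar13.caffemodel");
    std::vector<Rect> boxes;
    std::vector<float> scores;
    detector->detect(bgr_image, boxes, scores);   // bgr_image: hypothetical CV_8UC3 input
@endcode
*/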
//! @}
}//namespace text
}//namespace cv
#endif // __OPENCV_TEXT_TEXTDETECTOR_HPP__