Add project files.

CaiXiang
2024-09-25 09:43:03 +08:00
parent f5c6245902
commit 3e82af9e90
730 changed files with 350436 additions and 0 deletions

View File

@@ -0,0 +1,421 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_FEATURE_HPP__
#define __OPENCV_FEATURE_HPP__
#include "opencv2/core.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
#include <string>
#include <time.h>
/*
* TODO This implementation is based on apps/traincascade/
* TODO Changed CvHaarEvaluator based on ADABOOSTING implementation (Grabner et al.)
*/
namespace cv {
namespace detail {
inline namespace tracking {
//! @addtogroup tracking_detail
//! @{
inline namespace contrib_feature {
#define FEATURES "features"
#define CC_FEATURES FEATURES
#define CC_FEATURE_PARAMS "featureParams"
#define CC_MAX_CAT_COUNT "maxCatCount"
#define CC_FEATURE_SIZE "featSize"
#define CC_NUM_FEATURES "numFeat"
#define CC_ISINTEGRAL "isIntegral"
#define CC_RECTS "rects"
#define CC_TILTED "tilted"
#define CC_RECT "rect"
#define LBPF_NAME "lbpFeatureParams"
#define HOGF_NAME "HOGFeatureParams"
#define HFP_NAME "haarFeatureParams"
#define CV_HAAR_FEATURE_MAX 3
#define N_BINS 9
#define N_CELLS 4
#define CV_SUM_OFFSETS( p0, p1, p2, p3, rect, step ) \
/* (x, y) */ \
(p0) = (rect).x + (step) * (rect).y; \
/* (x + w, y) */ \
(p1) = (rect).x + (rect).width + (step) * (rect).y; \
/* (x, y + h) */ \
(p2) = (rect).x + (step) * ((rect).y + (rect).height); \
/* (x + w, y + h) */ \
(p3) = (rect).x + (rect).width + (step) * ((rect).y + (rect).height);
#define CV_TILTED_OFFSETS( p0, p1, p2, p3, rect, step ) \
/* (x, y) */ \
(p0) = (rect).x + (step) * (rect).y; \
/* (x - h, y + h) */ \
(p1) = (rect).x - (rect).height + (step) * ((rect).y + (rect).height);\
/* (x + w, y + w) */ \
(p2) = (rect).x + (rect).width + (step) * ((rect).y + (rect).width); \
/* (x + w - h, y + w + h) */ \
(p3) = (rect).x + (rect).width - (rect).height \
+ (step) * ((rect).y + (rect).width + (rect).height);
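/* Editorial usage sketch (not part of the original header): the offset macros
   precompute flat indices into an integral image so that a rectangle sum costs
   four lookups. The helper name rectSum is hypothetical.

    static float rectSum( const cv::Mat& img, cv::Rect r )
    {
        cv::Mat sum;
        cv::integral( img, sum, CV_32F );       // (rows+1) x (cols+1) integral image
        int p0, p1, p2, p3;
        int step = (int)sum.step1();            // row stride in elements
        CV_SUM_OFFSETS( p0, p1, p2, p3, r, step )
        const float* base = sum.ptr<float>();
        // inclusion-exclusion over the four corners
        return base[p3] - base[p2] - base[p1] + base[p0];
    }
*/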
float calcNormFactor( const Mat& sum, const Mat& sqSum );
template<class Feature>
void _writeFeatures( const std::vector<Feature>& features, FileStorage &fs, const Mat& featureMap )
{
fs << FEATURES << "[";
const Mat_<int>& featureMap_ = (const Mat_<int>&) featureMap;
for ( int fi = 0; fi < featureMap.cols; fi++ )
if( featureMap_( 0, fi ) >= 0 )
{
fs << "{";
features[fi].write( fs );
fs << "}";
}
fs << "]";
}
class CvParams
{
public:
CvParams();
virtual ~CvParams()
{
}
// from|to file
virtual void write( FileStorage &fs ) const = 0;
virtual bool read( const FileNode &node ) = 0;
// from|to screen
virtual void printDefaults() const;
virtual void printAttrs() const;
virtual bool scanAttr( const std::string prmName, const std::string val );
std::string name;
};
class CvFeatureParams : public CvParams
{
public:
enum FeatureType
{
HAAR = 0,
LBP = 1,
HOG = 2
};
CvFeatureParams();
virtual void init( const CvFeatureParams& fp );
virtual void write( FileStorage &fs ) const CV_OVERRIDE;
virtual bool read( const FileNode &node ) CV_OVERRIDE;
static Ptr<CvFeatureParams> create(CvFeatureParams::FeatureType featureType);
int maxCatCount; // 0 in case of numerical features
int featSize; // 1 in case of simple features (HAAR, LBP) and N_BINS(9)*N_CELLS(4) in case of Dalal's HOG features
int numFeatures;
};
class CvFeatureEvaluator
{
public:
virtual ~CvFeatureEvaluator()
{
}
virtual void init( const CvFeatureParams *_featureParams, int _maxSampleCount, Size _winSize );
virtual void setImage( const Mat& img, uchar clsLabel, int idx );
virtual void writeFeatures( FileStorage &fs, const Mat& featureMap ) const = 0;
virtual float operator()( int featureIdx, int sampleIdx ) = 0;
static Ptr<CvFeatureEvaluator> create(CvFeatureParams::FeatureType type);
int getNumFeatures() const
{
return numFeatures;
}
int getMaxCatCount() const
{
return featureParams->maxCatCount;
}
int getFeatureSize() const
{
return featureParams->featSize;
}
const Mat& getCls() const
{
return cls;
}
float getCls( int si ) const
{
return cls.at<float>( si, 0 );
}
protected:
virtual void generateFeatures() = 0;
int npos, nneg;
int numFeatures;
Size winSize;
CvFeatureParams *featureParams;
Mat cls;
};
class CvHaarFeatureParams : public CvFeatureParams
{
public:
CvHaarFeatureParams();
virtual void init( const CvFeatureParams& fp ) CV_OVERRIDE;
virtual void write( FileStorage &fs ) const CV_OVERRIDE;
virtual bool read( const FileNode &node ) CV_OVERRIDE;
virtual void printDefaults() const CV_OVERRIDE;
virtual void printAttrs() const CV_OVERRIDE;
virtual bool scanAttr( const std::string prm, const std::string val ) CV_OVERRIDE;
bool isIntegral;
};
class CvHaarEvaluator : public CvFeatureEvaluator
{
public:
class FeatureHaar
{
public:
FeatureHaar( Size patchSize );
bool eval( const Mat& image, Rect ROI, float* result ) const;
int getNumAreas();
const std::vector<float>& getWeights() const;
const std::vector<Rect>& getAreas() const;
void write( FileStorage ) const
{
}
float getInitMean() const;
float getInitSigma() const;
private:
int m_type;
int m_numAreas;
std::vector<float> m_weights;
float m_initMean;
float m_initSigma;
void generateRandomFeature( Size imageSize );
float getSum( const Mat& image, Rect imgROI ) const;
std::vector<Rect> m_areas; // areas within the patch over which to compute the feature
cv::Size m_initSize; // size of the patch used during training
cv::Size m_curSize; // size of the patches currently under investigation
float m_scaleFactorHeight; // scaling factor in vertical direction
float m_scaleFactorWidth; // scaling factor in horizontal direction
std::vector<Rect> m_scaleAreas; // areas after scaling
std::vector<float> m_scaleWeights; // weights after scaling
};
virtual void init( const CvFeatureParams *_featureParams, int _maxSampleCount, Size _winSize ) CV_OVERRIDE;
virtual void setImage( const Mat& img, uchar clsLabel = 0, int idx = 1 ) CV_OVERRIDE;
virtual float operator()( int featureIdx, int sampleIdx ) CV_OVERRIDE;
virtual void writeFeatures( FileStorage &fs, const Mat& featureMap ) const CV_OVERRIDE;
void writeFeature( FileStorage &fs ) const; // for old file format
const std::vector<CvHaarEvaluator::FeatureHaar>& getFeatures() const;
inline CvHaarEvaluator::FeatureHaar& getFeatures( int idx )
{
return features[idx];
}
void setWinSize( Size patchSize );
Size setWinSize() const; // note: despite its name, this is the window-size getter
virtual void generateFeatures() CV_OVERRIDE;
/**
* TODO new method
* \brief Overload the original generateFeatures in order to limit the number of the features
* @param numFeatures Number of the features
*/
virtual void generateFeatures( int numFeatures );
protected:
bool isIntegral;
/* TODO Added from MIL implementation */
Mat _ii_img;
void compute_integral( const cv::Mat & img, std::vector<cv::Mat_<float> > & ii_imgs )
{
Mat ii_img;
integral( img, ii_img, CV_32F );
split( ii_img, ii_imgs );
}
std::vector<FeatureHaar> features;
Mat sum; /* sum images (each row represents image) */
};
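/* Editorial usage sketch (not part of the original header): evaluating Haar
   features on training patches. The window size, sample count and the patch
   Mats (posPatch, negPatch) are illustrative assumptions.

    CvHaarFeatureParams haarParams;
    Ptr<CvFeatureEvaluator> evaluator = CvFeatureEvaluator::create( CvFeatureParams::HAAR );
    evaluator->init( &haarParams, 2, Size( 24, 24 ) );  // 2 sample slots, 24x24 window
    evaluator->setImage( posPatch, 1, 0 );              // class label 1 into slot 0
    evaluator->setImage( negPatch, 0, 1 );              // class label 0 into slot 1
    for ( int f = 0; f < evaluator->getNumFeatures(); f++ )
    {
        float posResponse = (*evaluator)( f, 0 );
        float negResponse = (*evaluator)( f, 1 );
        // feed the responses into a boosting update, etc.
    }
*/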
struct CvHOGFeatureParams : public CvFeatureParams
{
CvHOGFeatureParams();
};
class CvHOGEvaluator : public CvFeatureEvaluator
{
public:
virtual ~CvHOGEvaluator()
{
}
virtual void init( const CvFeatureParams *_featureParams, int _maxSampleCount, Size _winSize ) CV_OVERRIDE;
virtual void setImage( const Mat& img, uchar clsLabel, int idx ) CV_OVERRIDE;
virtual float operator()( int varIdx, int sampleIdx ) CV_OVERRIDE;
virtual void writeFeatures( FileStorage &fs, const Mat& featureMap ) const CV_OVERRIDE;
protected:
virtual void generateFeatures() CV_OVERRIDE;
virtual void integralHistogram( const Mat &img, std::vector<Mat> &histogram, Mat &norm, int nbins ) const;
class Feature
{
public:
Feature();
Feature( int offset, int x, int y, int cellW, int cellH );
float calc( const std::vector<Mat> &_hists, const Mat &_normSum, size_t y, int featComponent ) const;
void write( FileStorage &fs ) const;
void write( FileStorage &fs, int varIdx ) const;
Rect rect[N_CELLS]; //cells
struct
{
int p0, p1, p2, p3;
} fastRect[N_CELLS];
};
std::vector<Feature> features;
Mat normSum; //for normalization computation (L1 or L2)
std::vector<Mat> hist;
};
inline float CvHOGEvaluator::operator()( int varIdx, int sampleIdx )
{
int featureIdx = varIdx / ( N_BINS * N_CELLS );
int componentIdx = varIdx % ( N_BINS * N_CELLS );
//return features[featureIdx].calc( hist, sampleIdx, componentIdx);
return features[featureIdx].calc( hist, normSum, sampleIdx, componentIdx );
}
inline float CvHOGEvaluator::Feature::calc( const std::vector<Mat>& _hists, const Mat& _normSum, size_t y, int featComponent ) const
{
float normFactor;
float res;
int binIdx = featComponent % N_BINS;
int cellIdx = featComponent / N_BINS;
const float *phist = _hists[binIdx].ptr<float>( (int) y );
res = phist[fastRect[cellIdx].p0] - phist[fastRect[cellIdx].p1] - phist[fastRect[cellIdx].p2] + phist[fastRect[cellIdx].p3];
const float *pnormSum = _normSum.ptr<float>( (int) y );
normFactor = (float) ( pnormSum[fastRect[0].p0] - pnormSum[fastRect[1].p1] - pnormSum[fastRect[2].p2] + pnormSum[fastRect[3].p3] );
res = ( res > 0.001f ) ? ( res / ( normFactor + 0.001f ) ) : 0.f; //suppress small negative values that appear due to floating-point precision
return res;
}
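/* Editorial note: each HOG feature (block) spans N_CELLS (4) cells with
   N_BINS (9) orientation bins per cell, so one block contributes 36 scalar
   components and varIdx enumerates them block by block. For example,
   varIdx = 75 gives featureIdx = 75 / 36 = 2 (third block) and
   componentIdx = 75 % 36 = 3, i.e. cellIdx = 0 and binIdx = 3. */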
struct CvLBPFeatureParams : CvFeatureParams
{
CvLBPFeatureParams();
};
class CvLBPEvaluator : public CvFeatureEvaluator
{
public:
virtual ~CvLBPEvaluator() CV_OVERRIDE
{
}
virtual void init( const CvFeatureParams *_featureParams, int _maxSampleCount, Size _winSize ) CV_OVERRIDE;
virtual void setImage( const Mat& img, uchar clsLabel, int idx ) CV_OVERRIDE;
virtual float operator()( int featureIdx, int sampleIdx ) CV_OVERRIDE
{
return (float) features[featureIdx].calc( sum, sampleIdx );
}
virtual void writeFeatures( FileStorage &fs, const Mat& featureMap ) const CV_OVERRIDE;
protected:
virtual void generateFeatures() CV_OVERRIDE;
class Feature
{
public:
Feature();
Feature( int offset, int x, int y, int _block_w, int _block_h );
uchar calc( const Mat& _sum, size_t y ) const;
void write( FileStorage &fs ) const;
Rect rect;
int p[16];
};
std::vector<Feature> features;
Mat sum;
};
inline uchar CvLBPEvaluator::Feature::calc( const Mat &_sum, size_t y ) const
{
const int* psum = _sum.ptr<int>( (int) y );
int cval = psum[p[5]] - psum[p[6]] - psum[p[9]] + psum[p[10]];
return (uchar) ( ( psum[p[0]] - psum[p[1]] - psum[p[4]] + psum[p[5]] >= cval ? 128 : 0 ) | // 0
( psum[p[1]] - psum[p[2]] - psum[p[5]] + psum[p[6]] >= cval ? 64 : 0 ) | // 1
( psum[p[2]] - psum[p[3]] - psum[p[6]] + psum[p[7]] >= cval ? 32 : 0 ) | // 2
( psum[p[6]] - psum[p[7]] - psum[p[10]] + psum[p[11]] >= cval ? 16 : 0 ) | // 5
( psum[p[10]] - psum[p[11]] - psum[p[14]] + psum[p[15]] >= cval ? 8 : 0 ) | // 8
( psum[p[9]] - psum[p[10]] - psum[p[13]] + psum[p[14]] >= cval ? 4 : 0 ) | // 7
( psum[p[8]] - psum[p[9]] - psum[p[12]] + psum[p[13]] >= cval ? 2 : 0 ) | // 6
( psum[p[4]] - psum[p[5]] - psum[p[8]] + psum[p[9]] >= cval ? 1 : 0 ) ); // 3
}
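/* Editorial note: p[0..15] are flat offsets into the integral image arranged
   as a 4x4 grid of corner points, which delimits a 3x3 grid of equally sized
   blocks:

       p0  p1  p2  p3
       p4  p5  p6  p7
       p8  p9  p10 p11
       p12 p13 p14 p15

   The central block sum (cval) uses p5, p6, p9 and p10; each of the eight
   surrounding blocks is compared against it and contributes one bit of the
   resulting LBP code, in the bit order given by the trailing comments above. */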
} // namespace
//! @}
}}} // namespace cv
#endif

View File

@@ -0,0 +1,235 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2015, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_TRACKING_KALMAN_HPP_
#define __OPENCV_TRACKING_KALMAN_HPP_
#include "opencv2/core.hpp"
#include <limits>
namespace cv {
namespace detail {
inline namespace tracking {
//! @addtogroup tracking_detail
//! @{
inline namespace kalman_filters {
/** @brief The interface for Unscented Kalman filter and Augmented Unscented Kalman filter.
*/
class CV_EXPORTS UnscentedKalmanFilter
{
public:
virtual ~UnscentedKalmanFilter(){}
/** The function performs the prediction step of the algorithm
* @param control - the current control vector,
* @return the predicted estimate of the state.
*/
virtual Mat predict( InputArray control = noArray() ) = 0;
/** The function performs the correction step of the algorithm
* @param measurement - the current measurement vector,
* @return the corrected estimate of the state.
*/
virtual Mat correct( InputArray measurement ) = 0;
/**
* @return the process noise cross-covariance matrix.
*/
virtual Mat getProcessNoiseCov() const = 0;
/**
* @return the measurement noise cross-covariance matrix.
*/
virtual Mat getMeasurementNoiseCov() const = 0;
/**
* @return the error cross-covariance matrix.
*/
virtual Mat getErrorCov() const = 0;
/**
* @return the current estimate of the state.
*/
virtual Mat getState() const = 0;
};
/** @brief Model of a dynamical system for the Unscented Kalman filter.
* The interface for a dynamical system model. It contains functions for computing the next state and the measurement.
* It must be inherited from in order to use the UKF.
*/
class CV_EXPORTS UkfSystemModel
{
public:
virtual ~UkfSystemModel(){}
/** The function for computing the next state from the previous state
* @param x_k - previous state vector,
* @param u_k - control vector,
* @param v_k - noise vector,
* @param x_kplus1 - next state vector.
*/
virtual void stateConversionFunction( const Mat& x_k, const Mat& u_k, const Mat& v_k, Mat& x_kplus1 ) = 0;
/** The function for computing the measurement from the state
* @param x_k - state vector,
* @param n_k - noise vector,
* @param z_k - measurement vector.
*/
virtual void measurementFunction( const Mat& x_k, const Mat& n_k, Mat& z_k ) = 0;
};
/** @brief Unscented Kalman filter parameters.
* The class for initialization parameters of Unscented Kalman filter
*/
class CV_EXPORTS UnscentedKalmanFilterParams
{
public:
int DP; //!< Dimensionality of the state vector.
int MP; //!< Dimensionality of the measurement vector.
int CP; //!< Dimensionality of the control vector.
int dataType; //!< Type of elements of vectors and matrices, default is CV_64F.
Mat stateInit; //!< Initial state, DP x 1, default is zero.
Mat errorCovInit; //!< State estimate cross-covariance matrix, DP x DP, default is identity.
Mat processNoiseCov; //!< Process noise cross-covariance matrix, DP x DP.
Mat measurementNoiseCov; //!< Measurement noise cross-covariance matrix, MP x MP.
// Parameters of algorithm
double alpha; //!< Default is 1e-3.
double k; //!< Default is 0.
double beta; //!< Default is 2.0.
//Dynamical system model
Ptr<UkfSystemModel> model; //!< Object of the class containing functions for computing the next state and the measurement.
/** The constructors.
*/
UnscentedKalmanFilterParams(){}
/**
* @param dp - dimensionality of the state vector,
* @param mp - dimensionality of the measurement vector,
* @param cp - dimensionality of the control vector,
* @param processNoiseCovDiag - value of the elements on the main diagonal of the process noise cross-covariance matrix,
* @param measurementNoiseCovDiag - value of the elements on the main diagonal of the measurement noise cross-covariance matrix,
* @param dynamicalSystem - ptr to object of the class containing functions for computing the next state and the measurement,
* @param type - type of the created matrices that should be CV_32F or CV_64F.
*/
UnscentedKalmanFilterParams( int dp, int mp, int cp, double processNoiseCovDiag, double measurementNoiseCovDiag,
Ptr<UkfSystemModel> dynamicalSystem, int type = CV_64F );
/** The function for initialization of Unscented Kalman filter
* @param dp - dimensionality of the state vector,
* @param mp - dimensionality of the measurement vector,
* @param cp - dimensionality of the control vector,
* @param processNoiseCovDiag - value of the elements on the main diagonal of the process noise cross-covariance matrix,
* @param measurementNoiseCovDiag - value of the elements on the main diagonal of the measurement noise cross-covariance matrix,
* @param dynamicalSystem - ptr to object of the class containing functions for computing the next state and the measurement,
* @param type - type of the created matrices that should be CV_32F or CV_64F.
*/
void init( int dp, int mp, int cp, double processNoiseCovDiag, double measurementNoiseCovDiag,
Ptr<UkfSystemModel> dynamicalSystem, int type = CV_64F );
};
/** @brief Augmented Unscented Kalman filter parameters.
* The class for initialization parameters of Augmented Unscented Kalman filter
*/
class CV_EXPORTS AugmentedUnscentedKalmanFilterParams: public UnscentedKalmanFilterParams
{
public:
AugmentedUnscentedKalmanFilterParams(){}
/**
* @param dp - dimensionality of the state vector,
* @param mp - dimensionality of the measurement vector,
* @param cp - dimensionality of the control vector,
* @param processNoiseCovDiag - value of the elements on the main diagonal of the process noise cross-covariance matrix,
* @param measurementNoiseCovDiag - value of the elements on the main diagonal of the measurement noise cross-covariance matrix,
* @param dynamicalSystem - ptr to object of the class containing functions for computing the next state and the measurement,
* @param type - type of the created matrices that should be CV_32F or CV_64F.
*/
AugmentedUnscentedKalmanFilterParams( int dp, int mp, int cp, double processNoiseCovDiag, double measurementNoiseCovDiag,
Ptr<UkfSystemModel> dynamicalSystem, int type = CV_64F );
/** The function for initialization of Augmented Unscented Kalman filter
* @param dp - dimensionality of the state vector,
* @param mp - dimensionality of the measurement vector,
* @param cp - dimensionality of the control vector,
* @param processNoiseCovDiag - value of the elements on the main diagonal of the process noise cross-covariance matrix,
* @param measurementNoiseCovDiag - value of the elements on the main diagonal of the measurement noise cross-covariance matrix,
* @param dynamicalSystem - object of the class containing functions for computing the next state and the measurement,
* @param type - type of the created matrices that should be CV_32F or CV_64F.
*/
void init( int dp, int mp, int cp, double processNoiseCovDiag, double measurementNoiseCovDiag,
Ptr<UkfSystemModel> dynamicalSystem, int type = CV_64F );
};
/** @brief Unscented Kalman Filter factory method
* Creates an Unscented Kalman filter <https://en.wikipedia.org/wiki/Kalman_filter#Unscented_Kalman_filter>.
* @param params - an object of the UnscentedKalmanFilterParams class containing UKF parameters.
* @return pointer to the object of the UnscentedKalmanFilterImpl class implementing UnscentedKalmanFilter.
*/
CV_EXPORTS Ptr<UnscentedKalmanFilter> createUnscentedKalmanFilter( const UnscentedKalmanFilterParams &params );
/** @brief Augmented Unscented Kalman Filter factory method
* Creates an Augmented Unscented Kalman filter (http://becs.aalto.fi/en/research/bayes/ekfukf/documentation.pdf, pages 31-33).
* The AUKF is more accurate than the UKF, but its computational complexity is larger.
* @param params - an object of the AugmentedUnscentedKalmanFilterParams class containing AUKF parameters.
* @return pointer to the object of the AugmentedUnscentedKalmanFilterImpl class implementing UnscentedKalmanFilter.
*/
CV_EXPORTS Ptr<UnscentedKalmanFilter> createAugmentedUnscentedKalmanFilter( const AugmentedUnscentedKalmanFilterParams &params );
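/* Editorial usage sketch (not part of the original header): a 1-D
   constant-velocity system model and the corresponding filter. The state is
   (position, velocity) and the measurement is the noisy position; dimensions
   and noise magnitudes are illustrative assumptions.

    class ConstVelocityModel : public UkfSystemModel
    {
    public:
        void stateConversionFunction( const Mat& x_k, const Mat& u_k, const Mat& v_k, Mat& x_kplus1 ) CV_OVERRIDE
        {
            const double dt = 1.0;  // control u_k unused here (cp = 0)
            x_kplus1.at<double>( 0 ) = x_k.at<double>( 0 ) + dt * x_k.at<double>( 1 ) + v_k.at<double>( 0 );
            x_kplus1.at<double>( 1 ) = x_k.at<double>( 1 ) + v_k.at<double>( 1 );
        }
        void measurementFunction( const Mat& x_k, const Mat& n_k, Mat& z_k ) CV_OVERRIDE
        {
            z_k.at<double>( 0 ) = x_k.at<double>( 0 ) + n_k.at<double>( 0 );
        }
    };

    Ptr<UkfSystemModel> model = makePtr<ConstVelocityModel>();
    UnscentedKalmanFilterParams params( 2, 1, 0, 1e-4, 1e-2, model );  // dp=2, mp=1, cp=0
    Ptr<UnscentedKalmanFilter> ukf = createUnscentedKalmanFilter( params );
    Mat predicted = ukf->predict();
    // Mat corrected = ukf->correct( measurement );
*/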
} // namespace
//! @}
}}} // namespace
#endif

View File

@@ -0,0 +1,293 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_ONLINEBOOSTING_HPP__
#define __OPENCV_ONLINEBOOSTING_HPP__
#include "opencv2/core.hpp"
namespace cv {
namespace detail {
inline namespace tracking {
//! @addtogroup tracking_detail
//! @{
inline namespace online_boosting {
//TODO based on the original implementation
//http://vision.ucsd.edu/~bbabenko/project_miltrack.shtml
class BaseClassifier;
class WeakClassifierHaarFeature;
class EstimatedGaussDistribution;
class ClassifierThreshold;
class Detector;
class StrongClassifierDirectSelection
{
public:
StrongClassifierDirectSelection( int numBaseClf, int numWeakClf, Size patchSz, const Rect& sampleROI, bool useFeatureEx = false, int iterationInit = 0 );
virtual ~StrongClassifierDirectSelection();
void initBaseClassifier();
bool update( const Mat& image, int target, float importance = 1.0 );
float eval( const Mat& response );
std::vector<int> getSelectedWeakClassifier();
float classifySmooth( const std::vector<Mat>& images, const Rect& sampleROI, int& idx );
int getNumBaseClassifier();
Size getPatchSize() const;
Rect getROI() const;
bool getUseFeatureExchange() const;
int getReplacedClassifier() const;
void replaceWeakClassifier( int idx );
int getSwappedClassifier() const;
private:
//StrongClassifier
int numBaseClassifier;
int numAllWeakClassifier;
int numWeakClassifier;
int iterInit;
BaseClassifier** baseClassifier;
std::vector<float> alpha;
cv::Size patchSize;
bool useFeatureExchange;
//StrongClassifierDirectSelection
std::vector<bool> m_errorMask;
std::vector<float> m_errors;
std::vector<float> m_sumErrors;
Detector* detector;
Rect ROI;
int replacedClassifier;
int swappedClassifier;
};
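/* Editorial usage sketch (not part of the original header): a minimal online
   boosting loop. Patch size, ROI, classifier counts and the patch Mats
   (posPatch, negPatch, candidatePatch) are illustrative assumptions.

    Size patchSize( 32, 32 );
    Rect sampleROI( 0, 0, 320, 240 );
    StrongClassifierDirectSelection strong( 100, 10, patchSize, sampleROI );
    strong.initBaseClassifier();
    strong.update( posPatch, 1 );    // positive example (target = 1)
    strong.update( negPatch, -1 );   // negative example (target = -1)
    float confidence = strong.eval( candidatePatch );
*/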
class BaseClassifier
{
public:
BaseClassifier( int numWeakClassifier, int iterationInit );
BaseClassifier( int numWeakClassifier, int iterationInit, WeakClassifierHaarFeature** weakCls );
WeakClassifierHaarFeature** getReferenceWeakClassifier()
{
return weakClassifier;
}
void trainClassifier( const Mat& image, int target, float importance, std::vector<bool>& errorMask );
int selectBestClassifier( std::vector<bool>& errorMask, float importance, std::vector<float> & errors );
int computeReplaceWeakestClassifier( const std::vector<float> & errors );
void replaceClassifierStatistic( int sourceIndex, int targetIndex );
int getIdxOfNewWeakClassifier()
{
return m_idxOfNewWeakClassifier;
}
int eval( const Mat& image );
virtual ~BaseClassifier();
float getError( int curWeakClassifier );
void getErrors( float* errors );
int getSelectedClassifier() const;
void replaceWeakClassifier( int index );
protected:
void generateRandomClassifier();
WeakClassifierHaarFeature** weakClassifier;
bool m_referenceWeakClassifier;
int m_numWeakClassifier;
int m_selectedClassifier;
int m_idxOfNewWeakClassifier;
std::vector<float> m_wCorrect;
std::vector<float> m_wWrong;
int m_iterationInit;
};
class EstimatedGaussDistribution
{
public:
EstimatedGaussDistribution();
EstimatedGaussDistribution( float P_mean, float R_mean, float P_sigma, float R_sigma );
virtual ~EstimatedGaussDistribution();
void update( float value ); //, float timeConstant = -1.0);
float getMean();
float getSigma();
void setValues( float mean, float sigma );
private:
float m_mean;
float m_sigma;
float m_P_mean;
float m_P_sigma;
float m_R_mean;
float m_R_sigma;
};
class WeakClassifierHaarFeature
{
public:
WeakClassifierHaarFeature();
virtual ~WeakClassifierHaarFeature();
bool update( float value, int target );
int eval( float value );
private:
float sigma;
float mean;
ClassifierThreshold* m_classifier;
void getInitialDistribution( EstimatedGaussDistribution *distribution );
void generateRandomClassifier( EstimatedGaussDistribution* m_posSamples, EstimatedGaussDistribution* m_negSamples );
};
class Detector
{
public:
Detector( StrongClassifierDirectSelection* classifier );
virtual ~Detector( void );
void classifySmooth( const std::vector<Mat>& image, float minMargin = 0 );
int getNumDetections();
float getConfidence( int patchIdx );
float getConfidenceOfDetection( int detectionIdx );
float getConfidenceOfBestDetection()
{
return m_maxConfidence;
}
int getPatchIdxOfBestDetection();
int getPatchIdxOfDetection( int detectionIdx );
const std::vector<int>& getIdxDetections() const
{
return m_idxDetections;
}
const std::vector<float>& getConfidences() const
{
return m_confidences;
}
const cv::Mat& getConfImageDisplay() const
{
return m_confImageDisplay;
}
private:
void prepareConfidencesMemory( int numPatches );
void prepareDetectionsMemory( int numDetections );
StrongClassifierDirectSelection* m_classifier;
std::vector<float> m_confidences;
int m_sizeConfidences;
int m_numDetections;
std::vector<int> m_idxDetections;
int m_sizeDetections;
int m_idxBestDetection;
float m_maxConfidence;
cv::Mat_<float> m_confMatrix;
cv::Mat_<float> m_confMatrixSmooth;
cv::Mat_<unsigned char> m_confImageDisplay;
};
class ClassifierThreshold
{
public:
ClassifierThreshold( EstimatedGaussDistribution* posSamples, EstimatedGaussDistribution* negSamples );
virtual ~ClassifierThreshold();
void update( float value, int target );
int eval( float value );
void* getDistribution( int target );
private:
EstimatedGaussDistribution* m_posSamples;
EstimatedGaussDistribution* m_negSamples;
float m_threshold;
int m_parity;
};
} // namespace
//! @}
}}} // namespace
#endif

View File

@@ -0,0 +1,64 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef OPENCV_TLD_DATASET
#define OPENCV_TLD_DATASET
#include "opencv2/core.hpp"
namespace cv {
namespace detail {
inline namespace tracking {
//! @addtogroup tracking_detail
//! @{
namespace tld
{
CV_EXPORTS cv::Rect2d tld_InitDataset(int videoInd, const char* rootPath = "TLD_dataset", int datasetInd = 0);
CV_EXPORTS cv::String tld_getNextDatasetFrame();
}
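/* Editorial usage sketch (not part of the original header): iterating over a
   TLD dataset sequence. The root path and indices are illustrative, and the
   empty-string end-of-sequence check is an assumption.

    cv::Rect2d initBox = tld::tld_InitDataset( 0, "TLD_dataset", 0 );
    for ( ;; )
    {
        cv::String framePath = tld::tld_getNextDatasetFrame();
        if ( framePath.empty() )
            break;  // assuming an empty string signals the end of the sequence
        cv::Mat frame = cv::imread( framePath );
        // feed frame (and initBox on the first frame) to a tracker
    }
*/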
//! @}
}}}
#endif

View File

@@ -0,0 +1,46 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifdef __OPENCV_BUILD
#error this is a compatibility header which should not be used inside the OpenCV library
#endif
#include "opencv2/tracking.hpp"

View File

@@ -0,0 +1,566 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#ifndef __OPENCV_TRACKING_TRACKING_BY_MATCHING_HPP__
#define __OPENCV_TRACKING_TRACKING_BY_MATCHING_HPP__
#include <deque>
#include <iostream>
#include <string>
#include <unordered_map>
#include <vector>
#include <memory>
#include <map>
#include <tuple>
#include <set>
#include "opencv2/core.hpp"
#include "opencv2/imgproc.hpp"
namespace cv {
namespace detail {
inline namespace tracking {
//! @addtogroup tracking_detail
//! @{
namespace tbm { //Tracking-by-Matching
///
/// \brief The TrackedObject struct defines properties of detected object.
///
struct CV_EXPORTS TrackedObject {
cv::Rect rect; ///< Detected object ROI (zero area if N/A).
double confidence; ///< Detection confidence level (-1 if N/A).
int frame_idx; ///< Frame index where object was detected (-1 if N/A).
int object_id; ///< Unique object identifier (-1 if N/A).
uint64_t timestamp; ///< Timestamp in milliseconds.
///
/// \brief Default constructor.
///
TrackedObject()
: confidence(-1),
frame_idx(-1),
object_id(-1),
timestamp(0) {}
///
/// \brief Constructor with parameters.
/// \param rect Bounding box of detected object.
/// \param confidence Confidence of detection.
/// \param frame_idx Index of frame.
/// \param object_id Object ID.
///
TrackedObject(const cv::Rect &rect, float confidence, int frame_idx,
int object_id)
: rect(rect),
confidence(confidence),
frame_idx(frame_idx),
object_id(object_id),
timestamp(0) {}
};
using TrackedObjects = std::deque<TrackedObject>;
bool operator==(const TrackedObject& first, const TrackedObject& second);
bool operator!=(const TrackedObject& first, const TrackedObject& second);
/// (object id, detected objects) pairs collection.
using ObjectTracks = std::unordered_map<int, TrackedObjects>;
///
/// \brief The IImageDescriptor class declares base class for image
/// descriptor.
///
class CV_EXPORTS IImageDescriptor {
public:
///
/// \brief Descriptor size getter.
/// \return Descriptor size.
///
virtual cv::Size size() const = 0;
///
/// \brief Computes image descriptor.
/// \param[in] mat Color image.
/// \param[out] descr Computed descriptor.
///
virtual void compute(const cv::Mat &mat, CV_OUT cv::Mat& descr) = 0;
///
/// \brief Computes image descriptors in batches.
/// \param[in] mats Images of interest.
/// \param[out] descrs Matrices to store the computed descriptors.
///
virtual void compute(const std::vector<cv::Mat> &mats,
CV_OUT std::vector<cv::Mat>& descrs) = 0;
virtual ~IImageDescriptor() {}
};
///
/// \brief Uses resized image as descriptor.
///
class CV_EXPORTS ResizedImageDescriptor : public IImageDescriptor {
public:
///
/// \brief Constructor.
/// \param[in] descr_size Size of the descriptor (resized image).
/// \param[in] interpolation Interpolation algorithm.
///
explicit ResizedImageDescriptor(const cv::Size &descr_size,
const cv::InterpolationFlags interpolation)
: descr_size_(descr_size), interpolation_(interpolation) {
CV_Assert(descr_size.width > 0);
CV_Assert(descr_size.height > 0);
}
///
/// \brief Returns descriptor size.
/// \return Number of elements in the descriptor.
///
cv::Size size() const override { return descr_size_; }
///
/// \brief Computes image descriptor.
/// \param[in] mat Frame containing the image of interest.
/// \param[out] descr Matrix to store the computed descriptor.
///
void compute(const cv::Mat &mat, CV_OUT cv::Mat& descr) override {
CV_Assert(!mat.empty());
cv::resize(mat, descr, descr_size_, 0, 0, interpolation_);
}
///
/// \brief Computes image descriptors.
/// \param[in] mats Frames containing images of interest.
/// \param[out] descrs Matrices to store the computed descriptors.
///
void compute(const std::vector<cv::Mat> &mats,
CV_OUT std::vector<cv::Mat>& descrs) override {
descrs.resize(mats.size());
for (size_t i = 0; i < mats.size(); i++) {
compute(mats[i], descrs[i]);
}
}
private:
cv::Size descr_size_;
cv::InterpolationFlags interpolation_;
};
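/* Editorial usage sketch (not part of the original header): computing a fast
   appearance descriptor by resizing a patch to a fixed size. The descriptor
   size and the patch Mat are illustrative assumptions.

    cv::Ptr<IImageDescriptor> descriptor =
        cv::makePtr<ResizedImageDescriptor>( cv::Size( 16, 32 ), cv::INTER_LINEAR );
    cv::Mat descr;
    descriptor->compute( patch, descr );
*/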
///
/// \brief The IDescriptorDistance class declares an interface for distance
/// computation between reidentification descriptors.
///
class CV_EXPORTS IDescriptorDistance {
public:
///
/// \brief Computes distance between two descriptors.
/// \param[in] descr1 First descriptor.
/// \param[in] descr2 Second descriptor.
/// \return Distance between two descriptors.
///
virtual float compute(const cv::Mat &descr1, const cv::Mat &descr2) = 0;
///
/// \brief Computes distances between two descriptors in batches.
/// \param[in] descrs1 Batch of first descriptors.
/// \param[in] descrs2 Batch of second descriptors.
/// \return Distances between descriptors.
///
virtual std::vector<float> compute(const std::vector<cv::Mat> &descrs1,
const std::vector<cv::Mat> &descrs2) = 0;
virtual ~IDescriptorDistance() {}
};
///
/// \brief The CosDistance class allows computing cosine distance between two
/// reidentification descriptors.
///
class CV_EXPORTS CosDistance : public IDescriptorDistance {
public:
///
/// \brief CosDistance constructor.
/// \param[in] descriptor_size Descriptor size.
///
explicit CosDistance(const cv::Size &descriptor_size);
///
/// \brief Computes distance between two descriptors.
/// \param descr1 First descriptor.
/// \param descr2 Second descriptor.
/// \return Distance between two descriptors.
///
float compute(const cv::Mat &descr1, const cv::Mat &descr2) override;
///
/// \brief Computes distances between two descriptors in batches.
/// \param[in] descrs1 Batch of first descriptors.
/// \param[in] descrs2 Batch of second descriptors.
/// \return Distances between descriptors.
///
std::vector<float> compute(
const std::vector<cv::Mat> &descrs1,
const std::vector<cv::Mat> &descrs2) override;
private:
cv::Size descriptor_size_;
};
///
/// \brief Computes the distance between images
/// using OpenCV's matchTemplate function,
/// in particular its cross-correlation computation method.
///
class CV_EXPORTS MatchTemplateDistance : public IDescriptorDistance {
public:
///
/// \brief Constructs the distance object.
///
/// \param[in] type Method of MatchTemplate function computation.
/// \param[in] scale Scale parameter for the distance.
/// Final distance is computed as:
/// scale * distance + offset.
/// \param[in] offset Offset parameter for the distance.
/// Final distance is computed as:
/// scale * distance + offset.
///
MatchTemplateDistance(int type = cv::TemplateMatchModes::TM_CCORR_NORMED,
float scale = -1, float offset = 1)
: type_(type), scale_(scale), offset_(offset) {}
///
/// \brief Computes distance between image descriptors.
/// \param[in] descr1 First image descriptor.
/// \param[in] descr2 Second image descriptor.
/// \return Distance between image descriptors.
///
float compute(const cv::Mat &descr1, const cv::Mat &descr2) override;
///
/// \brief Computes distances between two descriptors in batches.
/// \param[in] descrs1 Batch of first descriptors.
/// \param[in] descrs2 Batch of second descriptors.
/// \return Distances between descriptors.
///
std::vector<float> compute(const std::vector<cv::Mat> &descrs1,
const std::vector<cv::Mat> &descrs2) override;
virtual ~MatchTemplateDistance() {}
private:
int type_; ///< Method of MatchTemplate function computation.
float scale_; ///< Scale parameter for the distance. Final distance is
/// computed as: scale * distance + offset.
float offset_; ///< Offset parameter for the distance. Final distance is
/// computed as: scale * distance + offset.
};
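/* Editorial note: with the defaults (TM_CCORR_NORMED, scale = -1, offset = 1)
   the returned value is 1 - NCC, so identical patches yield a distance near 0
   and uncorrelated patches a distance near 1. A minimal sketch, with descr1
   and descr2 as hypothetical equally sized descriptors:

    MatchTemplateDistance distance;
    float d = distance.compute( descr1, descr2 );
*/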
///
/// \brief The TrackerParams struct stores parameters of TrackerByMatching
///
struct CV_EXPORTS TrackerParams {
size_t min_track_duration; ///< Min track duration in milliseconds.
size_t forget_delay; ///< Forget about track if the last bounding box in
/// track was detected more than specified number of
/// frames ago.
float aff_thr_fast; ///< Affinity threshold which is used to determine if
/// tracklet and detection should be combined (fast
/// descriptor is used).
float aff_thr_strong; ///< Affinity threshold which is used to determine if
/// tracklet and detection should be combined (strong
/// descriptor is used).
float shape_affinity_w; ///< Shape affinity weight.
float motion_affinity_w; ///< Motion affinity weight.
float time_affinity_w; ///< Time affinity weight.
float min_det_conf; ///< Min confidence of detection.
cv::Vec2f bbox_aspect_ratios_range; ///< Bounding box aspect ratios range.
cv::Vec2f bbox_heights_range; ///< Bounding box heights range.
int predict; ///< How many frames are used to predict bounding box in case
/// of lost track.
float strong_affinity_thr; ///< If 'fast' confidence is greater than this
/// threshold then 'strong' Re-ID approach is
/// used.
float reid_thr; ///< Affinity threshold for re-identification.
bool drop_forgotten_tracks; ///< Drop forgotten tracks. If enabled, this
/// disables the ability to get the detection log.
int max_num_objects_in_track; ///< The number of objects in track is
/// restricted by this parameter. If it is negative or zero, the max number of
/// objects in track is not restricted.
///
/// Default constructor.
///
TrackerParams();
};
///
/// \brief The Track class describes tracks.
///
class CV_EXPORTS Track {
public:
///
/// \brief Track constructor.
/// \param objs Detected objects sequence.
/// \param last_image Last image in the detected object sequence.
/// \param descriptor_fast Fast descriptor.
/// \param descriptor_strong Strong descriptor (reid embedding).
///
Track(const TrackedObjects &objs, const cv::Mat &last_image,
const cv::Mat &descriptor_fast, const cv::Mat &descriptor_strong)
: objects(objs),
predicted_rect(!objs.empty() ? objs.back().rect : cv::Rect()),
last_image(last_image),
descriptor_fast(descriptor_fast),
descriptor_strong(descriptor_strong),
lost(0),
length(1) {
CV_Assert(!objs.empty());
first_object = objs[0];
}
///
/// \brief empty returns true if the track does not contain objects.
/// \return true if track does not contain objects.
///
bool empty() const { return objects.empty(); }
///
/// \brief size returns number of detected objects in a track.
/// \return number of detected objects in a track.
///
size_t size() const { return objects.size(); }
///
/// \brief operator [] return const reference to detected object with
/// specified index.
/// \param i Index of object.
/// \return const reference to detected object with specified index.
///
const TrackedObject &operator[](size_t i) const { return objects[i]; }
///
/// \brief operator [] return non-const reference to detected object with
/// specified index.
/// \param i Index of object.
/// \return non-const reference to detected object with specified index.
///
TrackedObject &operator[](size_t i) { return objects[i]; }
///
/// \brief back returns const reference to last object in track.
/// \return const reference to last object in track.
///
const TrackedObject &back() const {
CV_Assert(!empty());
return objects.back();
}
///
/// \brief back returns non-const reference to last object in track.
/// \return non-const reference to last object in track.
///
TrackedObject &back() {
CV_Assert(!empty());
return objects.back();
}
TrackedObjects objects; ///< Detected objects;
cv::Rect predicted_rect; ///< Rectangle that represents predicted position
/// and size of bounding box if track has been lost.
cv::Mat last_image; ///< Image of last detected object in track.
cv::Mat descriptor_fast; ///< Fast descriptor.
cv::Mat descriptor_strong; ///< Strong descriptor (reid embedding).
size_t lost; ///< How many frames ago track has been lost.
TrackedObject first_object; ///< First object in track.
size_t length; ///< Length of a track including number of objects that were
/// removed from track in order to avoid memory usage growth.
};
///
/// \brief Tracker-by-Matching algorithm interface.
///
/// This class is an implementation of a tracking-by-matching system. It uses two
/// different appearance measures to compute affinity between bounding boxes: a
/// fast descriptor and a strong descriptor. On each frame an assignment
/// problem is solved: how to establish correspondence between existing
/// tracklets and recently detected objects.
/// The first step is to compute an affinity matrix between tracklets and
/// detections, where the affinity equals
/// appearance_affinity * motion_affinity * shape_affinity
/// and the appearance affinity is 1 - distance(tracklet_fast_dscr, detection_fast_dscr).
/// The second step is to solve the assignment problem using the Kuhn-Munkres
/// algorithm. If the correspondence between some tracklet and detection is
/// established with low confidence (affinity), the strong descriptor is
/// used to decide whether the tracklet and the detection really match.
///
class CV_EXPORTS ITrackerByMatching {
public:
using Descriptor = std::shared_ptr<IImageDescriptor>;
using Distance = std::shared_ptr<IDescriptorDistance>;
///
/// \brief Destructor for the tracker
///
virtual ~ITrackerByMatching() {}
///
/// \brief Process given frame.
/// \param[in] frame Colored image (CV_8UC3).
/// \param[in] detections Detected objects on the frame.
/// \param[in] timestamp Timestamp must be positive and measured in
/// milliseconds
///
virtual void process(const cv::Mat &frame, const TrackedObjects &detections,
uint64_t timestamp) = 0;
///
/// \brief Pipeline parameters getter.
/// \return Parameters of pipeline.
///
virtual const TrackerParams &params() const = 0;
///
/// \brief Pipeline parameters setter.
/// \param[in] params Parameters of pipeline.
///
virtual void setParams(const TrackerParams &params) = 0;
///
/// \brief Fast descriptor getter.
/// \return Fast descriptor used in pipeline.
///
virtual const Descriptor &descriptorFast() const = 0;
///
/// \brief Fast descriptor setter.
/// \param[in] val Fast descriptor used in pipeline.
///
virtual void setDescriptorFast(const Descriptor &val) = 0;
///
/// \brief Strong descriptor getter.
/// \return Strong descriptor used in pipeline.
///
virtual const Descriptor &descriptorStrong() const = 0;
///
/// \brief Strong descriptor setter.
/// \param[in] val Strong descriptor used in pipeline.
///
virtual void setDescriptorStrong(const Descriptor &val) = 0;
///
/// \brief Fast distance getter.
/// \return Fast distance used in pipeline.
///
virtual const Distance &distanceFast() const = 0;
///
/// \brief Fast distance setter.
/// \param[in] val Fast distance used in pipeline.
///
virtual void setDistanceFast(const Distance &val) = 0;
///
/// \brief Strong distance getter.
/// \return Strong distance used in pipeline.
///
virtual const Distance &distanceStrong() const = 0;
///
/// \brief Strong distance setter.
/// \param[in] val Strong distance used in pipeline.
///
virtual void setDistanceStrong(const Distance &val) = 0;
///
/// \brief Returns number of counted people.
/// \return a number of counted people.
///
virtual size_t count() const = 0;
///
/// \brief Get active tracks to draw
/// \return Active tracks.
///
virtual std::unordered_map<size_t, std::vector<cv::Point> > getActiveTracks() const = 0;
///
/// \brief Get tracked detections.
/// \return Tracked detections.
///
virtual TrackedObjects trackedDetections() const = 0;
///
/// \brief Draws active tracks on a given frame.
/// \param[in] frame Colored image (CV_8UC3).
/// \return Colored image with drawn active tracks.
///
virtual cv::Mat drawActiveTracks(const cv::Mat &frame) = 0;
///
/// \brief isTrackForgotten returns true if track is forgotten.
/// \param id Track ID.
/// \return true if track is forgotten.
///
virtual bool isTrackForgotten(size_t id) const = 0;
///
/// \brief tracks Returns all tracks including forgotten (lost too many frames
/// ago).
/// \return Set of tracks {id, track}.
///
virtual const std::unordered_map<size_t, Track> &tracks() const = 0;
///
/// \brief isTrackValid Checks whether track is valid (duration > threshold).
/// \param track_id Index of checked track.
/// \return True if track duration exceeds some predefined value.
///
virtual bool isTrackValid(size_t track_id) const = 0;
///
/// \brief dropForgottenTracks Removes tracks from memory that were lost too
/// many frames ago.
///
virtual void dropForgottenTracks() = 0;
///
/// \brief dropForgottenTrack Checks whether the track was lost too many
/// frames ago and removes it from memory.
///
virtual void dropForgottenTrack(size_t track_id) = 0;
};
///
/// \brief The factory to create Tracker-by-Matching algorithm implementation.
///
CV_EXPORTS cv::Ptr<ITrackerByMatching> createTrackerByMatching(const TrackerParams &params = TrackerParams());
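/* Editorial usage sketch (not part of the original header): wiring up a
   tracker-by-matching pipeline with a resized-image fast descriptor and a
   match-template distance. grabFrame and runDetector are hypothetical helpers.

    cv::Ptr<ITrackerByMatching> tracker = createTrackerByMatching();
    tracker->setDescriptorFast(
        std::make_shared<ResizedImageDescriptor>( cv::Size( 16, 32 ), cv::INTER_LINEAR ) );
    tracker->setDistanceFast( std::make_shared<MatchTemplateDistance>() );
    uint64_t timestamp_ms = 0;
    for ( ;; )
    {
        cv::Mat frame = grabFrame();                                     // hypothetical frame source
        TrackedObjects detections = runDetector( frame, timestamp_ms );  // hypothetical detector
        tracker->process( frame, detections, timestamp_ms );
        timestamp_ms += 33;                                              // ~30 fps
        cv::Mat vis = tracker->drawActiveTracks( frame );
    }
*/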
} // namespace tbm
//! @}
}}} // namespace
#endif // #ifndef __OPENCV_TRACKING_TRACKING_BY_MATCHING_HPP__

View File

@@ -0,0 +1,932 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#ifndef OPENCV_TRACKING_DETAIL_HPP
#define OPENCV_TRACKING_DETAIL_HPP
/*
* Partially based on:
* ====================================================================================================================
* - [AAM] S. Salti, A. Cavallaro, L. Di Stefano, Adaptive Appearance Modeling for Video Tracking: Survey and Evaluation
* - [AMVOT] X. Li, W. Hu, C. Shen, Z. Zhang, A. Dick, A. van den Hengel, A Survey of Appearance Models in Visual Object Tracking
*
* This Tracking API has been designed with PlantUML. If you modify this API please change UML files under modules/tracking/doc/uml
*
*/
#include "opencv2/video/detail/tracking.detail.hpp"
#include "feature.hpp" // CvHaarEvaluator
#include "onlineBoosting.hpp" // StrongClassifierDirectSelection
namespace cv {
namespace detail {
inline namespace tracking {
/** @addtogroup tracking_detail
@{
Long-term optical tracking API
------------------------------
Long-term optical tracking is an important issue for many computer vision applications in
real-world scenarios. Development in this area is very fragmented, and this API provides a unified
interface for plugging in several algorithms and comparing them. This work is partially based on
@cite AAM and @cite AMVOT .
These algorithms start from a bounding box of the target and, using their internal representation,
avoid drift during tracking. These long-term trackers are able to evaluate online the
quality of the target's location in the new frame, without ground truth.
There are three main components: the TrackerContribSampler, the TrackerContribFeatureSet and the TrackerModel. The
first component is the object that computes the patches over the frame based on the last target
location. The TrackerContribFeatureSet is the class that manages the features; it is possible to plug in many kinds
of these (HAAR, HOG, LBP, Feature2D, etc). The last component is the internal representation of the
target: the appearance model. It stores all state candidates and computes the trajectory (the
most likely target states). The class TrackerTargetState represents a possible state of the target.
The TrackerContribSampler and the TrackerContribFeatureSet form the visual representation of the target, while
the TrackerModel is the statistical model.
A recent benchmark between these algorithms can be found in @cite OOT
Creating Your Own %Tracker
--------------------
If you want to create a new tracker, here's what you have to do. First, decide on the name of the class
for the tracker (to meet the existing style, we suggest something with prefix "tracker", e.g.
trackerMIL, trackerBoosting) -- we shall refer to this choice as "classname" in what follows.
- Declare your tracker in modules/tracking/include/opencv2/tracking/tracker.hpp. Your tracker should inherit from
Tracker (please, see the example below). You should declare the specialized Params structure,
where you will probably want to put the data needed to initialize your tracker. You should
end up with something similar to:
@code
class CV_EXPORTS_W TrackerMIL : public Tracker
{
public:
struct CV_EXPORTS Params
{
Params();
//parameters for sampler
float samplerInitInRadius; // radius for gathering positive instances during init
int samplerInitMaxNegNum; // # negative samples to use during init
float samplerSearchWinSize; // size of search window
float samplerTrackInRadius; // radius for gathering positive instances during tracking
int samplerTrackMaxPosNum; // # positive samples to use during tracking
int samplerTrackMaxNegNum; // # negative samples to use during tracking
int featureSetNumFeatures; // #features
void read( const FileNode& fn );
void write( FileStorage& fs ) const;
};
@endcode
Of course, you can also add any additional methods of your choice. It should be pointed out,
however, that a constructor is not expected to be declared, as creation should be done via
the corresponding create() method.
- Finally, you should implement the function with signature :
@code
Ptr<classname> classname::create(const classname::Params &parameters){
...
}
@endcode
That function can (and probably will) return a pointer to some derived class of "classname",
which will probably have a real constructor.
Every tracker has three components: TrackerContribSampler, TrackerContribFeatureSet and TrackerModel. The first two
are instantiated by the Tracker base class, while the last component is abstract, so you must
implement your own TrackerModel.
### TrackerContribSampler
TrackerContribSampler is already instantiated, but you should define the sampling algorithm and add the
classes (or a single class) to TrackerContribSampler. You can choose one of the ready implementations, such as
TrackerContribSamplerCSC, or implement your own sampling method; in that case the class must inherit from
TrackerContribSamplerAlgorithm. Fill in the samplingImpl method so that it writes the result into the "sample" output
argument.
Example of creating a specialized TrackerContribSamplerAlgorithm, TrackerContribSamplerCSC :
@code
class CV_EXPORTS_W TrackerContribSamplerCSC : public TrackerContribSamplerAlgorithm
{
public:
TrackerContribSamplerCSC( const TrackerContribSamplerCSC::Params &parameters = TrackerContribSamplerCSC::Params() );
~TrackerContribSamplerCSC();
...
protected:
bool samplingImpl( const Mat& image, Rect boundingBox, std::vector<Mat>& sample );
...
};
@endcode
Example of adding TrackerContribSamplerAlgorithm to TrackerContribSampler : :
@code
//sampler is the TrackerContribSampler
Ptr<TrackerContribSamplerAlgorithm> CSCSampler = new TrackerContribSamplerCSC( CSCparameters );
if( !sampler->addTrackerSamplerAlgorithm( CSCSampler ) )
return false;
//or add CSC sampler with default parameters
//sampler->addTrackerSamplerAlgorithm( "CSC" );
@endcode
@sa
TrackerContribSamplerCSC, TrackerContribSamplerAlgorithm
### TrackerContribFeatureSet
TrackerContribFeatureSet is already instantiated (as the first component), but you should define what kinds of features
you'll use in your tracker. You can use multiple feature types, so you can add a ready-made
implementation such as TrackerContribFeatureHAAR to your TrackerContribFeatureSet or develop your own implementation.
In the latter case, put the code that extracts the features in the computeImpl method, and optionally put
the code for the refinement and selection of the features in the selection method.
Example of creating specialized TrackerFeature TrackerContribFeatureHAAR : :
@code
class CV_EXPORTS_W TrackerContribFeatureHAAR : public TrackerFeature
{
public:
TrackerContribFeatureHAAR( const TrackerContribFeatureHAAR::Params &parameters = TrackerContribFeatureHAAR::Params() );
~TrackerContribFeatureHAAR();
void selection( Mat& response, int npoints );
...
protected:
bool computeImpl( const std::vector<Mat>& images, Mat& response );
...
};
@endcode
Example of adding TrackerFeature to TrackerContribFeatureSet : :
@code
//featureSet is the TrackerContribFeatureSet
Ptr<TrackerFeature> trackerFeature = new TrackerContribFeatureHAAR( HAARparameters );
featureSet->addTrackerFeature( trackerFeature );
@endcode
@sa
TrackerContribFeatureHAAR, TrackerContribFeatureSet
### TrackerModel
TrackerModel is abstract, so in your implementation you must develop a TrackerModel that inherits
from TrackerModel. Fill in the method for the estimation of the state, "modelEstimationImpl", which
estimates the most likely target location; see @cite AAM table I (ME) for further information. Fill in
"modelUpdateImpl" in order to update the model; see @cite AAM table I (MU). In this class you can use
the ConfidenceMap and Trajectory to store the model. The first represents the model over all
possible candidate states and the second represents the list of all estimated states.
Example of creating specialized TrackerModel TrackerMILModel : :
@code
class TrackerMILModel : public TrackerModel
{
public:
TrackerMILModel( const Rect& boundingBox );
~TrackerMILModel();
...
protected:
void modelEstimationImpl( const std::vector<Mat>& responses );
void modelUpdateImpl();
...
};
@endcode
And add it in your Tracker : :
@code
bool TrackerMIL::initImpl( const Mat& image, const Rect2d& boundingBox )
{
...
//model is the general TrackerModel field of the general Tracker
model = new TrackerMILModel( boundingBox );
...
}
@endcode
In the last step you should define the TrackerStateEstimator based on your implementation, or you can
use one of the ready-made classes such as TrackerStateEstimatorMILBoosting. It represents the statistical part of
the model, the part that estimates the most likely target state.
Example of creating specialized TrackerStateEstimator TrackerStateEstimatorMILBoosting : :
@code
class CV_EXPORTS_W TrackerStateEstimatorMILBoosting : public TrackerStateEstimator
{
class TrackerMILTargetState : public TrackerTargetState
{
...
};
public:
TrackerStateEstimatorMILBoosting( int nFeatures = 250 );
~TrackerStateEstimatorMILBoosting();
...
protected:
Ptr<TrackerTargetState> estimateImpl( const std::vector<ConfidenceMap>& confidenceMaps );
void updateImpl( std::vector<ConfidenceMap>& confidenceMaps );
...
};
@endcode
And add it in your TrackerModel : :
@code
//model is the TrackerModel of your Tracker
Ptr<TrackerStateEstimatorMILBoosting> stateEstimator = new TrackerStateEstimatorMILBoosting( params.featureSetNumFeatures );
model->setTrackerStateEstimator( stateEstimator );
@endcode
@sa
TrackerModel, TrackerStateEstimatorMILBoosting, TrackerTargetState
During this step, you should define your TrackerTargetState based on your implementation. The
TrackerTargetState base class has only the bounding box (upper-left position, width and height); you
can enrich it by adding a scale factor, target rotation, etc.
Example of creating specialized TrackerTargetState TrackerMILTargetState : :
@code
class TrackerMILTargetState : public TrackerTargetState
{
public:
TrackerMILTargetState( const Point2f& position, int targetWidth, int targetHeight, bool foreground, const Mat& features );
~TrackerMILTargetState();
...
private:
bool isTarget;
Mat targetFeatures;
...
};
@endcode
*/
/************************************ TrackerContribFeature Base Classes ************************************/
/** @brief Abstract base class for TrackerContribFeature that represents the feature.
*/
class CV_EXPORTS TrackerContribFeature : public TrackerFeature
{
public:
virtual ~TrackerContribFeature();
/** @brief Create TrackerContribFeature by tracker feature type
@param trackerFeatureType The TrackerContribFeature name
The modes available now:
- "HAAR" -- Haar Feature-based
The modes that will be available soon:
- "HOG" -- Histogram of Oriented Gradients features
- "LBP" -- Local Binary Pattern features
- "FEATURE2D" -- All types of Feature2D
*/
static Ptr<TrackerContribFeature> create( const String& trackerFeatureType );
/** @brief Identify most effective features
@param response Collection of response for the specific TrackerContribFeature
@param npoints Max number of features
@note This method modifies the response parameter
*/
virtual void selection( Mat& response, int npoints ) = 0;
/** @brief Get the name of the specific TrackerContribFeature
*/
String getClassName() const;
protected:
String className;
};
/** @brief Class that manages the extraction and selection of features
@cite AAM Feature Extraction and Feature Set Refinement (Feature Processing and Feature Selection).
See table I and section III C @cite AMVOT Appearance modelling -\> Visual representation (Table II,
section 3.1 - 3.2)
TrackerContribFeatureSet is an aggregation of TrackerContribFeature
@sa
TrackerContribFeature
*/
class CV_EXPORTS TrackerContribFeatureSet
{
public:
TrackerContribFeatureSet();
~TrackerContribFeatureSet();
/** @brief Extract features from the images collection
@param images The input images
*/
void extraction( const std::vector<Mat>& images );
/** @brief Identify most effective features for all feature types (optional)
*/
void selection();
/** @brief Remove outliers for all feature types (optional)
*/
void removeOutliers();
/** @brief Add TrackerContribFeature to the collection. Returns true if the TrackerContribFeature is added, false otherwise
@param trackerFeatureType The TrackerContribFeature name
The modes available now:
- "HAAR" -- Haar Feature-based
The modes that will be available soon:
- "HOG" -- Histogram of Oriented Gradients features
- "LBP" -- Local Binary Pattern features
- "FEATURE2D" -- All types of Feature2D
Example TrackerContribFeatureSet::addTrackerFeature : :
@code
//sample usage:
Ptr<TrackerContribFeature> trackerFeature = ...;
featureSet->addTrackerFeature( trackerFeature );
//or add the HAAR feature with default parameters
//featureSet->addTrackerFeature( "HAAR" );
@endcode
@note If you use the second method, you must initialize the TrackerContribFeature
*/
bool addTrackerFeature( String trackerFeatureType );
/** @overload
@param feature The TrackerContribFeature class
*/
bool addTrackerFeature( Ptr<TrackerContribFeature>& feature );
/** @brief Get the TrackerContribFeature collection (TrackerContribFeature name, TrackerContribFeature pointer)
*/
const std::vector<std::pair<String, Ptr<TrackerContribFeature> > >& getTrackerFeature() const;
/** @brief Get the responses
@note Be sure to call extraction before getResponses. Example TrackerContribFeatureSet::getResponses : :
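A minimal sketch (assumes featureSet already holds one or more features and images is a std::vector<Mat> of input frames):
@code
featureSet->extraction( images );
const std::vector<Mat>& responses = featureSet->getResponses();
@endcode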
*/
const std::vector<Mat>& getResponses() const;
private:
void clearResponses();
bool blockAddTrackerFeature;
std::vector<std::pair<String, Ptr<TrackerContribFeature> > > features; //list of features
std::vector<Mat> responses; //list of response after compute
};
/************************************ TrackerContribSampler Base Classes ************************************/
/** @brief Abstract base class for TrackerContribSamplerAlgorithm that represents the algorithm for the specific
sampler.
*/
class CV_EXPORTS TrackerContribSamplerAlgorithm : public TrackerSamplerAlgorithm
{
public:
/**
* \brief Destructor
*/
virtual ~TrackerContribSamplerAlgorithm();
/** @brief Create TrackerContribSamplerAlgorithm by tracker sampler type.
@param trackerSamplerType The trackerSamplerType name
The modes available now:
- "CSC" -- Current State Center
- "CS" -- Current State
*/
static Ptr<TrackerContribSamplerAlgorithm> create( const String& trackerSamplerType );
/** @brief Computes the regions starting from a position in an image.
Returns true if samples are computed, false otherwise
@param image The current frame
@param boundingBox The bounding box from which regions can be calculated
@param sample The computed samples @cite AAM Fig. 1 variable Sk
*/
virtual bool sampling(const Mat& image, const Rect& boundingBox, std::vector<Mat>& sample) CV_OVERRIDE;
/** @brief Get the name of the specific TrackerContribSamplerAlgorithm
*/
String getClassName() const;
protected:
String className;
virtual bool samplingImpl( const Mat& image, Rect boundingBox, std::vector<Mat>& sample ) = 0;
};
/** @brief Class that manages the sampler in order to select regions for updating the model of the tracker
@cite AAM Sampling and Labeling. See table I and section III B
TrackerContribSampler is an aggregation of TrackerContribSamplerAlgorithm
@sa
TrackerContribSamplerAlgorithm
*/
class CV_EXPORTS TrackerContribSampler
{
public:
/**
* \brief Constructor
*/
TrackerContribSampler();
/**
* \brief Destructor
*/
~TrackerContribSampler();
/** @brief Computes the regions starting from a position in an image
@param image The current frame
@param boundingBox The bounding box from which regions can be calculated
*/
void sampling( const Mat& image, Rect boundingBox );
/** @brief Return the collection of the TrackerContribSamplerAlgorithm
*/
const std::vector<std::pair<String, Ptr<TrackerContribSamplerAlgorithm> > >& getSamplers() const;
/** @brief Return the samples from all TrackerContribSamplerAlgorithm, @cite AAM Fig. 1 variable Sk
*/
const std::vector<Mat>& getSamples() const;
/** @brief Add TrackerContribSamplerAlgorithm to the collection. Returns true if the sampler is added, false otherwise
@param trackerSamplerAlgorithmType The TrackerContribSamplerAlgorithm name
The modes available now:
- "CSC" -- Current State Center
- "CS" -- Current State
- "PF" -- Particle Filtering
Example TrackerContribSamplerAlgorithm::addTrackerContribSamplerAlgorithm : :
@code
TrackerContribSamplerCSC::Params CSCparameters;
Ptr<TrackerContribSamplerAlgorithm> CSCSampler = new TrackerContribSamplerCSC( CSCparameters );
if( !sampler->addTrackerSamplerAlgorithm( CSCSampler ) )
return false;
//or add CSC sampler with default parameters
//sampler->addTrackerSamplerAlgorithm( "CSC" );
@endcode
@note If you use the second method, you must initialize the TrackerContribSamplerAlgorithm
*/
bool addTrackerSamplerAlgorithm( String trackerSamplerAlgorithmType );
/** @overload
@param sampler The TrackerContribSamplerAlgorithm
*/
bool addTrackerSamplerAlgorithm( Ptr<TrackerContribSamplerAlgorithm>& sampler );
private:
std::vector<std::pair<String, Ptr<TrackerContribSamplerAlgorithm> > > samplers;
std::vector<Mat> samples;
bool blockAddTrackerSampler;
void clearSamples();
};
/** @brief TrackerStateEstimatorAdaBoosting based on AdaBoost
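A minimal construction sketch (the parameter values below are hypothetical; the estimator is then
attached to a TrackerModel named model, as in the tutorial above):
@code
Size patchSize( 32, 32 );
Rect sampleROI( 0, 0, 320, 240 );
Ptr<TrackerStateEstimatorAdaBoosting> stateEstimator =
    new TrackerStateEstimatorAdaBoosting( 100, 50, 250, patchSize, sampleROI );
model->setTrackerStateEstimator( stateEstimator );
@endcode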
*/
class CV_EXPORTS TrackerStateEstimatorAdaBoosting : public TrackerStateEstimator
{
public:
/** @brief Implementation of the target state for TrackerAdaBoostingTargetState
*/
class CV_EXPORTS TrackerAdaBoostingTargetState : public TrackerTargetState
{
public:
/**
* \brief Constructor
* \param position Top left corner of the bounding box
* \param width Width of the bounding box
* \param height Height of the bounding box
* \param foreground label for target or background
* \param responses list of features
*/
TrackerAdaBoostingTargetState( const Point2f& position, int width, int height, bool foreground, const Mat& responses );
/**
* \brief Destructor
*/
~TrackerAdaBoostingTargetState()
{
}
/** @brief Set the features extracted from TrackerContribFeatureSet
@param responses The features extracted
*/
void setTargetResponses( const Mat& responses );
/** @brief Set label: true for target foreground, false for background
@param foreground Label for background/foreground
*/
void setTargetFg( bool foreground );
/** @brief Get the features extracted
*/
Mat getTargetResponses() const;
/** @brief Get the label. Return true for target foreground, false for background
*/
bool isTargetFg() const;
private:
bool isTarget;
Mat targetResponses;
};
/** @brief Constructor
@param numClassifer Number of base classifiers
@param initIterations Number of iterations in the initialization
@param nFeatures Number of features/weak classifiers
@param patchSize tracking rect
@param ROI initial ROI
*/
TrackerStateEstimatorAdaBoosting( int numClassifer, int initIterations, int nFeatures, Size patchSize, const Rect& ROI );
/**
* \brief Destructor
*/
~TrackerStateEstimatorAdaBoosting();
/** @brief Get the sampling ROI
*/
Rect getSampleROI() const;
/** @brief Set the sampling ROI
@param ROI the sampling ROI
*/
void setSampleROI( const Rect& ROI );
/** @brief Set the current confidenceMap
@param confidenceMap The current ConfidenceMap
*/
void setCurrentConfidenceMap( ConfidenceMap& confidenceMap );
/** @brief Get the list of the selected weak classifiers for the classification step
*/
std::vector<int> computeSelectedWeakClassifier();
/** @brief Get the list of the weak classifiers that should be replaced
*/
std::vector<int> computeReplacedClassifier();
/** @brief Get the list of the weak classifiers that replace those to be replaced
*/
std::vector<int> computeSwappedClassifier();
protected:
Ptr<TrackerTargetState> estimateImpl( const std::vector<ConfidenceMap>& confidenceMaps ) CV_OVERRIDE;
void updateImpl( std::vector<ConfidenceMap>& confidenceMaps ) CV_OVERRIDE;
Ptr<StrongClassifierDirectSelection> boostClassifier;
private:
int numBaseClassifier;
int iterationInit;
int numFeatures;
bool trained;
Size initPatchSize;
Rect sampleROI;
std::vector<int> replacedClassifier;
std::vector<int> swappedClassifier;
ConfidenceMap currentConfidenceMap;
};
/**
* \brief TrackerStateEstimator based on SVM
*/
class CV_EXPORTS TrackerStateEstimatorSVM : public TrackerStateEstimator
{
public:
TrackerStateEstimatorSVM();
~TrackerStateEstimatorSVM();
protected:
Ptr<TrackerTargetState> estimateImpl( const std::vector<ConfidenceMap>& confidenceMaps ) CV_OVERRIDE;
void updateImpl( std::vector<ConfidenceMap>& confidenceMaps ) CV_OVERRIDE;
};
/************************************ Specific TrackerSamplerAlgorithm Classes ************************************/
/** @brief Sampler algorithm based on CSC (current state centered), used by the MIL algorithm TrackerMIL
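A minimal usage sketch (default parameters; the sampling mode is selected before sampling is invoked):
@code
Ptr<TrackerContribSamplerCSC> CSCSampler = new TrackerContribSamplerCSC( TrackerContribSamplerCSC::Params() );
CSCSampler->setMode( TrackerContribSamplerCSC::MODE_INIT_POS );
@endcode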
*/
class CV_EXPORTS TrackerContribSamplerCSC : public TrackerContribSamplerAlgorithm
{
public:
enum
{
MODE_INIT_POS = 1, //!< mode for init positive samples
MODE_INIT_NEG = 2, //!< mode for init negative samples
MODE_TRACK_POS = 3, //!< mode for update positive samples
MODE_TRACK_NEG = 4, //!< mode for update negative samples
MODE_DETECT = 5 //!< mode for detect samples
};
struct CV_EXPORTS Params
{
Params();
float initInRad; //!< radius for gathering positive instances during init
float trackInPosRad; //!< radius for gathering positive instances during tracking
float searchWinSize; //!< size of search window
int initMaxNegNum; //!< # negative samples to use during init
int trackMaxPosNum; //!< # positive samples to use during training
int trackMaxNegNum; //!< # negative samples to use during training
};
/** @brief Constructor
@param parameters TrackerContribSamplerCSC parameters TrackerContribSamplerCSC::Params
*/
TrackerContribSamplerCSC( const TrackerContribSamplerCSC::Params &parameters = TrackerContribSamplerCSC::Params() );
/** @brief Set the sampling mode of TrackerContribSamplerCSC
@param samplingMode The sampling mode
The modes are:
- "MODE_INIT_POS = 1" -- for the positive sampling in initialization step
- "MODE_INIT_NEG = 2" -- for the negative sampling in initialization step
- "MODE_TRACK_POS = 3" -- for the positive sampling in update step
- "MODE_TRACK_NEG = 4" -- for the negative sampling in update step
- "MODE_DETECT = 5" -- for the sampling in detection step
*/
void setMode( int samplingMode );
~TrackerContribSamplerCSC();
protected:
bool samplingImpl(const Mat& image, Rect boundingBox, std::vector<Mat>& sample) CV_OVERRIDE;
private:
Params params;
int mode;
RNG rng;
std::vector<Mat> sampleImage( const Mat& img, int x, int y, int w, int h, float inrad, float outrad = 0, int maxnum = 1000000 );
};
/** @brief TrackerContribSampler based on CS (current state), used by the algorithm TrackerBoosting
*/
class CV_EXPORTS TrackerSamplerCS : public TrackerContribSamplerAlgorithm
{
public:
enum
{
MODE_POSITIVE = 1, //!< mode for positive samples
MODE_NEGATIVE = 2, //!< mode for negative samples
MODE_CLASSIFY = 3 //!< mode for classify samples
};
struct CV_EXPORTS Params
{
Params();
float overlap; //!<overlapping for the search windows
float searchFactor; //!<search region parameter
};
/** @brief Constructor
@param parameters TrackerSamplerCS parameters TrackerSamplerCS::Params
*/
TrackerSamplerCS( const TrackerSamplerCS::Params &parameters = TrackerSamplerCS::Params() );
/** @brief Set the sampling mode of TrackerSamplerCS
@param samplingMode The sampling mode
The modes are:
- "MODE_POSITIVE = 1" -- for the positive sampling
- "MODE_NEGATIVE = 2" -- for the negative sampling
- "MODE_CLASSIFY = 3" -- for the sampling in classification step
*/
void setMode( int samplingMode );
~TrackerSamplerCS();
bool samplingImpl( const Mat& image, Rect boundingBox, std::vector<Mat>& sample ) CV_OVERRIDE;
Rect getROI() const;
private:
Rect getTrackingROI( float searchFactor );
Rect RectMultiply( const Rect & rect, float f );
std::vector<Mat> patchesRegularScan( const Mat& image, Rect trackingROI, Size patchSize );
void setCheckedROI( Rect imageROI );
Params params;
int mode;
Rect trackedPatch;
Rect validROI;
Rect ROI;
};
/** @brief This sampler is based on particle filtering.
In principle, it can be thought of as performing some sort of optimization (and indeed, this
tracker uses opencv's optim module), where the tracker seeks to find the rectangle in the given frame
which is the most *"similar"* to the initial rectangle (the one given through the constructor).
The optimization performed is stochastic and somewhat resembles genetic algorithms, where on each new
image received (submitted via TrackerSamplerPF::sampling()) we start with the region bounded by
boundingBox, then generate several "perturbed" boxes and keep the ones most similar to the original.
This selection round is repeated several times. At the end, we hope that only the most promising boxes
remain, and these are combined to produce the subrectangle of the image, which is put as the sole
element in the array sample.
It should be noted that the definition of "similarity" between two rectangles is based on comparing
their histograms. As experiments show, the tracker is *not* very successful if the target is assumed to
strongly change its dimensions.
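A minimal construction sketch (the rectangle is passed as a Mat, assumed here to be a 1-by-4 double
matrix holding the coordinates of two opposite vertices, as suggested by Params::std below; all
values are hypothetical):
@code
TrackerSamplerPF::Params params;
params.iterationNum = 20;   // hypothetical values
params.particlesNum = 100;
Mat_<double> initRect( 1, 4 );
initRect << 10, 10, 90, 120; // x1, y1, x2, y2
Ptr<TrackerContribSamplerAlgorithm> pf = new TrackerSamplerPF( initRect, params );
@endcode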
*/
class CV_EXPORTS TrackerSamplerPF : public TrackerContribSamplerAlgorithm
{
public:
/** @brief This structure contains all the parameters that can be varied during the course of the sampling
algorithm. Below the structure is exposed, together with its members briefly explained with
reference to the above discussion on the algorithm's working.
*/
struct CV_EXPORTS Params
{
Params();
int iterationNum; //!< number of selection rounds
int particlesNum; //!< number of "perturbed" boxes on each round
double alpha; //!< with each new round we exponentially decrease the amount of "perturbing" we allow (like in simulated annealing)
//!< and this very alpha controls how fast annealing happens, ie. how fast perturbing decreases
Mat_<double> std; //!< initial values for perturbing (1-by-4 array, as each rectangle is given by 4 values -- coordinates of opposite vertices,
//!< hence we have 4 values to perturb)
};
/** @brief Constructor
@param chosenRect Initial rectangle, that is supposed to contain the target we'd like to track.
@param parameters TrackerSamplerPF parameters TrackerSamplerPF::Params
*/
TrackerSamplerPF(const Mat& chosenRect,const TrackerSamplerPF::Params &parameters = TrackerSamplerPF::Params());
protected:
bool samplingImpl( const Mat& image, Rect boundingBox, std::vector<Mat>& sample ) CV_OVERRIDE;
private:
Params params;
Ptr<MinProblemSolver> _solver;
Ptr<MinProblemSolver::Function> _function;
};
/************************************ Specific TrackerContribFeature Classes ************************************/
/**
* \brief TrackerContribFeature based on Feature2D
*/
class CV_EXPORTS TrackerFeatureFeature2d : public TrackerContribFeature
{
public:
/**
* \brief Constructor
* \param detectorType string of FeatureDetector
* \param descriptorType string of DescriptorExtractor
*/
TrackerFeatureFeature2d( String detectorType, String descriptorType );
~TrackerFeatureFeature2d() CV_OVERRIDE;
void selection( Mat& response, int npoints ) CV_OVERRIDE;
protected:
bool computeImpl( const std::vector<Mat>& images, Mat& response ) CV_OVERRIDE;
private:
std::vector<KeyPoint> keypoints;
};
/**
* \brief TrackerContribFeature based on HOG
*/
class CV_EXPORTS TrackerFeatureHOG : public TrackerContribFeature
{
public:
TrackerFeatureHOG();
~TrackerFeatureHOG() CV_OVERRIDE;
void selection( Mat& response, int npoints ) CV_OVERRIDE;
protected:
bool computeImpl( const std::vector<Mat>& images, Mat& response ) CV_OVERRIDE;
};
/** @brief TrackerContribFeature based on HAAR features, used by TrackerMIL and many other algorithms
@note HAAR features implementation is copied from apps/traincascade and modified according to MIL
*/
class CV_EXPORTS TrackerContribFeatureHAAR : public TrackerContribFeature
{
public:
struct CV_EXPORTS Params
{
Params();
int numFeatures; //!< # of rects
Size rectSize; //!< rect size
bool isIntegral; //!< true if input images are integral, false otherwise
};
/** @brief Constructor
@param parameters TrackerContribFeatureHAAR parameters TrackerContribFeatureHAAR::Params
*/
TrackerContribFeatureHAAR( const TrackerContribFeatureHAAR::Params &parameters = TrackerContribFeatureHAAR::Params() );
~TrackerContribFeatureHAAR() CV_OVERRIDE;
/** @brief Compute the features only for the selected indices in the images collection
@param selFeatures indices of selected features
@param images The images
@param response Collection of response for the specific TrackerContribFeature
*/
bool extractSelected( const std::vector<int> selFeatures, const std::vector<Mat>& images, Mat& response );
/** @brief Identify most effective features
@param response Collection of response for the specific TrackerContribFeature
@param npoints Max number of features
@note This method modifies the response parameter
*/
void selection( Mat& response, int npoints ) CV_OVERRIDE;
/** @brief Swap the feature in position source with the feature in position target
@param source The source position
@param target The target position
*/
bool swapFeature( int source, int target );
/** @brief Swap the feature in position id with the feature input
@param id The position
@param feature The feature
*/
bool swapFeature( int id, CvHaarEvaluator::FeatureHaar& feature );
/** @brief Get the feature in position id
@param id The position
*/
CvHaarEvaluator::FeatureHaar& getFeatureAt( int id );
protected:
bool computeImpl( const std::vector<Mat>& images, Mat& response ) CV_OVERRIDE;
private:
Params params;
Ptr<CvHaarEvaluator> featureEvaluator;
};
/**
* \brief TrackerContribFeature based on LBP
*/
class CV_EXPORTS TrackerFeatureLBP : public TrackerContribFeature
{
public:
TrackerFeatureLBP();
~TrackerFeatureLBP();
void selection( Mat& response, int npoints ) CV_OVERRIDE;
protected:
bool computeImpl( const std::vector<Mat>& images, Mat& response ) CV_OVERRIDE;
};
//! @}
}}} // namespace
#endif // OPENCV_TRACKING_DETAIL_HPP

View File

@@ -0,0 +1,538 @@
#ifndef OPENCV_TRACKING_LEGACY_HPP
#define OPENCV_TRACKING_LEGACY_HPP
/*
* Partially based on:
* ====================================================================================================================
* - [AAM] S. Salti, A. Cavallaro, L. Di Stefano, Adaptive Appearance Modeling for Video Tracking: Survey and Evaluation
* - [AMVOT] X. Li, W. Hu, C. Shen, Z. Zhang, A. Dick, A. van den Hengel, A Survey of Appearance Models in Visual Object Tracking
*
* This Tracking API has been designed with PlantUML. If you modify this API please change UML files under modules/tracking/doc/uml
*
*/
#include "tracking_internals.hpp"
namespace cv {
namespace legacy {
#ifndef CV_DOXYGEN
inline namespace tracking {
#endif
using namespace cv::detail::tracking;
/** @addtogroup tracking_legacy
@{
*/
/************************************ Tracker Base Class ************************************/
/** @brief Base abstract class for the long-term tracker:
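A minimal usage sketch (assumes `cap` is an opened cv::VideoCapture and `frame` a cv::Mat; the
initial box is hypothetical):
@code
Ptr<legacy::Tracker> tracker = legacy::TrackerMIL::create();
Rect2d box( 100, 50, 80, 60 );
cap.read( frame );
tracker->init( frame, box );
while ( cap.read( frame ) )
{
    if ( tracker->update( frame, box ) )
        rectangle( frame, box, Scalar( 0, 255, 0 ), 2 );
}
@endcode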
*/
class CV_EXPORTS_W Tracker : public virtual Algorithm
{
public:
Tracker();
virtual ~Tracker() CV_OVERRIDE;
/** @brief Initialize the tracker with a known bounding box that surrounds the target
@param image The initial frame
@param boundingBox The initial bounding box
@return True if initialization went successfully, false otherwise
*/
CV_WRAP bool init( InputArray image, const Rect2d& boundingBox );
/** @brief Update the tracker, find the new most likely bounding box for the target
@param image The current frame
@param boundingBox The bounding box that represents the new target location if true was returned; not
modified otherwise
@return True means that the target was located and false means that the tracker cannot locate the target in the
current frame. Note that the latter *does not* imply that the tracker has failed; maybe the target is indeed
missing from the frame (say, out of sight)
*/
CV_WRAP bool update( InputArray image, CV_OUT Rect2d& boundingBox );
virtual void read( const FileNode& fn ) CV_OVERRIDE = 0;
virtual void write( FileStorage& fs ) const CV_OVERRIDE = 0;
protected:
virtual bool initImpl( const Mat& image, const Rect2d& boundingBox ) = 0;
virtual bool updateImpl( const Mat& image, Rect2d& boundingBox ) = 0;
bool isInit;
Ptr<TrackerContribFeatureSet> featureSet;
Ptr<TrackerContribSampler> sampler;
Ptr<TrackerModel> model;
};
/************************************ Specific Tracker Classes ************************************/
/** @brief The MIL algorithm trains a classifier in an online manner to separate the object from the
background.
Multiple Instance Learning avoids the drift problem for robust tracking. The implementation is
based on @cite MIL .
Original code can be found here <http://vision.ucsd.edu/~bbabenko/project_miltrack.shtml>
*/
class CV_EXPORTS_W TrackerMIL : public cv::legacy::Tracker
{
public:
struct CV_EXPORTS Params : cv::TrackerMIL::Params
{
void read( const FileNode& fn );
void write( FileStorage& fs ) const;
};
/** @brief Constructor
@param parameters MIL parameters TrackerMIL::Params
*/
static Ptr<legacy::TrackerMIL> create(const TrackerMIL::Params &parameters);
CV_WRAP static Ptr<legacy::TrackerMIL> create();
virtual ~TrackerMIL() CV_OVERRIDE {}
};
/** @brief the Boosting tracker
This is a real-time object tracker based on a novel on-line version of the AdaBoost algorithm.
The classifier uses the surrounding background as negative examples in the update step to avoid the
drifting problem. The implementation is based on @cite OLB .
*/
class CV_EXPORTS_W TrackerBoosting : public cv::legacy::Tracker
{
public:
struct CV_EXPORTS Params
{
Params();
int numClassifiers; //!<the number of classifiers to use in an OnlineBoosting algorithm
float samplerOverlap; //!<overlap of the search windows in an OnlineBoosting algorithm
float samplerSearchFactor; //!< search region scale factor in an OnlineBoosting algorithm
int iterationInit; //!<the initial iterations
int featureSetNumFeatures; //!< # features
/**
* \brief Read parameters from a file
*/
void read( const FileNode& fn );
/**
* \brief Write parameters to a file
*/
void write( FileStorage& fs ) const;
};
/** @brief Constructor
@param parameters BOOSTING parameters TrackerBoosting::Params
*/
static Ptr<legacy::TrackerBoosting> create(const TrackerBoosting::Params &parameters);
CV_WRAP static Ptr<legacy::TrackerBoosting> create();
virtual ~TrackerBoosting() CV_OVERRIDE {}
};
/** @brief the Median Flow tracker
Implementation of the paper @cite MedianFlow .
The tracker is suitable for very smooth and predictable movements when the object is visible throughout
the whole sequence. It's quite fast and accurate for this type of problem (in particular, it was shown
by the authors to outperform MIL). During the implementation period the code at
<http://www.aonsquared.co.uk/node/5>, courtesy of the author Arthur Amarra, was used for
reference purposes.
*/
class CV_EXPORTS_W TrackerMedianFlow : public cv::legacy::Tracker
{
public:
struct CV_EXPORTS Params
{
Params(); //!<default constructor
//!<note that the default values of parameters are recommended for most of use cases
int pointsInGrid; //!<square root of the number of keypoints used; increase it to trade
//!<speed for accuracy
cv::Size winSize; //!<window size parameter for Lucas-Kanade optical flow
int maxLevel; //!<maximal pyramid level number for Lucas-Kanade optical flow
TermCriteria termCriteria; //!<termination criteria for Lucas-Kanade optical flow
cv::Size winSizeNCC; //!<window size around a point for normalized cross-correlation check
double maxMedianLengthOfDisplacementDifference; //!<criterion for losing the tracked object
void read( const FileNode& /*fn*/ );
void write( FileStorage& /*fs*/ ) const;
};
/** @brief Constructor
@param parameters Median Flow parameters TrackerMedianFlow::Params
*/
static Ptr<legacy::TrackerMedianFlow> create(const TrackerMedianFlow::Params &parameters);
CV_WRAP static Ptr<legacy::TrackerMedianFlow> create();
virtual ~TrackerMedianFlow() CV_OVERRIDE {}
};
/** @brief the TLD (Tracking, learning and detection) tracker
TLD is a novel tracking framework that explicitly decomposes the long-term tracking task into
tracking, learning and detection.
The tracker follows the object from frame to frame. The detector localizes all appearances that
have been observed so far and corrects the tracker if necessary. The learning component estimates the detector's
errors and updates it to avoid these errors in the future. The implementation is based on @cite TLD .
The Median Flow algorithm (see cv::TrackerMedianFlow) was chosen as the tracking component in this
implementation, following the authors. The tracker is supposed to be able to handle rapid motions, partial
occlusions, object absence, etc.
*/
class CV_EXPORTS_W TrackerTLD : public cv::legacy::Tracker
{
public:
struct CV_EXPORTS Params
{
Params();
void read( const FileNode& /*fn*/ );
void write( FileStorage& /*fs*/ ) const;
};
/** @brief Constructor
@param parameters TLD parameters TrackerTLD::Params
*/
static Ptr<legacy::TrackerTLD> create(const TrackerTLD::Params &parameters);
CV_WRAP static Ptr<legacy::TrackerTLD> create();
virtual ~TrackerTLD() CV_OVERRIDE {}
};
/** @brief the KCF (Kernelized Correlation Filter) tracker
 * KCF is a novel tracking framework that utilizes properties of circulant matrices to enhance the processing speed.
* This tracking method is an implementation of @cite KCF_ECCV which is extended to KCF with color-names features (@cite KCF_CN).
* The original paper of KCF is available at <http://www.robots.ox.ac.uk/~joao/publications/henriques_tpami2015.pdf>
* as well as the matlab implementation. For more information about KCF with color-names features, please refer to
* <http://www.cvl.isy.liu.se/research/objrec/visualtracking/colvistrack/index.html>.
*/
class CV_EXPORTS_W TrackerKCF : public cv::legacy::Tracker
{
public:
/**
 * \brief Feature type to be used in the tracking: grayscale, colornames, compressed color-names
* The modes available now:
- "GRAY" -- Use grayscale values as the feature
- "CN" -- Color-names feature
*/
typedef enum cv::tracking::TrackerKCF::MODE MODE;
struct CV_EXPORTS Params : cv::tracking::TrackerKCF::Params
{
void read(const FileNode& /*fn*/);
void write(FileStorage& /*fs*/) const;
};
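/** @brief Set a custom feature extractor.
A minimal sketch (the callback signature matches the declaration below; the extractor name and its
internals are illustrative assumptions, not part of the API):
@code
// hypothetical extractor: normalized grayscale pixels of the region as features
static void grayFeatures( const Mat img, const Rect roi, Mat& feat )
{
    Mat gray;
    cvtColor( img( roi ), gray, COLOR_BGR2GRAY );
    gray.convertTo( feat, CV_32F, 1.0 / 255 );
}
...
tracker->setFeatureExtractor( grayFeatures );
@endcode
*/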
virtual void setFeatureExtractor(void(*)(const Mat, const Rect, Mat&), bool pca_func = false) = 0;
/** @brief Constructor
@param parameters KCF parameters TrackerKCF::Params
*/
static Ptr<legacy::TrackerKCF> create(const TrackerKCF::Params &parameters);
CV_WRAP static Ptr<legacy::TrackerKCF> create();
virtual ~TrackerKCF() CV_OVERRIDE {}
};
#if 0 // legacy variant is not available
/** @brief the GOTURN (Generic Object Tracking Using Regression Networks) tracker
 * GOTURN (@cite GOTURN) is a kind of tracker based on Convolutional Neural Networks (CNN). While taking all advantages of CNN trackers,
 * GOTURN is much faster due to its offline training, with no online fine-tuning.
 * The GOTURN tracker addresses the problem of single target tracking: given a bounding box label of an object in the first frame of the video,
 * we track that object through the rest of the video. NOTE: The current method of GOTURN does not handle occlusions; however, it is fairly
 * robust to viewpoint changes, lighting changes, and deformations.
* Inputs of GOTURN are two RGB patches representing Target and Search patches resized to 227x227.
* Outputs of GOTURN are predicted bounding box coordinates, relative to Search patch coordinate system, in format X1,Y1,X2,Y2.
* Original paper is here: <http://davheld.github.io/GOTURN/GOTURN.pdf>
 * as well as the original authors' implementation: <https://github.com/davheld/GOTURN#train-the-tracker>
 * The implementation of the training algorithm is placed separately here due to third-party dependencies:
* <https://github.com/Auron-X/GOTURN_Training_Toolkit>
* GOTURN architecture goturn.prototxt and trained model goturn.caffemodel are accessible on opencv_extra GitHub repository.
*/
class CV_EXPORTS_W TrackerGOTURN : public cv::legacy::Tracker
{
public:
struct CV_EXPORTS Params
{
Params();
void read(const FileNode& /*fn*/);
void write(FileStorage& /*fs*/) const;
String modelTxt;
String modelBin;
};
/** @brief Constructor
@param parameters GOTURN parameters TrackerGOTURN::Params
*/
static Ptr<legacy::TrackerGOTURN> create(const TrackerGOTURN::Params &parameters);
CV_WRAP static Ptr<legacy::TrackerGOTURN> create();
virtual ~TrackerGOTURN() CV_OVERRIDE {}
};
#endif
/** @brief the MOSSE (Minimum Output Sum of Squared %Error) tracker
The implementation is based on @cite MOSSE Visual Object Tracking using Adaptive Correlation Filters
@note This tracker works with grayscale images; if BGR images are passed, they are converted internally.
*/
class CV_EXPORTS_W TrackerMOSSE : public cv::legacy::Tracker
{
public:
/** @brief Constructor
*/
CV_WRAP static Ptr<legacy::TrackerMOSSE> create();
virtual ~TrackerMOSSE() CV_OVERRIDE {}
};
/************************************ MultiTracker Class ---By Laksono Kurnianggoro--- ************************************/
/** @brief This class is used to track multiple objects using the specified tracker algorithm.
* The %MultiTracker is a naive implementation of multiple object tracking.
* It processes the tracked objects independently, without any optimization across the tracked objects.
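* A minimal usage sketch (assumes `frame` and `nextFrame` are consecutive video frames; the boxes are hypothetical):
* @code
* Ptr<legacy::MultiTracker> multi = legacy::MultiTracker::create();
* multi->add( legacy::TrackerMIL::create(), frame, Rect2d( 10, 10, 50, 50 ) );
* multi->add( legacy::TrackerKCF::create(), frame, Rect2d( 100, 80, 60, 40 ) );
* std::vector<Rect2d> boxes;
* multi->update( nextFrame, boxes ); // boxes receives the updated ROIs
* @endcode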
*/
class CV_EXPORTS_W MultiTracker : public Algorithm
{
public:
/**
* \brief Constructor.
*/
CV_WRAP MultiTracker();
/**
* \brief Destructor
*/
~MultiTracker() CV_OVERRIDE;
/**
* \brief Add a new object to be tracked.
*
* @param newTracker tracking algorithm to be used
* @param image input image
* @param boundingBox a rectangle represents ROI of the tracked object
*/
CV_WRAP bool add(Ptr<cv::legacy::Tracker> newTracker, InputArray image, const Rect2d& boundingBox);
/**
* \brief Add a set of objects to be tracked.
* @param newTrackers list of tracking algorithms to be used
* @param image input image
* @param boundingBox list of the tracked objects
*/
bool add(std::vector<Ptr<legacy::Tracker> > newTrackers, InputArray image, std::vector<Rect2d> boundingBox);
/**
* \brief Update the current tracking status.
* The result will be saved in the internal storage.
* @param image input image
*/
bool update(InputArray image);
/**
* \brief Update the current tracking status.
* @param image input image
* @param boundingBox the tracking result, represent a list of ROIs of the tracked objects.
*/
CV_WRAP bool update(InputArray image, CV_OUT std::vector<Rect2d> & boundingBox);
/**
* \brief Returns a reference to a storage for the tracked objects, each object corresponds to one tracker algorithm
*/
CV_WRAP const std::vector<Rect2d>& getObjects() const;
/**
* \brief Returns a pointer to a new instance of MultiTracker
*/
CV_WRAP static Ptr<MultiTracker> create();
protected:
//! storage for the tracker algorithms.
std::vector< Ptr<Tracker> > trackerList;
//! storage for the tracked objects; each object corresponds to one tracker algorithm.
std::vector<Rect2d> objects;
};
/************************************ Multi-Tracker Classes ---By Tyan Vladimir---************************************/
/** @brief Base abstract class for the long-term Multi Object Trackers:
@sa Tracker, MultiTrackerTLD
*/
class CV_EXPORTS MultiTracker_Alt
{
public:
/** @brief Constructor for MultiTracker_Alt
*/
MultiTracker_Alt()
{
targetNum = 0;
}
/** @brief Add a new target to the tracking-list and initialize the tracker with a known bounding box that surrounds the target
@param image The initial frame
@param boundingBox The initial bounding box of target
@param tracker_algorithm Multi-tracker algorithm
@return True if the new target initialization went successfully, false otherwise
*/
bool addTarget(InputArray image, const Rect2d& boundingBox, Ptr<legacy::Tracker> tracker_algorithm);
/** @brief Update all trackers from the tracking-list, find the new most likely bounding boxes for the targets
@param image The current frame
@return True means that all targets were located; false means that the tracker couldn't locate one of the targets in the
current frame. Note that the latter *does not* imply that the tracker has failed; maybe the target is indeed
missing from the frame (say, out of sight)
*/
bool update(InputArray image);
/** @brief Current number of targets in tracking-list
*/
int targetNum;
/** @brief Trackers list for Multi-Object-Tracker
*/
std::vector <Ptr<Tracker> > trackers;
/** @brief Bounding Boxes list for Multi-Object-Tracker
*/
std::vector <Rect2d> boundingBoxes;
/** @brief List of randomly generated colors for bounding boxes display
*/
std::vector<Scalar> colors;
};
/** @brief Multi Object %Tracker for TLD.
TLD is a novel tracking framework that explicitly decomposes
the long-term tracking task into tracking, learning and detection.
The tracker follows the object from frame to frame. The detector localizes all appearances that
have been observed so far and corrects the tracker if necessary. The learning component estimates the detector's
errors and updates it to avoid these errors in the future. The implementation is based on @cite TLD .
The Median Flow algorithm (see cv::TrackerMedianFlow) was chosen as the tracking component in this
implementation, following the authors. The tracker is supposed to be able to handle rapid motions, partial
occlusions, object absence, etc.
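A minimal usage sketch (assumes consecutive frames `frame` and `nextFrame`; the target box is hypothetical):
@code
MultiTrackerTLD mt;
mt.addTarget( frame, Rect2d( 20, 30, 60, 80 ), legacy::TrackerTLD::create() );
mt.update_opt( nextFrame ); // updated boxes are available in mt.boundingBoxes
@endcode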
@sa Tracker, MultiTracker, TrackerTLD
*/
class CV_EXPORTS MultiTrackerTLD : public MultiTracker_Alt
{
public:
/** @brief Update all trackers from the tracking-list, find the new most likely bounding boxes for the targets by
an optimized update method that uses some techniques to speed up the calculations specifically for MO TLD. The only limitation
is that all target bounding boxes should have approximately the same aspect ratio. The speed boost is around 20%.
@param image The current frame.
@return True means that all targets were located; false means that the tracker couldn't locate one of the targets in the
current frame. Note that the latter *does not* imply that the tracker has failed; maybe the target is indeed
missing from the frame (say, out of sight)
*/
bool update_opt(InputArray image);
};
/*********************************** CSRT ************************************/
/** @brief the CSRT tracker
The implementation is based on @cite Lukezic_IJCV2018 Discriminative Correlation Filter with Channel and Spatial Reliability
*/
class CV_EXPORTS_W TrackerCSRT : public cv::legacy::Tracker
{
public:
struct CV_EXPORTS Params : cv::tracking::TrackerCSRT::Params
{
/**
* \brief Read parameters from a file
*/
void read(const FileNode& /*fn*/);
/**
* \brief Write parameters to a file
*/
void write(cv::FileStorage& fs) const;
};
/** @brief Constructor
@param parameters CSRT parameters TrackerCSRT::Params
*/
static Ptr<legacy::TrackerCSRT> create(const TrackerCSRT::Params &parameters);
CV_WRAP static Ptr<legacy::TrackerCSRT> create();
CV_WRAP virtual void setInitialMask(InputArray mask) = 0;
virtual ~TrackerCSRT() CV_OVERRIDE {}
};
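/** @brief Wraps a legacy tracker so it can be used through the modern cv::Tracker interface.
A minimal usage sketch (the tracker choice and the box here are illustrative; the wrapper presumably
forwards init/update calls to the legacy implementation):
@code
Ptr<cv::Tracker> tracker = legacy::upgradeTrackingAPI( legacy::TrackerMedianFlow::create() );
tracker->init( frame, Rect( 10, 10, 80, 60 ) );
@endcode
*/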
CV_EXPORTS_W Ptr<cv::Tracker> upgradeTrackingAPI(const Ptr<legacy::Tracker>& legacy_tracker);
//! @}
#ifndef CV_DOXYGEN
} // namespace
#endif
}} // namespace
#endif // OPENCV_TRACKING_LEGACY_HPP