Add project files.
70
3rdparty/opencv/inc/opencv2/gapi/infer/bindings_ie.hpp
vendored
Normal file
@@ -0,0 +1,70 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2020 Intel Corporation

#ifndef OPENCV_GAPI_INFER_BINDINGS_IE_HPP
#define OPENCV_GAPI_INFER_BINDINGS_IE_HPP

#include <opencv2/gapi/util/any.hpp>
#include "opencv2/gapi/own/exports.hpp" // GAPI_EXPORTS
#include <opencv2/gapi/gkernel.hpp>     // GKernelPackage
#include <opencv2/gapi/infer/ie.hpp>    // Params

#include <string>

namespace cv {
namespace gapi {
namespace ie {

// NB: Used by python wrapper
// This class can be marked as SIMPLE, because it's implemented as pimpl
class GAPI_EXPORTS_W_SIMPLE PyParams {
public:
    GAPI_WRAP
    PyParams() = default;

    GAPI_WRAP
    PyParams(const std::string &tag,
             const std::string &model,
             const std::string &weights,
             const std::string &device);

    GAPI_WRAP
    PyParams(const std::string &tag,
             const std::string &model,
             const std::string &device);

    GAPI_WRAP
    PyParams& constInput(const std::string &layer_name,
                         const cv::Mat &data,
                         TraitAs hint = TraitAs::TENSOR);

    GAPI_WRAP
    PyParams& cfgNumRequests(size_t nireq);

    GAPI_WRAP
    PyParams& cfgBatchSize(const size_t size);

    GBackend      backend() const;
    std::string   tag()     const;
    cv::util::any params()  const;

private:
    std::shared_ptr<Params<cv::gapi::Generic>> m_priv;
};

GAPI_EXPORTS_W PyParams params(const std::string &tag,
                               const std::string &model,
                               const std::string &weights,
                               const std::string &device);

GAPI_EXPORTS_W PyParams params(const std::string &tag,
                               const std::string &model,
                               const std::string &device);
} // namespace ie
} // namespace gapi
} // namespace cv

#endif // OPENCV_GAPI_INFER_BINDINGS_IE_HPP
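Editor's note: PyParams above is a pimpl wrapper over the generic Params<cv::gapi::Generic> declared in ie.hpp (next file), exposed mainly for the Python bindings. A minimal C++ usage sketch, assuming a hypothetical "my-net" tag and model file names that are not part of this commit:

    // Sketch only: "my-net", model.xml/model.bin and "CPU" are placeholders.
    auto ie_net = cv::gapi::ie::params("my-net", "model.xml", "model.bin", "CPU");
    ie_net.cfgNumRequests(2);                // two asynchronous infer requests
    auto nets = cv::gapi::networks(ie_net);  // later passed via cv::compile_args(nets)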
479
3rdparty/opencv/inc/opencv2/gapi/infer/ie.hpp
vendored
Normal file
@@ -0,0 +1,479 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2019-2021 Intel Corporation

#ifndef OPENCV_GAPI_INFER_IE_HPP
#define OPENCV_GAPI_INFER_IE_HPP

#include <unordered_map>
#include <unordered_set>
#include <string>
#include <array>
#include <tuple> // tuple, tuple_size
#include <map>

#include <opencv2/gapi/opencv_includes.hpp>
#include <opencv2/gapi/util/any.hpp>

#include <opencv2/core/cvdef.h>     // GAPI_EXPORTS
#include <opencv2/gapi/gkernel.hpp> // GKernelPackage
#include <opencv2/gapi/infer.hpp>   // Generic

namespace cv {
namespace gapi {
// FIXME: introduce a new sub-namespace for NN?

/**
 * @brief This namespace contains G-API OpenVINO backend functions,
 * structures, and symbols.
 */
namespace ie {

GAPI_EXPORTS cv::gapi::GBackend backend();

/**
 * Specifies how G-API and IE should treat input data.
 *
 * In OpenCV, the same cv::Mat is used to represent both
 * image and tensor data. Sometimes those are hardly distinguishable,
 * so this extra parameter is used to give G-API a hint.
 *
 * This hint controls how G-API reinterprets the data when converting
 * it to IE Blob format (and which layout/etc is assigned to this data).
 */
enum class TraitAs: int
{
    TENSOR, //!< G-API treats an associated cv::Mat as a raw tensor and passes dimensions as-is
    IMAGE   //!< G-API treats an associated cv::Mat as an image so it creates an "image" blob (NCHW/NHWC, etc)
};

using IEConfig = std::map<std::string, std::string>;

namespace detail {
struct ParamDesc {
    std::string model_path;
    std::string weights_path;
    std::string device_id;

    std::vector<std::string> input_names;
    std::vector<std::string> output_names;

    using ConstInput = std::pair<cv::Mat, TraitAs>;
    std::unordered_map<std::string, ConstInput> const_inputs;

    std::size_t num_in;
    std::size_t num_out;

    enum class Kind { Load, Import };
    Kind kind;
    bool is_generic;
    IEConfig config;

    std::map<std::string, std::vector<std::size_t>> reshape_table;
    std::unordered_set<std::string> layer_names_to_reshape;

    // NB: Number of asynchronous infer requests
    size_t nireq;

    // NB: An optional config to setup RemoteContext for IE
    cv::util::any context_config;

    // NB: batch_size can't be set to 1 by default, because some models
    // have a 2D (Layout::NC) input and, if its first dimension is not equal to 1,
    // net.setBatchSize(1) would overwrite it.
    cv::optional<size_t> batch_size;
};
} // namespace detail

// FIXME: this is probably a shared (reusable) thing
template<typename Net>
struct PortCfg {
    using In = std::array
        < std::string
        , std::tuple_size<typename Net::InArgs>::value >;
    using Out = std::array
        < std::string
        , std::tuple_size<typename Net::OutArgs>::value >;
};

/**
 * @brief This structure provides functions
 * that fill inference parameters for an "OpenVINO Toolkit" model.
 */
template<typename Net> class Params {
public:
    /** @brief Class constructor.

    Constructs Params based on model information and specifies default values for other
    inference description parameters. The model is loaded and compiled using "OpenVINO Toolkit".

    @param model Path to topology IR (.xml file).
    @param weights Path to weights (.bin file).
    @param device Target device to use.
    */
    Params(const std::string &model,
           const std::string &weights,
           const std::string &device)
        : desc{ model, weights, device, {}, {}, {}
              , std::tuple_size<typename Net::InArgs>::value  // num_in
              , std::tuple_size<typename Net::OutArgs>::value // num_out
              , detail::ParamDesc::Kind::Load
              , false
              , {}
              , {}
              , {}
              , 1u
              , {}
              , {}} {
    };

    /** @overload
    Use this constructor to work with a pre-compiled network.
    The model is imported from a pre-compiled blob.

    @param model Path to model.
    @param device Target device to use.
    */
    Params(const std::string &model,
           const std::string &device)
        : desc{ model, {}, device, {}, {}, {}
              , std::tuple_size<typename Net::InArgs>::value  // num_in
              , std::tuple_size<typename Net::OutArgs>::value // num_out
              , detail::ParamDesc::Kind::Import
              , false
              , {}
              , {}
              , {}
              , 1u
              , {}
              , {}} {
    };

    /** @brief Specifies the sequence of network input layer names for inference.

    The function is used to associate cv::gapi::infer<> inputs with the model inputs.
    The number of names has to match the number of network inputs as defined in G_API_NET().
    If a network has only a single input layer, there is no need to specify the name manually.

    @param layer_names std::array<std::string, N> where N is the number of inputs
    as defined in the @ref G_API_NET. Contains names of input layers.
    @return reference to this parameter structure.
    */
    Params<Net>& cfgInputLayers(const typename PortCfg<Net>::In &layer_names) {
        desc.input_names.clear();
        desc.input_names.reserve(layer_names.size());
        std::copy(layer_names.begin(), layer_names.end(),
                  std::back_inserter(desc.input_names));
        return *this;
    }

    /** @brief Specifies the sequence of network output layer names for inference.

    The function is used to associate cv::gapi::infer<> outputs with the model outputs.
    The number of names has to match the number of network outputs as defined in G_API_NET().
    If a network has only a single output layer, there is no need to specify the name manually.

    @param layer_names std::array<std::string, N> where N is the number of outputs
    as defined in the @ref G_API_NET. Contains names of output layers.
    @return reference to this parameter structure.
    */
    Params<Net>& cfgOutputLayers(const typename PortCfg<Net>::Out &layer_names) {
        desc.output_names.clear();
        desc.output_names.reserve(layer_names.size());
        std::copy(layer_names.begin(), layer_names.end(),
                  std::back_inserter(desc.output_names));
        return *this;
    }

    /** @brief Specifies a constant input.

    The function is used to set a constant input. This input has to be
    a preprocessed tensor if its type is TENSOR. The name of the network layer
    which will receive the provided data must be given.

    @param layer_name Name of network layer.
    @param data cv::Mat that contains data which will be associated with network layer.
    @param hint Input type @sa cv::gapi::ie::TraitAs.
    @return reference to this parameter structure.
    */
    Params<Net>& constInput(const std::string &layer_name,
                            const cv::Mat &data,
                            TraitAs hint = TraitAs::TENSOR) {
        desc.const_inputs[layer_name] = {data, hint};
        return *this;
    }

    /** @brief Specifies OpenVINO plugin configuration.

    The function is used to set configuration for the OpenVINO plugin. Some parameters
    can be different for each plugin. Please follow https://docs.openvinotoolkit.org/latest/index.html
    for information about a specific plugin.

    @param cfg Map of pairs: (config parameter name, config parameter value).
    @return reference to this parameter structure.
    */
    Params& pluginConfig(const IEConfig& cfg) {
        desc.config = cfg;
        return *this;
    }

    /** @overload
    Function with an rvalue parameter.

    @param cfg rvalue map of pairs: (config parameter name, config parameter value).
    @return reference to this parameter structure.
    */
    Params& pluginConfig(IEConfig&& cfg) {
        desc.config = std::move(cfg);
        return *this;
    }

    /** @brief Specifies configuration for RemoteContext in InferenceEngine.

    When RemoteContext is configured, the backend imports the networks using the context.
    It also expects cv::MediaFrames to be actually remote, to operate with blobs via the context.

    @param ctx_cfg cv::util::any value which holds InferenceEngine::ParamMap.
    @return reference to this parameter structure.
    */
    Params& cfgContextParams(const cv::util::any& ctx_cfg) {
        desc.context_config = ctx_cfg;
        return *this;
    }

    /** @overload
    Function with an rvalue parameter.

    @param ctx_cfg cv::util::any value which holds InferenceEngine::ParamMap.
    @return reference to this parameter structure.
    */
    Params& cfgContextParams(cv::util::any&& ctx_cfg) {
        desc.context_config = std::move(ctx_cfg);
        return *this;
    }

    /** @brief Specifies the number of asynchronous inference requests.

    @param nireq Number of asynchronous inference requests.
    @return reference to this parameter structure.
    */
    Params& cfgNumRequests(size_t nireq) {
        GAPI_Assert(nireq > 0 && "Number of infer requests must be greater than zero!");
        desc.nireq = nireq;
        return *this;
    }

    /** @brief Specifies new input shapes for the network inputs.

    The function is used to specify new input shapes for the network inputs.
    Follow https://docs.openvinotoolkit.org/latest/classInferenceEngine_1_1networkNetwork.html
    for additional information.

    @param reshape_table Map of pairs: name of the corresponding data and its dimensions.
    @return reference to this parameter structure.
    */
    Params<Net>& cfgInputReshape(const std::map<std::string, std::vector<std::size_t>>& reshape_table) {
        desc.reshape_table = reshape_table;
        return *this;
    }

    /** @overload */
    Params<Net>& cfgInputReshape(std::map<std::string, std::vector<std::size_t>>&& reshape_table) {
        desc.reshape_table = std::move(reshape_table);
        return *this;
    }

    /** @overload

    @param layer_name Name of layer.
    @param layer_dims New dimensions for this layer.
    @return reference to this parameter structure.
    */
    Params<Net>& cfgInputReshape(const std::string& layer_name, const std::vector<size_t>& layer_dims) {
        desc.reshape_table.emplace(layer_name, layer_dims);
        return *this;
    }

    /** @overload */
    Params<Net>& cfgInputReshape(std::string&& layer_name, std::vector<size_t>&& layer_dims) {
        desc.reshape_table.emplace(layer_name, layer_dims);
        return *this;
    }

    /** @overload

    @param layer_names Set of names of network layers that will be used for network reshape.
    @return reference to this parameter structure.
    */
    Params<Net>& cfgInputReshape(const std::unordered_set<std::string>& layer_names) {
        desc.layer_names_to_reshape = layer_names;
        return *this;
    }

    /** @overload

    @param layer_names rvalue set of names of network layers; the selected layers
    will be reshaped automatically to the input image size.
    @return reference to this parameter structure.
    */
    Params<Net>& cfgInputReshape(std::unordered_set<std::string>&& layer_names) {
        desc.layer_names_to_reshape = std::move(layer_names);
        return *this;
    }

    /** @brief Specifies the inference batch size.

    The function is used to specify the inference batch size.
    Follow https://docs.openvinotoolkit.org/latest/classInferenceEngine_1_1CNNNetwork.html#a8e9d19270a48aab50cb5b1c43eecb8e9 for additional information.

    @param size Batch size which will be used.
    @return reference to this parameter structure.
    */
    Params<Net>& cfgBatchSize(const size_t size) {
        desc.batch_size = cv::util::make_optional(size);
        return *this;
    }

    // BEGIN(G-API's network parametrization API)
    GBackend      backend() const { return cv::gapi::ie::backend(); }
    std::string   tag()     const { return Net::tag(); }
    cv::util::any params()  const { return { desc }; }
    // END(G-API's network parametrization API)

protected:
    detail::ParamDesc desc;
};

/*
 * @brief This structure provides functions for the generic network type that
 * fill inference parameters.
 * @see struct Generic
 */
template<>
class Params<cv::gapi::Generic> {
public:
    /** @brief Class constructor.

    Constructs Params based on model information and sets default values for other
    inference description parameters. The model is loaded and compiled using OpenVINO Toolkit.

    @param tag string tag of the network for which these parameters are intended.
    @param model path to topology IR (.xml file).
    @param weights path to weights (.bin file).
    @param device target device to use.
    */
    Params(const std::string &tag,
           const std::string &model,
           const std::string &weights,
           const std::string &device)
        : desc{ model, weights, device, {}, {}, {}, 0u, 0u,
                detail::ParamDesc::Kind::Load, true, {}, {}, {}, 1u,
                {}, {}},
          m_tag(tag) {
    };

    /** @overload

    This constructor is for pre-compiled networks. The model is imported from a
    pre-compiled blob.

    @param tag string tag of the network for which these parameters are intended.
    @param model path to model.
    @param device target device to use.
    */
    Params(const std::string &tag,
           const std::string &model,
           const std::string &device)
        : desc{ model, {}, device, {}, {}, {}, 0u, 0u,
                detail::ParamDesc::Kind::Import, true, {}, {}, {}, 1u,
                {}, {}},
          m_tag(tag) {
    };

    /** @see ie::Params::pluginConfig. */
    Params& pluginConfig(const IEConfig& cfg) {
        desc.config = cfg;
        return *this;
    }

    /** @overload */
    Params& pluginConfig(IEConfig&& cfg) {
        desc.config = std::move(cfg);
        return *this;
    }

    /** @see ie::Params::constInput. */
    Params& constInput(const std::string &layer_name,
                       const cv::Mat &data,
                       TraitAs hint = TraitAs::TENSOR) {
        desc.const_inputs[layer_name] = {data, hint};
        return *this;
    }

    /** @see ie::Params::cfgNumRequests. */
    Params& cfgNumRequests(size_t nireq) {
        GAPI_Assert(nireq > 0 && "Number of infer requests must be greater than zero!");
        desc.nireq = nireq;
        return *this;
    }

    /** @see ie::Params::cfgInputReshape */
    Params& cfgInputReshape(const std::map<std::string, std::vector<std::size_t>>& reshape_table) {
        desc.reshape_table = reshape_table;
        return *this;
    }

    /** @overload */
    Params& cfgInputReshape(std::map<std::string, std::vector<std::size_t>>&& reshape_table) {
        desc.reshape_table = std::move(reshape_table);
        return *this;
    }

    /** @overload */
    Params& cfgInputReshape(std::string&& layer_name, std::vector<size_t>&& layer_dims) {
        desc.reshape_table.emplace(layer_name, layer_dims);
        return *this;
    }

    /** @overload */
    Params& cfgInputReshape(const std::string& layer_name, const std::vector<size_t>& layer_dims) {
        desc.reshape_table.emplace(layer_name, layer_dims);
        return *this;
    }

    /** @overload */
    Params& cfgInputReshape(std::unordered_set<std::string>&& layer_names) {
        desc.layer_names_to_reshape = std::move(layer_names);
        return *this;
    }

    /** @overload */
    Params& cfgInputReshape(const std::unordered_set<std::string>& layer_names) {
        desc.layer_names_to_reshape = layer_names;
        return *this;
    }

    /** @see ie::Params::cfgBatchSize */
    Params& cfgBatchSize(const size_t size) {
        desc.batch_size = cv::util::make_optional(size);
        return *this;
    }

    // BEGIN(G-API's network parametrization API)
    GBackend      backend() const { return cv::gapi::ie::backend(); }
    std::string   tag()     const { return m_tag; }
    cv::util::any params()  const { return { desc }; }
    // END(G-API's network parametrization API)

protected:
    detail::ParamDesc desc;
    std::string m_tag;
};

} // namespace ie
} // namespace gapi
} // namespace cv

#endif // OPENCV_GAPI_INFER_IE_HPP
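Editor's note: a hedged sketch of how the typed Params<Net> API above is usually wired into a G-API graph; the network alias, tag and IR file names below are placeholders, not part of this commit:

    // Sketch only: FaceDetector, its tag and the IR file names are made up.
    G_API_NET(FaceDetector, <cv::GMat(cv::GMat)>, "sample.face-detector");

    auto det_params = cv::gapi::ie::Params<FaceDetector>{
        "face-detector.xml", "face-detector.bin", "CPU"
    }.cfgNumRequests(4)   // four asynchronous infer requests
     .cfgBatchSize(1);    // explicit batch size

    // The object is then handed to GComputation::compile()/apply() via
    // cv::compile_args(cv::gapi::networks(det_params)).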
284
3rdparty/opencv/inc/opencv2/gapi/infer/onnx.hpp
vendored
Normal file
@@ -0,0 +1,284 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2020-2021 Intel Corporation

#ifndef OPENCV_GAPI_INFER_ONNX_HPP
#define OPENCV_GAPI_INFER_ONNX_HPP

#include <unordered_map>
#include <string>
#include <array>
#include <tuple> // tuple, tuple_size

#include <opencv2/gapi/opencv_includes.hpp>
#include <opencv2/gapi/util/any.hpp>

#include <opencv2/core/cvdef.h>     // GAPI_EXPORTS
#include <opencv2/gapi/gkernel.hpp> // GKernelPackage

namespace cv {
namespace gapi {

/**
 * @brief This namespace contains G-API ONNX Runtime backend functions, structures, and symbols.
 */
namespace onnx {

GAPI_EXPORTS cv::gapi::GBackend backend();

enum class TraitAs: int {
    TENSOR, //!< G-API treats an associated cv::Mat as a raw tensor
            //   and passes dimensions as-is
    IMAGE   //!< G-API treats an associated cv::Mat as an image so
            //   it creates an "image" blob (NCHW/NHWC, etc)
};

using PostProc = std::function<void(const std::unordered_map<std::string, cv::Mat> &,
                                          std::unordered_map<std::string, cv::Mat> &)>;

namespace detail {
/**
 * @brief This structure contains description of inference parameters
 * which is specific to ONNX models.
 */
struct ParamDesc {
    std::string model_path; //!< Path to model.

    // NB: num_* may differ from the topology's real input/output port numbers
    // (e.g. on the topology's partial execution)
    std::size_t num_in;  //!< How many inputs are defined in the operation
    std::size_t num_out; //!< How many outputs are defined in the operation

    // NB: Here order follows the `Net` API
    std::vector<std::string> input_names;  //!< Names of input network layers.
    std::vector<std::string> output_names; //!< Names of output network layers.

    using ConstInput = std::pair<cv::Mat, TraitAs>;
    std::unordered_map<std::string, ConstInput> const_inputs; //!< Map of network layer names to the ConstInput associated with each of them.

    std::vector<cv::Scalar> mean;  //!< Mean values for preprocessing.
    std::vector<cv::Scalar> stdev; //!< Standard deviation values for preprocessing.

    std::vector<cv::GMatDesc> out_metas; //!< Out meta information about your output (type, dimension).
    PostProc custom_post_proc;           //!< Post processing function.

    std::vector<bool> normalize; //!< Vector of bool values that enable or disable normalization of input data.

    std::vector<std::string> names_to_remap; //!< Names of output layers that will be processed in the PostProc function.
};
} // namespace detail

template<typename Net>
struct PortCfg {
    using In = std::array
        < std::string
        , std::tuple_size<typename Net::InArgs>::value >;
    using Out = std::array
        < std::string
        , std::tuple_size<typename Net::OutArgs>::value >;
    using NormCoefs = std::array
        < cv::Scalar
        , std::tuple_size<typename Net::InArgs>::value >;
    using Normalize = std::array
        < bool
        , std::tuple_size<typename Net::InArgs>::value >;
};

/**
 * Contains a description of inference parameters and a kit of functions that
 * fill these parameters.
 */
template<typename Net> class Params {
public:
    /** @brief Class constructor.

    Constructs Params based on model information and sets default values for other
    inference description parameters.

    @param model Path to model (.onnx file).
    */
    Params(const std::string &model) {
        desc.model_path = model;
        desc.num_in  = std::tuple_size<typename Net::InArgs>::value;
        desc.num_out = std::tuple_size<typename Net::OutArgs>::value;
    };

    /** @brief Specifies the sequence of network input layer names for inference.

    The function is used to associate data of graph inputs with input layers of
    the network topology. The number of names has to match the number of network
    inputs. If a network has only one input layer, there is no need to call it, as
    the layer is associated with the input automatically, but this doesn't prevent
    you from doing it yourself.

    @param layer_names std::array<std::string, N> where N is the number of inputs
    as defined in the @ref G_API_NET. Contains names of input layers.
    @return the reference on modified object.
    */
    Params<Net>& cfgInputLayers(const typename PortCfg<Net>::In &layer_names) {
        desc.input_names.assign(layer_names.begin(), layer_names.end());
        return *this;
    }

    /** @brief Specifies the sequence of output layer names for inference.

    The function is used to associate data of graph outputs with output layers of
    the network topology. If a network has only one output layer, there is no need
    to call it, as the layer is associated with the output automatically, but this
    doesn't prevent you from doing it yourself. The number of names has to match
    the number of network outputs; alternatively, you can set your own outputs, but
    in that case you additionally have to use the @ref cfgPostProc function.

    @param layer_names std::array<std::string, N> where N is the number of outputs
    as defined in the @ref G_API_NET. Contains names of output layers.
    @return the reference on modified object.
    */
    Params<Net>& cfgOutputLayers(const typename PortCfg<Net>::Out &layer_names) {
        desc.output_names.assign(layer_names.begin(), layer_names.end());
        return *this;
    }

    /** @brief Sets a constant input.

    The function is used to set a constant input. This input has to be
    a prepared tensor since preprocessing is disabled for this case. You should
    provide the name of the network layer which will receive the provided data.

    @param layer_name Name of network layer.
    @param data cv::Mat that contains data which will be associated with network layer.
    @param hint Type of input (TENSOR).
    @return the reference on modified object.
    */
    Params<Net>& constInput(const std::string &layer_name,
                            const cv::Mat &data,
                            TraitAs hint = TraitAs::TENSOR) {
        desc.const_inputs[layer_name] = {data, hint};
        return *this;
    }

    /** @brief Specifies mean values and standard deviations for preprocessing.

    The function is used to set mean values and standard deviations for
    preprocessing of input data.

    @param m std::array<cv::Scalar, N> where N is the number of inputs
    as defined in the @ref G_API_NET. Contains mean values.
    @param s std::array<cv::Scalar, N> where N is the number of inputs
    as defined in the @ref G_API_NET. Contains standard deviation values.
    @return the reference on modified object.
    */
    Params<Net>& cfgMeanStd(const typename PortCfg<Net>::NormCoefs &m,
                            const typename PortCfg<Net>::NormCoefs &s) {
        desc.mean.assign(m.begin(), m.end());
        desc.stdev.assign(s.begin(), s.end());
        return *this;
    }

    /** @brief Configures graph output and provides the post processing function from user.

    The function is used when you work with networks with dynamic outputs.
    Since the dimensions of the inference result cannot be known in advance, you need
    to provide them for the construction of the graph output. These dimensions can differ
    from those of the inference result, so you have to provide a @ref PostProc function
    that takes information from the inference result and fills the output, which is
    constructed with the dimensions from out_metas.

    @param out_metas Out meta information about your output (type, dimension).
    @param remap_function Post processing function, which has two parameters. The first is the ONNX
    result, the second is the graph output. Both parameters are maps that contain pairs of
    a layer's name and a cv::Mat.
    @return the reference on modified object.
    */
    Params<Net>& cfgPostProc(const std::vector<cv::GMatDesc> &out_metas,
                             const PostProc &remap_function) {
        desc.out_metas        = out_metas;
        desc.custom_post_proc = remap_function;
        return *this;
    }

    /** @overload
    Function with rvalue parameters.

    @param out_metas rvalue out meta information about your output (type, dimension).
    @param remap_function rvalue post processing function, which has two parameters. The first is the ONNX
    result, the second is the graph output. Both parameters are maps that contain pairs of
    a layer's name and a cv::Mat.
    @return the reference on modified object.
    */
    Params<Net>& cfgPostProc(std::vector<cv::GMatDesc> &&out_metas,
                             PostProc &&remap_function) {
        desc.out_metas        = std::move(out_metas);
        desc.custom_post_proc = std::move(remap_function);
        return *this;
    }

    /** @overload
    The function has an additional parameter names_to_remap. This parameter provides
    information about the output layers which will be used for inference and by the post
    processing function.

    @param out_metas Out meta information.
    @param remap_function Post processing function.
    @param names_to_remap Names of output layers. The network's inference will
    be done on these layers. The inference's result will be processed in the post processing
    function using these names.
    @return the reference on modified object.
    */
    Params<Net>& cfgPostProc(const std::vector<cv::GMatDesc> &out_metas,
                             const PostProc &remap_function,
                             const std::vector<std::string> &names_to_remap) {
        desc.out_metas        = out_metas;
        desc.custom_post_proc = remap_function;
        desc.names_to_remap   = names_to_remap;
        return *this;
    }

    /** @overload
    Function with rvalue parameters and the additional parameter names_to_remap.

    @param out_metas rvalue out meta information.
    @param remap_function rvalue post processing function.
    @param names_to_remap rvalue names of output layers. The network's inference will
    be done on these layers. The inference's result will be processed in the post processing
    function using these names.
    @return the reference on modified object.
    */
    Params<Net>& cfgPostProc(std::vector<cv::GMatDesc> &&out_metas,
                             PostProc &&remap_function,
                             std::vector<std::string> &&names_to_remap) {
        desc.out_metas        = std::move(out_metas);
        desc.custom_post_proc = std::move(remap_function);
        desc.names_to_remap   = std::move(names_to_remap);
        return *this;
    }

    /** @brief Specifies the normalize parameter for preprocessing.

    The function is used to set the normalize parameter for preprocessing of input data.

    @param normalizations std::array<bool, N> where N is the number of inputs
    as defined in the @ref G_API_NET. Contains bool values that enable or disable
    normalization of input data.
    @return the reference on modified object.
    */
    Params<Net>& cfgNormalize(const typename PortCfg<Net>::Normalize &normalizations) {
        desc.normalize.assign(normalizations.begin(), normalizations.end());
        return *this;
    }

    // BEGIN(G-API's network parametrization API)
    GBackend      backend() const { return cv::gapi::onnx::backend(); }
    std::string   tag()     const { return Net::tag(); }
    cv::util::any params()  const { return { desc }; }
    // END(G-API's network parametrization API)

protected:
    detail::ParamDesc desc;
};

} // namespace onnx
} // namespace gapi
} // namespace cv

#endif // OPENCV_GAPI_INFER_ONNX_HPP
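Editor's note: a hedged sketch of the ONNX Params API above; the network alias, tag, model file and normalization constants are placeholders, not part of this commit:

    // Sketch only: Classifier, its tag, the file name and the constants are made up.
    G_API_NET(Classifier, <cv::GMat(cv::GMat)>, "sample.onnx-classifier");

    auto onnx_params = cv::gapi::onnx::Params<Classifier>{ "classifier.onnx" }
        .cfgMeanStd({ cv::Scalar{0.485, 0.456, 0.406} },   // per-input mean
                    { cv::Scalar{0.229, 0.224, 0.225} });  // per-input stddev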
138
3rdparty/opencv/inc/opencv2/gapi/infer/parsers.hpp
vendored
Normal file
@@ -0,0 +1,138 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2020 Intel Corporation


#ifndef OPENCV_GAPI_PARSERS_HPP
#define OPENCV_GAPI_PARSERS_HPP

#include <utility> // std::tuple

#include <opencv2/gapi/gmat.hpp>
#include <opencv2/gapi/gkernel.hpp>

namespace cv { namespace gapi {
namespace nn {
namespace parsers {
    using GRects      = GArray<Rect>;
    using GDetections = std::tuple<GArray<Rect>, GArray<int>>;

    G_TYPED_KERNEL(GParseSSDBL, <GDetections(GMat, GOpaque<Size>, float, int)>,
                   "org.opencv.nn.parsers.parseSSD_BL") {
        static std::tuple<GArrayDesc, GArrayDesc> outMeta(const GMatDesc&, const GOpaqueDesc&, float, int) {
            return std::make_tuple(empty_array_desc(), empty_array_desc());
        }
    };

    G_TYPED_KERNEL(GParseSSD, <GRects(GMat, GOpaque<Size>, float, bool, bool)>,
                   "org.opencv.nn.parsers.parseSSD") {
        static GArrayDesc outMeta(const GMatDesc&, const GOpaqueDesc&, float, bool, bool) {
            return empty_array_desc();
        }
    };

    G_TYPED_KERNEL(GParseYolo, <GDetections(GMat, GOpaque<Size>, float, float, std::vector<float>)>,
                   "org.opencv.nn.parsers.parseYolo") {
        static std::tuple<GArrayDesc, GArrayDesc> outMeta(const GMatDesc&, const GOpaqueDesc&,
                                                          float, float, const std::vector<float>&) {
            return std::make_tuple(empty_array_desc(), empty_array_desc());
        }
        static const std::vector<float>& defaultAnchors() {
            static std::vector<float> anchors {
                0.57273f, 0.677385f, 1.87446f, 2.06253f, 3.33843f, 5.47434f, 7.88282f, 3.52778f, 9.77052f, 9.16828f
            };
            return anchors;
        }
    };
} // namespace parsers
} // namespace nn

/** @brief Parses output of SSD network.

Extracts detection information (box, confidence, label) from SSD output and
filters it by the given confidence and label.

@note Function textual ID is "org.opencv.nn.parsers.parseSSD_BL"

@param in Input CV_32F tensor with {1,1,N,7} dimensions.
@param inSz Size to project detected boxes to (size of the input image).
@param confidenceThreshold If confidence of the
detection is smaller than confidence threshold, detection is rejected.
@param filterLabel If provided (!= -1), only detections with
the given label will get to the output.
@return a tuple with a vector of detected boxes and a vector of appropriate labels.
*/
GAPI_EXPORTS_W std::tuple<GArray<Rect>, GArray<int>> parseSSD(const GMat& in,
                                                              const GOpaque<Size>& inSz,
                                                              const float confidenceThreshold = 0.5f,
                                                              const int filterLabel = -1);

/** @brief Parses output of SSD network.

Extracts detection information (box, confidence) from SSD output and
filters it by the given confidence and by going out of bounds.

@note Function textual ID is "org.opencv.nn.parsers.parseSSD"

@param in Input CV_32F tensor with {1,1,N,7} dimensions.
@param inSz Size to project detected boxes to (size of the input image).
@param confidenceThreshold If confidence of the
detection is smaller than confidence threshold, detection is rejected.
@param alignmentToSquare If provided true, bounding boxes are extended to squares.
The center of the rectangle remains unchanged; the side of the square is
the larger side of the rectangle.
@param filterOutOfBounds If provided true, out-of-frame boxes are filtered.
@return a vector of detected bounding boxes.
*/
GAPI_EXPORTS_W GArray<Rect> parseSSD(const GMat& in,
                                     const GOpaque<Size>& inSz,
                                     const float confidenceThreshold,
                                     const bool alignmentToSquare,
                                     const bool filterOutOfBounds);

/** @brief Parses output of Yolo network.

Extracts detection information (box, confidence, label) from Yolo output,
filters it by the given confidence and performs non-maximum suppression for overlapping boxes.

@note Function textual ID is "org.opencv.nn.parsers.parseYolo"

@param in Input CV_32F tensor with {1,13,13,N} dimensions, N should satisfy:
\f[\texttt{N} = (\texttt{num_classes} + \texttt{5}) * \texttt{5},\f]
where num_classes is the number of classes the Yolo network was trained with.
@param inSz Size to project detected boxes to (size of the input image).
@param confidenceThreshold If confidence of the
detection is smaller than confidence threshold, detection is rejected.
@param nmsThreshold Non-maximum suppression threshold which controls the minimum
relative box intersection area required for rejecting the box with a smaller confidence.
If 1.f, nms is not performed and no boxes are rejected.
@param anchors Anchors the Yolo network was trained with.
@note The default anchor values are specified for YOLO v2 Tiny as described in the Intel Open Model Zoo
<a href="https://github.com/openvinotoolkit/open_model_zoo/blob/master/models/public/yolo-v2-tiny-tf/yolo-v2-tiny-tf.md">documentation</a>.
@return a tuple with a vector of detected boxes and a vector of appropriate labels.
*/
GAPI_EXPORTS_W std::tuple<GArray<Rect>, GArray<int>> parseYolo(const GMat& in,
                                                               const GOpaque<Size>& inSz,
                                                               const float confidenceThreshold = 0.5f,
                                                               const float nmsThreshold = 0.5f,
                                                               const std::vector<float>& anchors
                                                                   = nn::parsers::GParseYolo::defaultAnchors());

} // namespace gapi
} // namespace cv

// Reimport parseSSD & parseYolo under their initial namespace
namespace cv {
namespace gapi {
namespace streaming {

using cv::gapi::parseSSD;
using cv::gapi::parseYolo;

} // namespace streaming
} // namespace gapi
} // namespace cv

#endif // OPENCV_GAPI_PARSERS_HPP
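Editor's note: a hedged fragment showing how the parsers above typically appear in a G-API graph; the `blob` and `frame_size` nodes come from an assumed larger pipeline (e.g. an SSD inference output and the input frame size) and are not part of this commit:

    // Fragment only: blob and frame_size are placeholders for graph nodes
    // produced elsewhere (e.g. by cv::gapi::infer<> and a size query).
    cv::GMat              blob;
    cv::GOpaque<cv::Size> frame_size;

    cv::GArray<cv::Rect> boxes;
    cv::GArray<int>      labels;
    std::tie(boxes, labels) = cv::gapi::parseSSD(blob, frame_size,
                                                 0.5f /* confidence */,
                                                 -1   /* no label filter */);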