Unverified · Commit a817813b authored by Anatoliy Talamanov, committed by GitHub

Merge pull request #24045 from TolyaTalamanov:at/add-onnx-directml-execution-provider

G-API: Support DirectML Execution Provider for ONNXRT Backend #24045

### Pull Request Readiness Checklist

See details at https://github.com/opencv/opencv/wiki/How_to_contribute#making-a-good-pull-request

- [ ] I agree to contribute to the project under Apache 2 License.
- [ ] To the best of my knowledge, the proposed patch is not based on code under the GPL or another license that is incompatible with OpenCV
- [ ] The PR is proposed to the proper branch
- [ ] There is a reference to the original bug report and related work
- [ ] There is accuracy test, performance test and test data in opencv_extra repository, if applicable
      Patch to opencv_extra has the same branch name.
- [ ] The feature is well documented and sample code can be built with the project CMake
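
Below is a minimal usage sketch of the API this PR adds. The network tag, model path, and graph wiring are hypothetical placeholders; only `cfgAddExecutionProvider` and `cfgDisableMemPattern` come from this patch:

```cpp
#include <opencv2/gapi/infer.hpp>       // G_API_NET
#include <opencv2/gapi/infer/onnx.hpp>  // cv::gapi::onnx::Params, ep::DirectML

// Hypothetical one-input/one-output network tag.
G_API_NET(MyNet, <cv::GMat(cv::GMat)>, "sample.my-net");

int main() {
    namespace ep = cv::gapi::onnx::ep;
    auto net = cv::gapi::onnx::Params<MyNet>{"model.onnx"}  // placeholder path
        .cfgAddExecutionProvider(ep::DirectML(0))  // DirectML device id 0
        .cfgDisableMemPattern();  // ORT's DirectML EP docs require memory patterns off
    // ... pass cv::gapi::networks(net) to GComputation::compile()/apply() as usual.
    (void)net;
    return 0;
}
```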
Parent: a25e809d
@@ -16,7 +16,22 @@ if(ONNXRT_ROOT_DIR)
         CMAKE_FIND_ROOT_PATH_BOTH)
 endif()

+macro(detect_onxxrt_ep filename dir have_ep_var)
+  find_path(ORT_EP_INCLUDE ${filename} ${dir} CMAKE_FIND_ROOT_PATH_BOTH)
+  if(ORT_EP_INCLUDE)
+    set(${have_ep_var} TRUE)
+  endif()
+endmacro()
+
 if(ORT_LIB AND ORT_INCLUDE)
+  # Check DirectML Execution Provider availability
+  get_filename_component(dml_dir ${ONNXRT_ROOT_DIR}/include/onnxruntime/core/providers/dml ABSOLUTE)
+  detect_onxxrt_ep(
+    dml_provider_factory.h
+    ${dml_dir}
+    HAVE_ONNX_DML
+  )
+
   set(HAVE_ONNX TRUE)
   # For CMake output only
   set(ONNX_LIBRARIES "${ORT_LIB}" CACHE STRING "ONNX Runtime libraries")
......
@@ -162,6 +162,7 @@ set(gapi_srcs
     # ONNX backend
     src/backends/onnx/gonnxbackend.cpp
+    src/backends/onnx/dml_ep.cpp

     # Render backend
     src/backends/render/grenderocv.cpp
@@ -366,6 +367,9 @@ endif()
 if(HAVE_ONNX)
   ocv_target_link_libraries(${the_module} PRIVATE ${ONNX_LIBRARY})
   ocv_target_compile_definitions(${the_module} PRIVATE HAVE_ONNX=1)
+  if(HAVE_ONNX_DML)
+    ocv_target_compile_definitions(${the_module} PRIVATE HAVE_ONNX_DML=1)
+  endif()
   if(TARGET opencv_test_gapi)
     ocv_target_compile_definitions(opencv_test_gapi PRIVATE HAVE_ONNX=1)
     ocv_target_link_libraries(opencv_test_gapi PRIVATE ${ONNX_LIBRARY})
......
@@ -34,7 +34,10 @@ public:
     PyParams& cfgNormalize(const std::string &layer_name, bool flag);

     GAPI_WRAP
-    PyParams& cfgExecutionProvider(ep::OpenVINO ov_ep);
+    PyParams& cfgAddExecutionProvider(ep::OpenVINO ep);
+
+    GAPI_WRAP
+    PyParams& cfgAddExecutionProvider(ep::DirectML ep);

     GAPI_WRAP
     PyParams& cfgDisableMemPattern();
......
@@ -45,28 +45,13 @@ struct GAPI_EXPORTS_W_SIMPLE OpenVINO {
     /** @brief Class constructor.

-    Constructs OpenVINO parameters based on device information.
+    Constructs OpenVINO parameters based on device type information.

-    @param device Target device to use.
+    @param dev_type Target device type to use. ("CPU_FP32", "GPU_FP16", etc)
     */
     GAPI_WRAP
-    OpenVINO(const std::string &device)
-        : device_id(device) {
-    }
-
-    /** @brief Specifies OpenVINO Execution Provider device type.
-
-    This function is used to override the accelerator hardware type
-    and precision at runtime. If this option is not explicitly configured, default
-    hardware and precision specified during onnxruntime build time is used.
-
-    @param type Device type ("CPU_FP32", "GPU_FP16", etc)
-    @return reference to this parameter structure.
-    */
-    GAPI_WRAP
-    OpenVINO& cfgDeviceType(const std::string &type) {
-        device_type = cv::util::make_optional(type);
-        return *this;
-    }
+    explicit OpenVINO(const std::string &dev_type)
+        : device_type(dev_type) {
+    }

     /** @brief Specifies OpenVINO Execution Provider cache dir.
@@ -86,15 +71,14 @@ struct GAPI_EXPORTS_W_SIMPLE OpenVINO {
     /** @brief Specifies OpenVINO Execution Provider number of threads.

     This function is used to override the accelerator default value
-    of number of threads with this value at runtime. If this option
-    is not explicitly set, default value of 8 is used during build time.
+    of number of threads with this value at runtime.

     @param nthreads Number of threads.
     @return reference to this parameter structure.
     */
     GAPI_WRAP
     OpenVINO& cfgNumThreads(size_t nthreads) {
-        num_of_threads = cv::util::make_optional(nthreads);
+        num_of_threads = nthreads;
         return *this;
     }
@@ -127,15 +111,39 @@ struct GAPI_EXPORTS_W_SIMPLE OpenVINO {
         return *this;
     }

-    std::string device_id;
+    std::string device_type;
     std::string cache_dir;
-    cv::optional<std::string> device_type;
-    cv::optional<size_t> num_of_threads;
+    size_t num_of_threads = 0;
     bool enable_opencl_throttling = false;
     bool enable_dynamic_shapes = false;
 };

-using EP = cv::util::variant<cv::util::monostate, OpenVINO>;
+/**
+ * @brief This structure provides functions
+ * that fill inference options for ONNX DirectML Execution Provider.
+ * Please follow https://onnxruntime.ai/docs/execution-providers/DirectML-ExecutionProvider.html#directml-execution-provider
+ */
+class GAPI_EXPORTS_W_SIMPLE DirectML {
+public:
+    // NB: Used from python.
+    /// @private -- Exclude this constructor from OpenCV documentation
+    GAPI_WRAP
+    DirectML() = default;
+
+    /** @brief Class constructor.
+
+    Constructs DirectML parameters based on device id.
+
+    @param device_id Target device id to use. ("0", "1", etc)
+    */
+    GAPI_WRAP
+    explicit DirectML(const int device_id) : ddesc(device_id) { };
+
+    using DeviceDesc = cv::util::variant<int>;
+    DeviceDesc ddesc;
+};
+
+using EP = cv::util::variant<cv::util::monostate, OpenVINO, DirectML>;

 } // namespace ep
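For illustration, the `EP` variant above now admits both provider types; a minimal sketch of constructing them with this patch's API (values are illustrative):

```cpp
namespace ep = cv::gapi::onnx::ep;
ep::EP none;                           // cv::util::monostate: no provider configured
ep::EP ov  = ep::OpenVINO("GPU_FP16"); // now built directly from a device-type string
ep::EP dml = ep::DirectML(1);          // built from a DirectML device id
```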
@@ -191,7 +199,7 @@ struct ParamDesc {
     std::unordered_map<std::string, std::pair<cv::Scalar, cv::Scalar> > generic_mstd;
     std::unordered_map<std::string, bool> generic_norm;

-    cv::gapi::onnx::ep::EP execution_provider;
+    std::vector<cv::gapi::onnx::ep::EP> execution_providers;
     bool disable_mem_pattern;
 };
 } // namespace detail
@@ -395,17 +403,31 @@ public:
         return *this;
     }

-    /** @brief Specifies execution provider for runtime.
+    /** @brief Adds execution provider for runtime.

-    The function is used to set ONNX Runtime OpenVINO Execution Provider options.
+    The function is used to add ONNX Runtime OpenVINO Execution Provider options.

-    @param ovep OpenVINO Execution Provider options.
+    @param ep OpenVINO Execution Provider options.
     @see cv::gapi::onnx::ep::OpenVINO.

     @return the reference on modified object.
     */
-    Params<Net>& cfgExecutionProvider(ep::OpenVINO&& ovep) {
-        desc.execution_provider = std::move(ovep);
+    Params<Net>& cfgAddExecutionProvider(ep::OpenVINO&& ep) {
+        desc.execution_providers.emplace_back(std::move(ep));
+        return *this;
+    }
+
+    /** @brief Adds execution provider for runtime.
+
+    The function is used to add ONNX Runtime DirectML Execution Provider options.
+
+    @param ep DirectML Execution Provider options.
+    @see cv::gapi::onnx::ep::DirectML.
+
+    @return the reference on modified object.
+    */
+    Params<Net>& cfgAddExecutionProvider(ep::DirectML&& ep) {
+        desc.execution_providers.emplace_back(std::move(ep));
         return *this;
     }
@@ -447,20 +469,29 @@ public:
     Params(const std::string& tag, const std::string& model_path)
         : desc{model_path, 0u, 0u, {}, {}, {}, {}, {}, {}, {}, {}, {}, true, {}, {}, {}, false }, m_tag(tag) {}

     /** @see onnx::Params::cfgMeanStdDev. */
     void cfgMeanStdDev(const std::string &layer,
                        const cv::Scalar &m,
                        const cv::Scalar &s) {
         desc.generic_mstd[layer] = std::make_pair(m, s);
     }

     /** @see onnx::Params::cfgNormalize. */
     void cfgNormalize(const std::string &layer, bool flag) {
         desc.generic_norm[layer] = flag;
     }

-    void cfgExecutionProvider(ep::OpenVINO&& ov_ep) {
-        desc.execution_provider = std::move(ov_ep);
+    /** @see onnx::Params::cfgAddExecutionProvider. */
+    void cfgAddExecutionProvider(ep::OpenVINO&& ep) {
+        desc.execution_providers.emplace_back(std::move(ep));
+    }
+
+    /** @see onnx::Params::cfgAddExecutionProvider. */
+    void cfgAddExecutionProvider(ep::DirectML&& ep) {
+        desc.execution_providers.emplace_back(std::move(ep));
     }

     /** @see onnx::Params::cfgDisableMemPattern. */
     void cfgDisableMemPattern() {
         desc.disable_mem_pattern = true;
     }
......
@@ -30,6 +30,7 @@ using map_string_and_vector_size_t = std::map<std::string, std::vector<size_t>>;
 using map_string_and_vector_float = std::map<std::string, std::vector<float>>;
 using map_int_and_double = std::map<int, double>;
 using ep_OpenVINO = cv::gapi::onnx::ep::OpenVINO;
+using ep_DirectML = cv::gapi::onnx::ep::DirectML;

 // NB: Python wrapper generate T_U for T<U>
 // This behavior is only observed for inputs
......
@@ -22,8 +22,14 @@ cv::gapi::onnx::PyParams& cv::gapi::onnx::PyParams::cfgNormalize(const std::string
     return *this;
 }

 cv::gapi::onnx::PyParams&
-cv::gapi::onnx::PyParams::cfgExecutionProvider(cv::gapi::onnx::ep::OpenVINO ov_ep) {
-    m_priv->cfgExecutionProvider(std::move(ov_ep));
+cv::gapi::onnx::PyParams::cfgAddExecutionProvider(cv::gapi::onnx::ep::OpenVINO ep) {
+    m_priv->cfgAddExecutionProvider(std::move(ep));
+    return *this;
+}
+
+cv::gapi::onnx::PyParams&
+cv::gapi::onnx::PyParams::cfgAddExecutionProvider(cv::gapi::onnx::ep::DirectML ep) {
+    m_priv->cfgAddExecutionProvider(std::move(ep));
     return *this;
 }
......
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2023 Intel Corporation

#include "backends/onnx/dml_ep.hpp"
#include "logger.hpp"

#ifdef HAVE_ONNX
#include <onnxruntime_cxx_api.h>

#ifdef HAVE_ONNX_DML
#include "../providers/dml/dml_provider_factory.h"

void cv::gimpl::onnx::addDMLExecutionProvider(Ort::SessionOptions *session_options,
                                              const cv::gapi::onnx::ep::DirectML &dml_ep) {
    namespace ep = cv::gapi::onnx::ep;
    GAPI_Assert(cv::util::holds_alternative<int>(dml_ep.ddesc));
    const int device_id = cv::util::get<int>(dml_ep.ddesc);
    try {
        OrtSessionOptionsAppendExecutionProvider_DML(*session_options, device_id);
    } catch (const std::exception &e) {
        std::stringstream ss;
        ss << "ONNX Backend: Failed to enable DirectML"
           << " Execution Provider: " << e.what();
        cv::util::throw_error(std::runtime_error(ss.str()));
    }
}

#else // HAVE_ONNX_DML

void cv::gimpl::onnx::addDMLExecutionProvider(Ort::SessionOptions*,
                                              const cv::gapi::onnx::ep::DirectML&) {
    util::throw_error(std::runtime_error("G-API has been compiled with ONNXRT"
                                         " without DirectML support"));
}

#endif // HAVE_ONNX_DML
#endif // HAVE_ONNX
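
The `#ifdef` split above lets callers invoke the helper unconditionally: without DirectML support the stub throws at runtime instead of breaking the build. A sketch of a hypothetical call site (the real one is in gonnxbackend.cpp below):

```cpp
#include <onnxruntime_cxx_api.h>
#include "backends/onnx/dml_ep.hpp"

static void setup(Ort::SessionOptions &so) {
    // Registers the DirectML EP on device 0, or throws std::runtime_error
    // when G-API was built against an ONNX Runtime without DirectML.
    cv::gimpl::onnx::addDMLExecutionProvider(&so, cv::gapi::onnx::ep::DirectML(0));
}
```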
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2023 Intel Corporation

#ifndef OPENCV_GAPI_DML_EP_HPP
#define OPENCV_GAPI_DML_EP_HPP

#include "opencv2/gapi/infer/onnx.hpp"

#ifdef HAVE_ONNX
#include <onnxruntime_cxx_api.h>

namespace cv {
namespace gimpl {
namespace onnx {

void addDMLExecutionProvider(Ort::SessionOptions *session_options,
                             const cv::gapi::onnx::ep::DirectML &dml_ep);

}}} // namespace cv::gimpl::onnx

#endif // HAVE_ONNX
#endif // OPENCV_GAPI_DML_EP_HPP
@@ -9,6 +9,8 @@
 #ifdef HAVE_ONNX

+#include "backends/onnx/dml_ep.hpp"
+
 #include <ade/util/algorithm.hpp> // any_of
 #include <ade/util/zip_range.hpp>
 #include <opencv2/gapi/infer.hpp>
@@ -143,37 +145,44 @@ public:
     void run();
 };

-static void appendExecutionProvider(Ort::SessionOptions *session_options,
-                                    const cv::gapi::onnx::ep::EP &execution_provider) {
+static void addOpenVINOExecutionProvider(Ort::SessionOptions *session_options,
+                                         const cv::gapi::onnx::ep::OpenVINO &ov_ep) {
+    OrtOpenVINOProviderOptions options;
+    options.device_type = ov_ep.device_type.c_str();
+    options.cache_dir = ov_ep.cache_dir.c_str();
+    options.num_of_threads = ov_ep.num_of_threads;
+    options.enable_opencl_throttling = ov_ep.enable_opencl_throttling;
+    options.enable_dynamic_shapes = ov_ep.enable_dynamic_shapes;
+    options.context = nullptr;
+
+    try {
+        session_options->AppendExecutionProvider_OpenVINO(options);
+    } catch (const std::exception &e) {
+        std::stringstream ss;
+        ss << "ONNX Backend: Failed to enable OpenVINO"
+           << " Execution Provider: " << e.what();
+        cv::util::throw_error(std::runtime_error(ss.str()));
+    }
+}
+
+static void addExecutionProvider(Ort::SessionOptions *session_options,
+                                 const cv::gapi::onnx::ep::EP &execution_provider) {
     namespace ep = cv::gapi::onnx::ep;
     switch (execution_provider.index()) {
         case ep::EP::index_of<ep::OpenVINO>(): {
-            GAPI_LOG_INFO(NULL, "OpenVINO Execution Provider is selected.");
-            const auto &ovep = cv::util::get<ep::OpenVINO>(execution_provider);
-            OrtOpenVINOProviderOptions options;
-            options.device_id = ovep.device_id.c_str();
-            options.cache_dir = ovep.cache_dir.c_str();
-            options.enable_opencl_throttling = ovep.enable_opencl_throttling;
-            options.enable_dynamic_shapes = ovep.enable_dynamic_shapes;
-            // NB: If are not specified, will be taken from onnxruntime build.
-            if (ovep.device_type) {
-                options.device_type = ovep.device_type->c_str();
-            }
-            if (ovep.num_of_threads) {
-                options.num_of_threads = *ovep.num_of_threads;
-            }
-            try {
-                session_options->AppendExecutionProvider_OpenVINO(options);
-            } catch (const std::exception &e) {
-                std::stringstream ss;
-                ss << "ONNX Backend: Failed to enable OpenVINO Execution Provider: "
-                   << e.what() << "\nMake sure that onnxruntime has"
-                      " been compiled with OpenVINO support.";
-                cv::util::throw_error(std::runtime_error(ss.str()));
-            }
+            GAPI_LOG_INFO(NULL, "OpenVINO Execution Provider is added.");
+            const auto &ov_ep = cv::util::get<ep::OpenVINO>(execution_provider);
+            addOpenVINOExecutionProvider(session_options, ov_ep);
             break;
         }
+        case ep::EP::index_of<ep::DirectML>(): {
+            GAPI_LOG_INFO(NULL, "DirectML Execution Provider is added.");
+            const auto &dml_ep = cv::util::get<ep::DirectML>(execution_provider);
+            addDMLExecutionProvider(session_options, dml_ep);
+            break;
+        }
         default:
             GAPI_LOG_INFO(NULL, "CPU Execution Provider is added.");
             break;
     }
 }
@@ -629,7 +638,10 @@ ONNXCompiled::ONNXCompiled(const gapi::onnx::detail::ParamDesc &pp)
     }

     // Create and initialize the ONNX session
     Ort::SessionOptions session_options;
-    cv::gimpl::onnx::appendExecutionProvider(&session_options, pp.execution_provider);
+    GAPI_LOG_INFO(NULL, "Adding Execution Providers for \"" << pp.model_path << "\"");
+    for (const auto &ep : pp.execution_providers) {
+        cv::gimpl::onnx::addExecutionProvider(&session_options, ep);
+    }
     if (pp.disable_mem_pattern) {
         session_options.DisableMemPattern();