未验证 提交 eb82ba36 编写于 作者: A Anatoliy Talamanov 提交者: GitHub

Merge pull request #19322 from TolyaTalamanov:at/python-callbacks

[G-API] Introduce cv.gin/cv.descr_of for python

* Implement cv.gin/cv.descr_of

* Fix macos build

* Fix gcomputation tests

* Add test

* Add using to avoid exceeded length for windows build

* Add using to avoid exceeded length for windows build

* Fix comments to review

* Fix comments to review

* Update from latest master

* Avoid graph compilation to obtain in/out info

* Fix indentation

* Fix comments to review

* Avoid using default in switches

* Post output meta for giebackend
上级 7bcb51ed
......@@ -1909,14 +1909,14 @@ kmeans(const GMat& data, const int K, const GMat& bestLabels,
- Function textual ID is "org.opencv.core.kmeansNDNoInit"
- #KMEANS_USE_INITIAL_LABELS flag must not be set while using this overload.
*/
GAPI_EXPORTS std::tuple<GOpaque<double>,GMat,GMat>
GAPI_EXPORTS_W std::tuple<GOpaque<double>,GMat,GMat>
kmeans(const GMat& data, const int K, const TermCriteria& criteria, const int attempts,
const KmeansFlags flags);
/** @overload
@note Function textual ID is "org.opencv.core.kmeans2D"
*/
GAPI_EXPORTS std::tuple<GOpaque<double>,GArray<int>,GArray<Point2f>>
GAPI_EXPORTS_W std::tuple<GOpaque<double>,GArray<int>,GArray<Point2f>>
kmeans(const GArray<Point2f>& data, const int K, const GArray<int>& bestLabels,
const TermCriteria& criteria, const int attempts, const KmeansFlags flags);
......@@ -1935,7 +1935,7 @@ namespace streaming {
@param src Input tensor
@return Size (tensor dimensions).
*/
GAPI_EXPORTS GOpaque<Size> size(const GMat& src);
GAPI_EXPORTS_W GOpaque<Size> size(const GMat& src);
/** @overload
Gets dimensions from rectangle.
......
......@@ -249,6 +249,30 @@ template<typename... Ts> inline GRunArgsP gout(Ts&... args)
return GRunArgsP{ GRunArgP(detail::wrap_host_helper<Ts>::wrap_out(args))... };
}
struct GTypeInfo;
using GTypesInfo = std::vector<GTypeInfo>;
// FIXME: Needed for python bridge, must be moved to more appropriate header
namespace detail {
// Deferred-unpacking callback used by the Python bridge: it wraps the user's
// Python argument tuple and converts it to GRunArgs only once the graph's
// input type info (GTypesInfo) is known, so the graph need not be compiled
// just to learn input types.
struct ExtractArgsCallback
{
cv::GRunArgs operator()(const cv::GTypesInfo& info) const { return c(info); }
using CallBackT = std::function<cv::GRunArgs(const cv::GTypesInfo& info)>;
CallBackT c;
};
// Same idea as ExtractArgsCallback, but produces GMetaArgs (metadata only)
// for cv.descr_of / compileStreaming.
struct ExtractMetaCallback
{
cv::GMetaArgs operator()(const cv::GTypesInfo& info) const { return c(info); }
using CallBackT = std::function<cv::GMetaArgs(const cv::GTypesInfo& info)>;
CallBackT c;
};
// Allocates default-constructed storage in `args` for every entry of
// `out_info` and exposes it through `outs` (see grunarg.cpp for definition).
void constructGraphOutputs(const cv::GTypesInfo &out_info,
cv::GRunArgs &args,
cv::GRunArgsP &outs);
} // namespace detail
} // namespace cv
#endif // OPENCV_GAPI_GARG_HPP
......@@ -368,8 +368,6 @@ private:
detail::GArrayU m_ref;
};
using GArrayP2f = GArray<cv::Point2f>;
/** @} */
} // namespace cv
......
......@@ -258,7 +258,8 @@ public:
void apply(GRunArgs &&ins, GRunArgsP &&outs, GCompileArgs &&args = {}); // Arg-to-arg overload
/// @private -- Exclude this function from OpenCV documentation
GAPI_WRAP GRunArgs apply(GRunArgs &&ins, GCompileArgs &&args = {});
GAPI_WRAP GRunArgs apply(const cv::detail::ExtractArgsCallback &callback,
GCompileArgs &&args = {});
/// @private -- Exclude this function from OpenCV documentation
void apply(const std::vector<cv::Mat>& ins, // Compatibility overload
......@@ -436,7 +437,11 @@ public:
*
* @sa @ref gapi_compile_args
*/
GAPI_WRAP GStreamingCompiled compileStreaming(GMetaArgs &&in_metas, GCompileArgs &&args = {});
GStreamingCompiled compileStreaming(GMetaArgs &&in_metas, GCompileArgs &&args = {});
/// @private -- Exclude this function from OpenCV documentation
GAPI_WRAP GStreamingCompiled compileStreaming(const cv::detail::ExtractMetaCallback &callback,
GCompileArgs &&args = {});
/**
* @brief Compile the computation for streaming mode.
......
......@@ -30,6 +30,7 @@ struct GTypeInfo
{
GShape shape;
cv::detail::OpaqueKind kind;
detail::HostCtor ctor;
};
using GShapes = std::vector<GShape>;
......
......@@ -135,7 +135,7 @@ GRunArg value_of(const GOrigin &origin);
// Transform run-time computation arguments into a collection of metadata
// extracted from that arguments
GMetaArg GAPI_EXPORTS descr_of(const GRunArg &arg );
GMetaArgs GAPI_EXPORTS_W descr_of(const GRunArgs &args);
GMetaArgs GAPI_EXPORTS descr_of(const GRunArgs &args);
// Transform run-time operation result argument into metadata extracted from that argument
// Used to compare the metadata, which generated at compile time with the metadata result operation in run time
......
......@@ -180,7 +180,10 @@ public:
* @param ins vector of inputs to process.
* @sa gin
*/
GAPI_WRAP void setSource(GRunArgs &&ins);
void setSource(GRunArgs &&ins);
/// @private -- Exclude this function from OpenCV documentation
GAPI_WRAP void setSource(const cv::detail::ExtractArgsCallback& callback);
/**
* @brief Specify an input video stream for a single-input
......@@ -251,6 +254,7 @@ public:
bool pull(cv::GRunArgsP &&outs);
// NB: Used from python
/// @private -- Exclude this function from OpenCV documentation
GAPI_WRAP std::tuple<bool, cv::GRunArgs> pull();
/**
......
......@@ -1158,7 +1158,7 @@ if there are 2 channels, or have 2 columns if there is a single channel. Mat sho
@param src Input gray-scale image @ref CV_8UC1; or input set of @ref CV_32S or @ref CV_32F
2D points stored in Mat.
*/
GAPI_EXPORTS GOpaque<Rect> boundingRect(const GMat& src);
GAPI_EXPORTS_W GOpaque<Rect> boundingRect(const GMat& src);
/** @overload
......
......@@ -85,11 +85,11 @@ the larger side of the rectangle.
@param filterOutOfBounds If provided true, out-of-frame boxes are filtered.
@return a vector of detected bounding boxes.
*/
GAPI_EXPORTS GArray<Rect> parseSSD(const GMat& in,
const GOpaque<Size>& inSz,
const float confidenceThreshold = 0.5f,
const bool alignmentToSquare = false,
const bool filterOutOfBounds = false);
GAPI_EXPORTS_W GArray<Rect> parseSSD(const GMat& in,
const GOpaque<Size>& inSz,
const float confidenceThreshold = 0.5f,
const bool alignmentToSquare = false,
const bool filterOutOfBounds = false);
/** @brief Parses output of Yolo network.
......
......@@ -8,6 +8,32 @@ using gapi_GKernelPackage = cv::gapi::GKernelPackage;
using gapi_GNetPackage = cv::gapi::GNetPackage;
using gapi_ie_PyParams = cv::gapi::ie::PyParams;
using gapi_wip_IStreamSource_Ptr = cv::Ptr<cv::gapi::wip::IStreamSource>;
// Aliases consumed by the Python wrapper generator.
using detail_ExtractArgsCallback = cv::detail::ExtractArgsCallback;
using detail_ExtractMetaCallback = cv::detail::ExtractMetaCallback;

// NB: The Python wrapper generator emits T_U for a template instance T<U>.
// This behavior is only observed for inputs, hence one alias per
// instantiated GOpaque<>/GArray<> type.
using GOpaque_bool    = cv::GOpaque<bool>;
using GOpaque_int     = cv::GOpaque<int>;
using GOpaque_double  = cv::GOpaque<double>;
// BUG FIX: previously aliased cv::GOpaque<double> (copy-paste from the line
// above); the *_float alias must refer to the float instantiation.
using GOpaque_float   = cv::GOpaque<float>;
using GOpaque_string  = cv::GOpaque<std::string>;
using GOpaque_Point   = cv::GOpaque<cv::Point>;
using GOpaque_Point2f = cv::GOpaque<cv::Point2f>;
using GOpaque_Size    = cv::GOpaque<cv::Size>;
using GOpaque_Rect    = cv::GOpaque<cv::Rect>;

using GArray_bool     = cv::GArray<bool>;
using GArray_int      = cv::GArray<int>;
using GArray_double   = cv::GArray<double>;
// BUG FIX: previously aliased cv::GArray<double> (same copy-paste error).
using GArray_float    = cv::GArray<float>;
using GArray_string   = cv::GArray<std::string>;
using GArray_Point    = cv::GArray<cv::Point>;
using GArray_Point2f  = cv::GArray<cv::Point2f>;
using GArray_Size     = cv::GArray<cv::Size>;
using GArray_Rect     = cv::GArray<cv::Rect>;
using GArray_Scalar   = cv::GArray<cv::Scalar>;
using GArray_Mat      = cv::GArray<cv::Mat>;
// FIXME: Python wrapper generate code without namespace std,
// so it cause error: "string wasn't declared"
......@@ -32,38 +58,76 @@ bool pyopencv_to(PyObject* obj, GRunArgs& value, const ArgInfo& info)
return pyopencv_to_generic_vec(obj, value, info);
}
static PyObject* from_grunarg(const GRunArg& v)
// Convert a type-erased cv::detail::OpaqueRef (the runtime payload of a
// cv::GOpaque<T>) to the corresponding Python object, dispatching on the
// stored kind. Kinds with no Python mapping fall through and raise
// TypeError (returning NULL, per Python C API convention).
template <>
PyObject* pyopencv_from(const cv::detail::OpaqueRef& o)
{
switch (o.getKind())
{
case cv::detail::OpaqueKind::CV_BOOL : return pyopencv_from(o.rref<bool>());
case cv::detail::OpaqueKind::CV_INT : return pyopencv_from(o.rref<int>());
case cv::detail::OpaqueKind::CV_DOUBLE : return pyopencv_from(o.rref<double>());
case cv::detail::OpaqueKind::CV_FLOAT : return pyopencv_from(o.rref<float>());
case cv::detail::OpaqueKind::CV_STRING : return pyopencv_from(o.rref<std::string>());
case cv::detail::OpaqueKind::CV_POINT : return pyopencv_from(o.rref<cv::Point>());
case cv::detail::OpaqueKind::CV_POINT2F : return pyopencv_from(o.rref<cv::Point2f>());
case cv::detail::OpaqueKind::CV_SIZE : return pyopencv_from(o.rref<cv::Size>());
case cv::detail::OpaqueKind::CV_RECT : return pyopencv_from(o.rref<cv::Rect>());
// NB: unsupported kinds are listed explicitly (no `default:`) so the
// compiler warns when a new OpaqueKind enumerator is added.
case cv::detail::OpaqueKind::CV_UNKNOWN : break;
case cv::detail::OpaqueKind::CV_UINT64 : break;
case cv::detail::OpaqueKind::CV_SCALAR : break;
case cv::detail::OpaqueKind::CV_MAT : break;
case cv::detail::OpaqueKind::CV_DRAW_PRIM : break;
}
PyErr_SetString(PyExc_TypeError, "Unsupported GOpaque type");
return NULL;
};
// Convert a type-erased cv::detail::VectorRef (the runtime payload of a
// cv::GArray<T>) to a Python list, dispatching on the stored element kind.
// Unsupported kinds raise TypeError (NULL return).
template <>
PyObject* pyopencv_from(const cv::detail::VectorRef& v)
{
switch (v.getKind())
{
case cv::detail::OpaqueKind::CV_BOOL : return pyopencv_from_generic_vec(v.rref<bool>());
case cv::detail::OpaqueKind::CV_INT : return pyopencv_from_generic_vec(v.rref<int>());
case cv::detail::OpaqueKind::CV_DOUBLE : return pyopencv_from_generic_vec(v.rref<double>());
case cv::detail::OpaqueKind::CV_FLOAT : return pyopencv_from_generic_vec(v.rref<float>());
case cv::detail::OpaqueKind::CV_STRING : return pyopencv_from_generic_vec(v.rref<std::string>());
case cv::detail::OpaqueKind::CV_POINT : return pyopencv_from_generic_vec(v.rref<cv::Point>());
case cv::detail::OpaqueKind::CV_POINT2F : return pyopencv_from_generic_vec(v.rref<cv::Point2f>());
case cv::detail::OpaqueKind::CV_SIZE : return pyopencv_from_generic_vec(v.rref<cv::Size>());
case cv::detail::OpaqueKind::CV_RECT : return pyopencv_from_generic_vec(v.rref<cv::Rect>());
// NB: GArray additionally supports Scalar and Mat elements (unlike GOpaque).
case cv::detail::OpaqueKind::CV_SCALAR : return pyopencv_from_generic_vec(v.rref<cv::Scalar>());
case cv::detail::OpaqueKind::CV_MAT : return pyopencv_from_generic_vec(v.rref<cv::Mat>());
case cv::detail::OpaqueKind::CV_UNKNOWN : break;
case cv::detail::OpaqueKind::CV_UINT64 : break;
case cv::detail::OpaqueKind::CV_DRAW_PRIM : break;
}
PyErr_SetString(PyExc_TypeError, "Unsupported GArray type");
return NULL;
}
template <>
PyObject* pyopencv_from(const GRunArg& v)
{
switch (v.index())
{
case GRunArg::index_of<cv::Mat>():
{
const auto& m = util::get<cv::Mat>(v);
return pyopencv_from(m);
}
return pyopencv_from(util::get<cv::Mat>(v));
case GRunArg::index_of<cv::Scalar>():
{
const auto& s = util::get<cv::Scalar>(v);
return pyopencv_from(s);
}
return pyopencv_from(util::get<cv::Scalar>(v));
case GRunArg::index_of<cv::detail::VectorRef>():
{
const auto& vref = util::get<cv::detail::VectorRef>(v);
switch (vref.getKind())
{
case cv::detail::OpaqueKind::CV_POINT2F:
return pyopencv_from(vref.rref<cv::Point2f>());
default:
PyErr_SetString(PyExc_TypeError, "Unsupported kind for GArray");
return NULL;
}
}
default:
PyErr_SetString(PyExc_TypeError, "Failed to unpack GRunArgs");
return NULL;
return pyopencv_from(util::get<cv::detail::VectorRef>(v));
case GRunArg::index_of<cv::detail::OpaqueRef>():
return pyopencv_from(util::get<cv::detail::OpaqueRef>(v));
}
GAPI_Assert(false);
PyErr_SetString(PyExc_TypeError, "Failed to unpack GRunArgs");
return NULL;
}
template<>
......@@ -74,7 +138,7 @@ PyObject* pyopencv_from(const GRunArgs& value)
// NB: It doesn't make sense to return list with a single element
if (n == 1)
{
PyObject* item = from_grunarg(value[0]);
PyObject* item = pyopencv_from(value[0]);
if(!item)
{
return NULL;
......@@ -85,7 +149,7 @@ PyObject* pyopencv_from(const GRunArgs& value)
PyObject* list = PyList_New(n);
for(i = 0; i < n; ++i)
{
PyObject* item = from_grunarg(value[i]);
PyObject* item = pyopencv_from(value[i]);
if(!item)
{
Py_DECREF(list);
......@@ -110,6 +174,26 @@ PyObject* pyopencv_from(const GMetaArgs& value)
return pyopencv_from_generic_vec(value);
}
// Like pyopencv_to(), but throws std::logic_error(msg) on conversion failure
// instead of returning false — lets the deferred-extraction callbacks report
// errors through C++ exceptions.
template <typename T>
void pyopencv_to_with_check(PyObject* from, T& to, const std::string& msg = "")
{
if (!pyopencv_to(from, to, ArgInfo("", false)))
{
cv::util::throw_error(std::logic_error(msg));
}
}
// Vector counterpart of pyopencv_to_with_check(): converts a Python sequence
// into std::vector<T>, throwing std::logic_error(msg) on failure.
template <typename T>
void pyopencv_to_generic_vec_with_check(PyObject* from,
std::vector<T>& to,
const std::string& msg = "")
{
if (!pyopencv_to_generic_vec(from, to, ArgInfo("", false)))
{
cv::util::throw_error(std::logic_error(msg));
}
}
template <typename T>
static PyObject* extract_proto_args(PyObject* py_args, PyObject* kw)
{
......@@ -128,9 +212,13 @@ static PyObject* extract_proto_args(PyObject* py_args, PyObject* kw)
{
args.emplace_back(reinterpret_cast<pyopencv_GMat_t*>(item)->v);
}
else if (PyObject_TypeCheck(item, reinterpret_cast<PyTypeObject*>(pyopencv_GArrayP2f_TypePtr)))
else if (PyObject_TypeCheck(item, reinterpret_cast<PyTypeObject*>(pyopencv_GOpaqueT_TypePtr)))
{
args.emplace_back(reinterpret_cast<pyopencv_GOpaqueT_t*>(item)->v.strip());
}
else if (PyObject_TypeCheck(item, reinterpret_cast<PyTypeObject*>(pyopencv_GArrayT_TypePtr)))
{
args.emplace_back(reinterpret_cast<pyopencv_GArrayP2f_t*>(item)->v.strip());
args.emplace_back(reinterpret_cast<pyopencv_GArrayT_t*>(item)->v.strip());
}
else
{
......@@ -152,63 +240,270 @@ static PyObject* pyopencv_cv_GOut(PyObject* , PyObject* py_args, PyObject* kw)
return extract_proto_args<GProtoOutputArgs>(py_args, kw);
}
static PyObject* pyopencv_cv_gin(PyObject* , PyObject* py_args, PyObject* kw)
static cv::detail::OpaqueRef extract_opaque_ref(PyObject* from, cv::detail::OpaqueKind kind)
{
using namespace cv;
#define HANDLE_CASE(T, O) case cv::detail::OpaqueKind::CV_##T: \
{ \
O obj{}; \
pyopencv_to_with_check(from, obj, "Failed to obtain " # O); \
return cv::detail::OpaqueRef{std::move(obj)}; \
}
#define UNSUPPORTED(T) case cv::detail::OpaqueKind::CV_##T: break
switch (kind)
{
HANDLE_CASE(BOOL, bool);
HANDLE_CASE(INT, int);
HANDLE_CASE(DOUBLE, double);
HANDLE_CASE(FLOAT, float);
HANDLE_CASE(STRING, std::string);
HANDLE_CASE(POINT, cv::Point);
HANDLE_CASE(POINT2F, cv::Point2f);
HANDLE_CASE(SIZE, cv::Size);
HANDLE_CASE(RECT, cv::Rect);
UNSUPPORTED(UNKNOWN);
UNSUPPORTED(UINT64);
UNSUPPORTED(SCALAR);
UNSUPPORTED(MAT);
UNSUPPORTED(DRAW_PRIM);
}
#undef HANDLE_CASE
#undef UNSUPPORTED
GRunArgs args;
Py_ssize_t size = PyTuple_Size(py_args);
for (int i = 0; i < size; ++i)
util::throw_error(std::logic_error("Unsupported type for GOpaqueT"));
}
static cv::detail::VectorRef extract_vector_ref(PyObject* from, cv::detail::OpaqueKind kind)
{
#define HANDLE_CASE(T, O) case cv::detail::OpaqueKind::CV_##T: \
{ \
std::vector<O> obj; \
pyopencv_to_generic_vec_with_check(from, obj, "Failed to obtain vector of " # O); \
return cv::detail::VectorRef{std::move(obj)}; \
}
#define UNSUPPORTED(T) case cv::detail::OpaqueKind::CV_##T: break
switch (kind)
{
PyObject* item = PyTuple_GetItem(py_args, i);
if (PyTuple_Check(item))
HANDLE_CASE(BOOL, bool);
HANDLE_CASE(INT, int);
HANDLE_CASE(DOUBLE, double);
HANDLE_CASE(FLOAT, float);
HANDLE_CASE(STRING, std::string);
HANDLE_CASE(POINT, cv::Point);
HANDLE_CASE(POINT2F, cv::Point2f);
HANDLE_CASE(SIZE, cv::Size);
HANDLE_CASE(RECT, cv::Rect);
HANDLE_CASE(SCALAR, cv::Scalar);
HANDLE_CASE(MAT, cv::Mat);
UNSUPPORTED(UNKNOWN);
UNSUPPORTED(UINT64);
UNSUPPORTED(DRAW_PRIM);
#undef HANDLE_CASE
#undef UNSUPPORTED
}
util::throw_error(std::logic_error("Unsupported type for GOpaqueT"));
}
static cv::GRunArg extract_run_arg(const cv::GTypeInfo& info, PyObject* item)
{
switch (info.shape)
{
case cv::GShape::GMAT:
{
cv::Scalar s;
if (pyopencv_to(item, s, ArgInfo("scalar", false)))
// NB: In case streaming it can be IStreamSource or cv::Mat
if (PyObject_TypeCheck(item,
reinterpret_cast<PyTypeObject*>(pyopencv_gapi_wip_IStreamSource_TypePtr)))
{
args.emplace_back(s);
cv::gapi::wip::IStreamSource::Ptr source =
reinterpret_cast<pyopencv_gapi_wip_IStreamSource_t*>(item)->v;
return source;
}
else
{
PyErr_SetString(PyExc_TypeError, "Failed convert tuple to cv::Scalar");
return NULL;
cv::Mat obj;
pyopencv_to_with_check(item, obj, "Failed to obtain cv::Mat");
return obj;
}
}
else if (PyArray_Check(item))
case cv::GShape::GSCALAR:
{
cv::Mat m;
if (pyopencv_to(item, m, ArgInfo("mat", false)))
{
args.emplace_back(m);
}
else
{
PyErr_SetString(PyExc_TypeError, "Failed convert array to cv::Mat");
return NULL;
}
cv::Scalar obj;
pyopencv_to_with_check(item, obj, "Failed to obtain cv::Scalar");
return obj;
}
else if (PyObject_TypeCheck(item,
reinterpret_cast<PyTypeObject*>(pyopencv_gapi_wip_IStreamSource_TypePtr)))
case cv::GShape::GOPAQUE:
{
cv::gapi::wip::IStreamSource::Ptr source =
reinterpret_cast<pyopencv_gapi_wip_IStreamSource_t*>(item)->v;
args.emplace_back(source);
return extract_opaque_ref(item, info.kind);
}
else
case cv::GShape::GARRAY:
{
PyErr_SetString(PyExc_TypeError, "cv.gin can works only with cv::Mat,"
"cv::Scalar, cv::gapi::wip::IStreamSource::Ptr");
return NULL;
return extract_vector_ref(item, info.kind);
}
case cv::GShape::GFRAME:
{
break;
}
}
return pyopencv_from_generic_vec(args);
util::throw_error(std::logic_error("Unsupported output shape"));
}
static PyObject* pyopencv_cv_gout(PyObject* o, PyObject* py_args, PyObject* kw)
static cv::GRunArgs extract_run_args(const cv::GTypesInfo& info, PyObject* py_args)
{
return pyopencv_cv_gin(o, py_args, kw);
cv::GRunArgs args;
Py_ssize_t tuple_size = PyTuple_Size(py_args);
args.reserve(tuple_size);
for (int i = 0; i < tuple_size; ++i)
{
args.push_back(extract_run_arg(info[i], PyTuple_GetItem(py_args, i)));
}
return args;
}
// Build one GMetaArg from a Python object according to the expected shape:
// GMat/GScalar metas are described from the concrete cv::Mat / cv::Scalar
// value; GArray/GOpaque carry no data, so empty descriptors are returned.
// Throws std::logic_error for unsupported shapes (GFRAME not supported yet).
// NOTE(review): the error text says "output shape" but this path is driven
// by the graph's *input* info (see pyopencv_cv_descr_of) — confirm/reword.
static cv::GMetaArg extract_meta_arg(const cv::GTypeInfo& info, PyObject* item)
{
switch (info.shape)
{
case cv::GShape::GMAT:
{
cv::Mat obj;
pyopencv_to_with_check(item, obj, "Failed to obtain cv::Mat");
return cv::GMetaArg{cv::descr_of(obj)};
}
case cv::GShape::GSCALAR:
{
cv::Scalar obj;
pyopencv_to_with_check(item, obj, "Failed to obtain cv::Scalar");
return cv::GMetaArg{cv::descr_of(obj)};
}
case cv::GShape::GARRAY:
{
return cv::GMetaArg{cv::empty_array_desc()};
}
case cv::GShape::GOPAQUE:
{
return cv::GMetaArg{cv::empty_gopaque_desc()};
}
case cv::GShape::GFRAME:
{
// NB: Isn't supported yet.
break;
}
}
util::throw_error(std::logic_error("Unsupported output shape"));
}
// Convert the stored Python argument tuple to GMetaArgs, one per graph input.
// NOTE(review): assumes len(py_args) >= info.size(); no explicit size check
// here — a mismatch would read past the tuple. Confirm callers guarantee it.
static cv::GMetaArgs extract_meta_args(const cv::GTypesInfo& info, PyObject* py_args)
{
cv::GMetaArgs metas;
Py_ssize_t tuple_size = PyTuple_Size(py_args);
metas.reserve(tuple_size);
for (int i = 0; i < tuple_size; ++i)
{
metas.push_back(extract_meta_arg(info[i], PyTuple_GetItem(py_args, i)));
}
return metas;
}
// cv.gin(...) entry point. It does NOT unpack the arguments immediately:
// it captures the Python argument tuple and returns an ExtractArgsCallback
// that performs the tuple -> GRunArgs conversion later, once the graph's
// input type info is known (avoids compiling the graph just to learn types).
static PyObject* pyopencv_cv_gin(PyObject*, PyObject* py_args, PyObject*)
{
// Keep the tuple alive for the callback's lifetime.
// NOTE(review): no matching Py_DECREF is visible here — verify the tuple
// is released elsewhere or this leaks one reference per cv.gin() call.
Py_INCREF(py_args);
auto callback = cv::detail::ExtractArgsCallback{[=](const cv::GTypesInfo& info)
{
// The callback may run on a non-Python thread: acquire the GIL first.
PyGILState_STATE gstate;
gstate = PyGILState_Ensure();
cv::GRunArgs args;
try
{
args = extract_run_args(info, py_args);
}
catch (...)
{
// Release the GIL before propagating the exception to C++ callers.
PyGILState_Release(gstate);
throw;
}
PyGILState_Release(gstate);
return args;
}};
return pyopencv_from(callback);
}
// cv.descr_of(...) entry point. Mirrors pyopencv_cv_gin: defers the
// conversion of the Python tuple to GMetaArgs until the graph's type info
// is available, via an ExtractMetaCallback.
static PyObject* pyopencv_cv_descr_of(PyObject*, PyObject* py_args, PyObject*)
{
// Keep the tuple alive for the callback's lifetime (see note in cv_gin
// about the missing DECREF).
Py_INCREF(py_args);
auto callback = cv::detail::ExtractMetaCallback{[=](const cv::GTypesInfo& info)
{
// May be invoked from a non-Python thread: take the GIL.
PyGILState_STATE gstate;
gstate = PyGILState_Ensure();
cv::GMetaArgs args;
try
{
args = extract_meta_args(info, py_args);
}
catch (...)
{
PyGILState_Release(gstate);
throw;
}
PyGILState_Release(gstate);
return args;
}};
return pyopencv_from(callback);
}
// Conversion glue between the typed C++ cv::GArray<T> and the single
// type-erased Python class cv.GArrayT.
template<typename T>
struct PyOpenCV_Converter<cv::GArray<T>>
{
// C++ -> Python: wrap the typed GArray into the erased GArrayT.
static PyObject* from(const cv::GArray<T>& p)
{
return pyopencv_from(cv::GArrayT(p));
}
// Python -> C++: accept only cv.GArrayT instances, and succeed only if the
// erased object actually holds a GArray<T> (util::get throws otherwise).
static bool to(PyObject *obj, cv::GArray<T>& value, const ArgInfo& info)
{
if (PyObject_TypeCheck(obj, reinterpret_cast<PyTypeObject*>(pyopencv_GArrayT_TypePtr)))
{
auto& array = reinterpret_cast<pyopencv_GArrayT_t*>(obj)->v;
try {
value = cv::util::get<cv::GArray<T>>(array.arg());
} catch (...) {
return false;
}
return true;
}
return false;
}
};
// Conversion glue between the typed C++ cv::GOpaque<T> and the single
// type-erased Python class cv.GOpaqueT (mirrors the GArray converter above).
template<typename T>
struct PyOpenCV_Converter<cv::GOpaque<T>>
{
// C++ -> Python: wrap the typed GOpaque into the erased GOpaqueT.
static PyObject* from(const cv::GOpaque<T>& p)
{
return pyopencv_from(cv::GOpaqueT(p));
}
// Python -> C++: accept only cv.GOpaqueT instances holding a GOpaque<T>.
static bool to(PyObject *obj, cv::GOpaque<T>& value, const ArgInfo& info)
{
if (PyObject_TypeCheck(obj, reinterpret_cast<PyTypeObject*>(pyopencv_GOpaqueT_TypePtr)))
{
auto& opaque = reinterpret_cast<pyopencv_GOpaqueT_t*>(obj)->v;
try {
value = cv::util::get<cv::GOpaque<T>>(opaque.arg());
} catch (...) {
return false;
}
return true;
}
return false;
}
};
#endif // HAVE_OPENCV_GAPI
#endif // OPENCV_GAPI_PYOPENCV_GAPI_HPP
......@@ -119,7 +119,8 @@ public:
GAPI_Assert(false);
}
GAPI_WRAP gapi::ArgType type() { return m_type; }
GAPI_WRAP gapi::ArgType type() { return m_type; }
const Storage& arg() const { return m_arg; }
private:
gapi::ArgType m_type;
......@@ -156,6 +157,7 @@ public:
}
GAPI_WRAP gapi::ArgType type() { return m_type; }
const Storage& arg() const { return m_arg; }
private:
gapi::ArgType m_type;
......
......@@ -16,11 +16,15 @@ namespace cv
class GAPI_EXPORTS_W_SIMPLE GRunArg { };
class GAPI_EXPORTS_W_SIMPLE GMetaArg { };
class GAPI_EXPORTS_W_SIMPLE GArrayP2f { };
using GProtoInputArgs = GIOProtoArgs<In_Tag>;
using GProtoOutputArgs = GIOProtoArgs<Out_Tag>;
// Stub declarations for the Python header parser only: they expose the
// callback types to the binding generator; the real definitions live in
// the G-API headers.
namespace detail
{
struct GAPI_EXPORTS_W_SIMPLE ExtractArgsCallback { };
struct GAPI_EXPORTS_W_SIMPLE ExtractMetaCallback { };
} // namespace detail
namespace gapi
{
GAPI_EXPORTS_W gapi::GNetPackage networks(const cv::gapi::ie::PyParams& params);
......
......@@ -128,5 +128,62 @@ class gapi_core_test(NewOpenCVTests):
'Failed on ' + pkg_name + ' backend')
# Checks the GMat overload of cv.gapi.kmeans end-to-end through the
# python bridge (cv.gin with a numpy array input).
def test_kmeans(self):
# K-means params
count = 100
sz = (count, 2)
in_mat = np.random.random(sz).astype(np.float32)
K = 5
flags = cv.KMEANS_RANDOM_CENTERS
attempts = 1;
criteria = (cv.TERM_CRITERIA_MAX_ITER + cv.TERM_CRITERIA_EPS, 30, 0)
# G-API
g_in = cv.GMat()
compactness, out_labels, centers = cv.gapi.kmeans(g_in, K, criteria, attempts, flags)
comp = cv.GComputation(cv.GIn(g_in), cv.GOut(compactness, out_labels, centers))
compact, labels, centers = comp.apply(cv.gin(in_mat))
# Assert
self.assertTrue(compact >= 0)
self.assertEqual(sz[0], labels.shape[0])
self.assertEqual(1, labels.shape[1])
self.assertTrue(labels.size != 0)
self.assertEqual(centers.shape[1], sz[1]);
self.assertEqual(centers.shape[0], K);
self.assertTrue(centers.size != 0);
# Produces sz[0] random 2D points as a list of (x, y) float32 pairs —
# the input format expected by the GArray<Point2f> kmeans overload.
def generate_random_points(self, sz):
arr = np.random.random(sz).astype(np.float32).T
return list(zip(arr[0], arr[1]))
# Checks the GArray<Point2f> overload of cv.gapi.kmeans, exercising the
# new type-erased cv.GArrayT inputs through cv.gin.
def test_kmeans_2d(self):
# K-means 2D params
count = 100
sz = (count, 2)
amount = sz[0]
K = 5
flags = cv.KMEANS_RANDOM_CENTERS
attempts = 1;
criteria = (cv.TERM_CRITERIA_MAX_ITER + cv.TERM_CRITERIA_EPS, 30, 0);
in_vector = self.generate_random_points(sz)
in_labels = []
# G-API
data = cv.GArrayT(cv.gapi.CV_POINT2F)
best_labels = cv.GArrayT(cv.gapi.CV_INT)
compactness, out_labels, centers = cv.gapi.kmeans(data, K, best_labels, criteria, attempts, flags);
comp = cv.GComputation(cv.GIn(data, best_labels), cv.GOut(compactness, out_labels, centers));
compact, labels, centers = comp.apply(cv.gin(in_vector, in_labels));
# Assert
self.assertTrue(compact >= 0)
self.assertEqual(amount, len(labels))
self.assertEqual(K, len(centers))
if __name__ == '__main__':
NewOpenCVTests.bootstrap()
......@@ -50,7 +50,9 @@ class gapi_imgproc_test(NewOpenCVTests):
# OpenCV - (num_points, 1, 2)
# G-API - (num_points, 2)
# Comparison
self.assertEqual(0.0, cv.norm(expected.flatten(), actual.flatten(), cv.NORM_INF),
self.assertEqual(0.0, cv.norm(expected.flatten(),
np.array(actual, dtype=np.float32).flatten(),
cv.NORM_INF),
'Failed on ' + pkg_name + ' backend')
......@@ -75,5 +77,30 @@ class gapi_imgproc_test(NewOpenCVTests):
'Failed on ' + pkg_name + ' backend')
# Compares cv.gapi.boundingRect (GOpaque<Rect> output) against the
# classic cv.boundingRect on the same random point set, per backend.
def test_bounding_rect(self):
sz = 1280
fscale = 256
def sample_value(fscale):
return np.random.uniform(0, 255 * fscale) / fscale
# NOTE(review): `sz` is defined but range(1280) is hard-coded below —
# presumably it should be range(sz); confirm and deduplicate.
points = np.array([(sample_value(fscale), sample_value(fscale)) for _ in range(1280)], np.float32)
# OpenCV
expected = cv.boundingRect(points)
# G-API
g_in = cv.GMat()
g_out = cv.gapi.boundingRect(g_in)
comp = cv.GComputation(cv.GIn(g_in), cv.GOut(g_out))
for pkg_name, pkg in pkgs:
actual = comp.apply(cv.gin(points), args=cv.compile_args(pkg))
# Comparison
self.assertEqual(0.0, cv.norm(expected, actual, cv.NORM_INF),
'Failed on ' + pkg_name + ' backend')
if __name__ == '__main__':
NewOpenCVTests.bootstrap()
......@@ -49,8 +49,6 @@ class test_gapi_infer(NewOpenCVTests):
comp = cv.GComputation(cv.GIn(g_in), cv.GOut(age_g, gender_g))
pp = cv.gapi.ie.params("net", model_path, weights_path, device_id)
nets = cv.gapi.networks(pp)
args = cv.compile_args(nets)
gapi_age, gapi_gender = comp.apply(cv.gin(img), args=cv.compile_args(cv.gapi.networks(pp)))
# Check
......@@ -58,5 +56,64 @@ class test_gapi_infer(NewOpenCVTests):
self.assertEqual(0.0, cv.norm(dnn_age, gapi_age, cv.NORM_INF))
# End-to-end IE-backend test: runs person-detection-retail-0013 through
# both cv.dnn and the G-API infer + parseSSD pipeline and compares boxes.
def test_person_detection_retail_0013(self):
# NB: Check IE
if not cv.dnn.DNN_TARGET_CPU in cv.dnn.getAvailableTargets(cv.dnn.DNN_BACKEND_INFERENCE_ENGINE):
return
root_path = '/omz_intel_models/intel/person-detection-retail-0013/FP32/person-detection-retail-0013'
model_path = self.find_file(root_path + '.xml', [os.environ.get('OPENCV_DNN_TEST_DATA_PATH')])
weights_path = self.find_file(root_path + '.bin', [os.environ.get('OPENCV_DNN_TEST_DATA_PATH')])
img_path = self.find_file('gpu/lbpcascade/er.png', [os.environ.get('OPENCV_TEST_DATA_PATH')])
device_id = 'CPU'
# Model input resolution is 544x320 (W x H).
img = cv.resize(cv.imread(img_path), (544, 320))
# OpenCV DNN
net = cv.dnn.readNetFromModelOptimizer(model_path, weights_path)
net.setPreferableBackend(cv.dnn.DNN_BACKEND_INFERENCE_ENGINE)
net.setPreferableTarget(cv.dnn.DNN_TARGET_CPU)
blob = cv.dnn.blobFromImage(img)
# Reference SSD post-processing: keep boxes above 0.5 confidence,
# scaled back to pixel coordinates.
def parseSSD(detections, size):
h, w = size
bboxes = []
detections = detections.reshape(-1, 7)
for sample_id, class_id, confidence, xmin, ymin, xmax, ymax in detections:
if confidence >= 0.5:
x = int(xmin * w)
y = int(ymin * h)
width = int(xmax * w - x)
height = int(ymax * h - y)
bboxes.append((x, y, width, height))
return bboxes
net.setInput(blob)
dnn_detections = net.forward()
dnn_boxes = parseSSD(np.array(dnn_detections), img.shape[:2])
# OpenCV G-API
g_in = cv.GMat()
inputs = cv.GInferInputs()
inputs.setInput('data', g_in)
# cv.gapi.streaming.size feeds the frame size to parseSSD at runtime.
g_sz = cv.gapi.streaming.size(g_in)
outputs = cv.gapi.infer("net", inputs)
detections = outputs.at("detection_out")
bboxes = cv.gapi.parseSSD(detections, g_sz, 0.5, False, False)
comp = cv.GComputation(cv.GIn(g_in), cv.GOut(bboxes))
pp = cv.gapi.ie.params("net", model_path, weights_path, device_id)
gapi_boxes = comp.apply(cv.gin(img.astype(np.float32)),
args=cv.compile_args(cv.gapi.networks(pp)))
# Comparison
self.assertEqual(0.0, cv.norm(np.array(dnn_boxes).flatten(),
np.array(gapi_boxes).flatten(),
cv.NORM_INF))
if __name__ == '__main__':
NewOpenCVTests.bootstrap()
......@@ -19,7 +19,7 @@ class test_gapi_streaming(NewOpenCVTests):
g_in = cv.GMat()
g_out = cv.gapi.medianBlur(g_in, 3)
c = cv.GComputation(g_in, g_out)
ccomp = c.compileStreaming(cv.descr_of(cv.gin(in_mat)))
ccomp = c.compileStreaming(cv.descr_of(in_mat))
ccomp.setSource(cv.gin(in_mat))
ccomp.start()
......@@ -191,7 +191,9 @@ class test_gapi_streaming(NewOpenCVTests):
# NB: OpenCV & G-API have different output shapes:
# OpenCV - (num_points, 1, 2)
# G-API - (num_points, 2)
self.assertEqual(0.0, cv.norm(e.flatten(), a.flatten(), cv.NORM_INF))
self.assertEqual(0.0, cv.norm(e.flatten(),
np.array(a, np.float32).flatten(),
cv.NORM_INF))
proc_num_frames += 1
if proc_num_frames == max_num_frames:
......
......@@ -23,6 +23,31 @@
#include "compiler/gmodelbuilder.hpp"
#include "compiler/gcompiler.hpp"
#include "compiler/gcompiled_priv.hpp"
#include "compiler/gstreaming_priv.hpp"
// Gathers {shape, kind, ctor} for every graph data node in `nhs`.
// The ctor is kept so the python bridge can default-construct GArray/GOpaque
// output storage later (see constructGraphOutputs).
static cv::GTypesInfo collectInfo(const cv::gimpl::GModel::ConstGraph& g,
const std::vector<ade::NodeHandle>& nhs) {
cv::GTypesInfo info;
info.reserve(nhs.size());
ade::util::transform(nhs, std::back_inserter(info), [&g](const ade::NodeHandle& nh) {
const auto& data = g.metadata(nh).get<cv::gimpl::Data>();
return cv::GTypeInfo{data.shape, data.kind, data.ctor};
});
return info;
}
// NB: This function is used to collect graph input/output info.
// Needed for python bridge to unpack inputs and construct outputs properly.
// It only builds the graph model (makeGraph) to read the Protocol — it does
// NOT run a full graph compilation ("Avoid graph compilation to obtain
// in/out info", per this change's intent).
static cv::GraphInfo::Ptr collectGraphInfo(const cv::GComputation::Priv& priv)
{
auto g = cv::gimpl::GCompiler::makeGraph(priv);
cv::gimpl::GModel::ConstGraph cgr(*g);
auto in_info = collectInfo(cgr, cgr.metadata().get<cv::gimpl::Protocol>().in_nhs);
auto out_info = collectInfo(cgr, cgr.metadata().get<cv::gimpl::Protocol>().out_nhs);
return cv::GraphInfo::Ptr(new cv::GraphInfo{std::move(in_info), std::move(out_info)});
}
// cv::GComputation private implementation /////////////////////////////////////
// <none>
......@@ -105,8 +130,37 @@ cv::GStreamingCompiled cv::GComputation::compileStreaming(GMetaArgs &&metas, GCo
cv::GStreamingCompiled cv::GComputation::compileStreaming(GCompileArgs &&args)
{
// NB: Used by python bridge
if (!m_priv->m_info)
{
m_priv->m_info = collectGraphInfo(*m_priv);
}
cv::gimpl::GCompiler comp(*this, {}, std::move(args));
return comp.compileStreaming();
auto compiled = comp.compileStreaming();
compiled.priv().setInInfo(m_priv->m_info->inputs);
compiled.priv().setOutInfo(m_priv->m_info->outputs);
return compiled;
}
// Streaming compilation entry point used by the python bridge: the input
// metas are produced lazily by `callback` (cv.descr_of) from the cached
// graph type info, so python callers never have to spell out GMetaArgs.
cv::GStreamingCompiled cv::GComputation::compileStreaming(const cv::detail::ExtractMetaCallback &callback,
GCompileArgs &&args)
{
// NB: Used by python bridge
// Lazily collect and cache the graph's in/out type info on first use.
if (!m_priv->m_info)
{
m_priv->m_info = collectGraphInfo(*m_priv);
}
auto ins = callback(m_priv->m_info->inputs);
cv::gimpl::GCompiler comp(*this, std::move(ins), std::move(args));
auto compiled = comp.compileStreaming();
// Propagate the type info so GStreamingCompiled::setSource/pull can
// unpack inputs and construct outputs on the python side.
compiled.priv().setInInfo(m_priv->m_info->inputs);
compiled.priv().setOutInfo(m_priv->m_info->outputs);
return compiled;
}
// FIXME: Introduce similar query/test method for GMetaArgs as a building block
......@@ -172,50 +226,25 @@ void cv::GComputation::apply(const std::vector<cv::Mat> &ins,
}
// NB: This overload is called from python code
cv::GRunArgs cv::GComputation::apply(GRunArgs &&ins, GCompileArgs &&args)
cv::GRunArgs cv::GComputation::apply(const cv::detail::ExtractArgsCallback &callback,
GCompileArgs &&args)
{
recompile(descr_of(ins), std::move(args));
// NB: Used by python bridge
if (!m_priv->m_info)
{
m_priv->m_info = collectGraphInfo(*m_priv);
}
const auto& out_info = m_priv->m_lastCompiled.priv().outInfo();
auto ins = callback(m_priv->m_info->inputs);
recompile(descr_of(ins), std::move(args));
GRunArgs run_args;
GRunArgsP outs;
run_args.reserve(out_info.size());
outs.reserve(out_info.size());
run_args.reserve(m_priv->m_info->outputs.size());
outs.reserve(m_priv->m_info->outputs.size());
cv::detail::constructGraphOutputs(m_priv->m_info->outputs, run_args, outs);
for (auto&& info : out_info)
{
switch (info.shape)
{
case cv::GShape::GMAT:
{
run_args.emplace_back(cv::Mat{});
outs.emplace_back(&cv::util::get<cv::Mat>(run_args.back()));
break;
}
case cv::GShape::GSCALAR:
{
run_args.emplace_back(cv::Scalar{});
outs.emplace_back(&cv::util::get<cv::Scalar>(run_args.back()));
break;
}
case cv::GShape::GARRAY:
{
switch (info.kind)
{
case cv::detail::OpaqueKind::CV_POINT2F:
run_args.emplace_back(cv::detail::VectorRef{std::vector<cv::Point2f>{}});
outs.emplace_back(cv::util::get<cv::detail::VectorRef>(run_args.back()));
break;
default:
util::throw_error(std::logic_error("Unsupported kind for GArray"));
}
break;
}
default:
util::throw_error(std::logic_error("Only cv::GMat and cv::GScalar are supported for python output"));
}
}
m_priv->m_lastCompiled(std::move(ins), std::move(outs));
return run_args;
}
......
......@@ -21,6 +21,13 @@
namespace cv {
// Cached per-GComputation description of the graph's input and output
// types (shape/kind/ctor per node). Filled lazily by collectGraphInfo()
// and shared via Ptr; used by the python bridge.
struct GraphInfo
{
using Ptr = std::shared_ptr<GraphInfo>;
cv::GTypesInfo inputs;
cv::GTypesInfo outputs;
};
class GComputation::Priv
{
public:
......@@ -36,9 +43,10 @@ public:
, Dump // A deserialized graph
>;
GCompiled m_lastCompiled;
GMetaArgs m_lastMetas; // TODO: make GCompiled remember its metas?
Shape m_shape;
GCompiled m_lastCompiled;
GMetaArgs m_lastMetas; // TODO: make GCompiled remember its metas?
Shape m_shape;
GraphInfo::Ptr m_info; // NB: Used by python bridge
};
}
......
......@@ -31,3 +31,48 @@ cv::GRunArg& cv::GRunArg::operator= (cv::GRunArg &&arg) {
meta = std::move(arg.meta);
return *this;
}
// NB: Construct GRunArgsP based on passed info and store the memory in passed cv::GRunArgs.
// Needed for python bridge, because in case python user doesn't pass output arguments to apply.
// For GARRAY/GOPAQUE the type-erased storage is created via the HostCtor
// saved in info.ctor, so the element type matches the graph's declaration.
void cv::detail::constructGraphOutputs(const cv::GTypesInfo &out_info,
cv::GRunArgs &args,
cv::GRunArgsP &outs)
{
for (auto&& info : out_info)
{
switch (info.shape)
{
case cv::GShape::GMAT:
{
args.emplace_back(cv::Mat{});
outs.emplace_back(&cv::util::get<cv::Mat>(args.back()));
break;
}
case cv::GShape::GSCALAR:
{
args.emplace_back(cv::Scalar{});
outs.emplace_back(&cv::util::get<cv::Scalar>(args.back()));
break;
}
case cv::GShape::GARRAY:
{
cv::detail::VectorRef ref;
util::get<cv::detail::ConstructVec>(info.ctor)(ref);
args.emplace_back(ref);
outs.emplace_back(cv::util::get<cv::detail::VectorRef>(args.back()));
break;
}
case cv::GShape::GOPAQUE:
{
cv::detail::OpaqueRef ref;
util::get<cv::detail::ConstructOpaque>(info.ctor)(ref);
args.emplace_back(ref);
// NOTE(review): unlike the GARRAY branch this pushes the local `ref`
// rather than args.back(); presumably OpaqueRef has shared reference
// semantics so both alias the same storage — confirm.
outs.emplace_back(ref);
break;
}
// NOTE(review): `default:` contradicts this change's "avoid default in
// switches" convention and hides GFRAME; consider listing cases.
default:
util::throw_error(std::logic_error("Unsupported output shape for python"));
}
}
}
......@@ -707,7 +707,10 @@ static void PostOutputs(InferenceEngine::InferRequest &request,
auto& out_mat = ctx->outMatR(i);
IE::Blob::Ptr this_blob = request.GetBlob(ctx->uu.params.output_names[i]);
copyFromIE(this_blob, out_mat);
ctx->out.post(ctx->output(i));
auto output = ctx->output(i);
ctx->out.meta(output, cv::GRunArg::Meta{});
ctx->out.post(std::move(output));
}
}
......@@ -904,7 +907,9 @@ struct InferList: public cv::detail::KernelTag {
// NB: In case there is no input data need to post output anyway
if (in_roi_vec.empty()) {
for (auto i : ade::util::iota(ctx->uu.params.num_out)) {
ctx->out.post(ctx->output(i));
auto output = ctx->output(i);
ctx->out.meta(output, cv::GRunArg::Meta{});
ctx->out.post(std::move(output));
}
return;
}
......@@ -940,7 +945,9 @@ struct InferList: public cv::detail::KernelTag {
}
for (auto i : ade::util::iota(ctx->uu.params.num_out)) {
ctx->out.post(ctx->output(i));
auto output = ctx->output(i);
ctx->out.meta(output, cv::GRunArg::Meta{});
ctx->out.post(std::move(output));
}
},
[](InferenceEngine::InferRequest &) { /* do nothing */ }
......@@ -1049,7 +1056,9 @@ struct InferList2: public cv::detail::KernelTag {
const auto list_size = ctx->inArg<cv::detail::VectorRef>(1u).size();
if (list_size == 0u) {
for (auto i : ade::util::iota(ctx->uu.params.num_out)) {
ctx->out.post(ctx->output(i));
auto output = ctx->output(i);
ctx->out.meta(output, cv::GRunArg::Meta{});
ctx->out.post(std::move(output));
}
return;
}
......@@ -1103,7 +1112,9 @@ struct InferList2: public cv::detail::KernelTag {
}
for (auto i : ade::util::iota(ctx->uu.params.num_out)) {
ctx->out.post(ctx->output(i));
auto output = ctx->output(i);
ctx->out.meta(output, cv::GRunArg::Meta{});
ctx->out.post(std::move(output));
}
},
[](InferenceEngine::InferRequest &) { /* do nothing */ }
......
......@@ -38,10 +38,6 @@ class GAPI_EXPORTS GCompiled::Priv
GMetaArgs m_outMetas; // inferred by compiler
std::unique_ptr<cv::gimpl::GExecutor> m_exec;
// NB: Used by python wrapper to clarify input/output types
GTypesInfo m_out_info;
GTypesInfo m_in_info;
void checkArgs(const cv::gimpl::GRuntimeArgs &args) const;
public:
......@@ -59,12 +55,6 @@ public:
const GMetaArgs& outMetas() const;
const cv::gimpl::GModel::Graph& model() const;
void setOutInfo(const GTypesInfo& info) { m_out_info = std::move(info); }
const GTypesInfo& outInfo() const { return m_out_info; }
void setInInfo(const GTypesInfo& info) { m_in_info = std::move(info); }
const GTypesInfo& inInfo() const { return m_in_info; }
};
}
......
......@@ -422,19 +422,6 @@ void cv::gimpl::GCompiler::compileIslands(ade::Graph &g, const cv::GCompileArgs
GIslandModel::compileIslands(gim, g, args);
}
// Collects a GTypeInfo (shape + opaque kind) for each of the given graph
// nodes, in order, by reading the Data metadata attached to every node.
// NB(review): presumably called with the protocol's in_nhs/out_nhs to record
// the graph's input/output types for the python bridge -- confirm at callers.
static cv::GTypesInfo collectInfo(const cv::gimpl::GModel::ConstGraph& g,
const std::vector<ade::NodeHandle>& nhs) {
cv::GTypesInfo info;
info.reserve(nhs.size());
ade::util::transform(nhs, std::back_inserter(info), [&g](const ade::NodeHandle& nh) {
const auto& data = g.metadata(nh).get<cv::gimpl::Data>();
return cv::GTypeInfo{data.shape, data.kind};
});
return info;
}
cv::GCompiled cv::gimpl::GCompiler::produceCompiled(GPtr &&pg)
{
// This is the final compilation step. Here:
......@@ -454,23 +441,15 @@ cv::GCompiled cv::gimpl::GCompiler::produceCompiled(GPtr &&pg)
// ...before call to produceCompiled();
GModel::ConstGraph cgr(*pg);
const auto &outMetas = GModel::ConstGraph(*pg).metadata()
.get<OutputMeta>().outMeta;
std::unique_ptr<GExecutor> pE(new GExecutor(std::move(pg)));
// FIXME: select which executor will be actually used,
// make GExecutor abstract.
std::unique_ptr<GExecutor> pE(new GExecutor(std::move(pg)));
GCompiled compiled;
compiled.priv().setup(m_metas, outMetas, std::move(pE));
// NB: Need to store input/output GTypeInfo to allocate output arrays for python bindings
auto out_meta = collectInfo(cgr, cgr.metadata().get<cv::gimpl::Protocol>().out_nhs);
auto in_meta = collectInfo(cgr, cgr.metadata().get<cv::gimpl::Protocol>().in_nhs);
compiled.priv().setOutInfo(std::move(out_meta));
compiled.priv().setInInfo(std::move(in_meta));
return compiled;
}
......@@ -486,16 +465,8 @@ cv::GStreamingCompiled cv::gimpl::GCompiler::produceStreamingCompiled(GPtr &&pg)
outMetas = GModel::ConstGraph(*pg).metadata().get<OutputMeta>().outMeta;
}
GModel::ConstGraph cgr(*pg);
// NB: Need to store input/output GTypeInfo to allocate output arrays for python bindings
auto out_meta = collectInfo(cgr, cgr.metadata().get<cv::gimpl::Protocol>().out_nhs);
auto in_meta = collectInfo(cgr, cgr.metadata().get<cv::gimpl::Protocol>().in_nhs);
compiled.priv().setOutInfo(std::move(out_meta));
compiled.priv().setInInfo(std::move(in_meta));
std::unique_ptr<GStreamingExecutor> pE(new GStreamingExecutor(std::move(pg),
m_args));
if (!m_metas.empty() && !outMetas.empty())
......
......@@ -96,6 +96,12 @@ cv::GStreamingCompiled::GStreamingCompiled()
{
}
// NB: This overload is called from python code.
// The callback receives the graph's input type info and returns the matching
// GRunArgs, which are then forwarded to the regular setSource() overload.
void cv::GStreamingCompiled::setSource(const cv::detail::ExtractArgsCallback& callback)
{
    setSource(callback(m_priv->inInfo()));
}
void cv::GStreamingCompiled::setSource(GRunArgs &&ins)
{
// FIXME: verify these input parameters according to the graph input meta
......@@ -119,46 +125,13 @@ bool cv::GStreamingCompiled::pull(cv::GRunArgsP &&outs)
std::tuple<bool, cv::GRunArgs> cv::GStreamingCompiled::pull()
{
// FIXME: Why it is not @ priv??
GRunArgs run_args;
GRunArgsP outs;
const auto& out_info = m_priv->outInfo();
run_args.reserve(out_info.size());
outs.reserve(out_info.size());
for (auto&& info : out_info)
{
switch (info.shape)
{
case cv::GShape::GMAT:
{
run_args.emplace_back(cv::Mat{});
outs.emplace_back(&cv::util::get<cv::Mat>(run_args.back()));
break;
}
case cv::GShape::GSCALAR:
{
run_args.emplace_back(cv::Scalar{});
outs.emplace_back(&cv::util::get<cv::Scalar>(run_args.back()));
break;
}
case cv::GShape::GARRAY:
{
switch (info.kind)
{
case cv::detail::OpaqueKind::CV_POINT2F:
run_args.emplace_back(cv::detail::VectorRef{std::vector<cv::Point2f>{}});
outs.emplace_back(cv::util::get<cv::detail::VectorRef>(run_args.back()));
break;
default:
util::throw_error(std::logic_error("Unsupported kind for GArray"));
}
break;
}
default:
util::throw_error(std::logic_error("Only cv::GMat and cv::GScalar are supported for python output"));
}
}
cv::detail::constructGraphOutputs(m_priv->outInfo(), run_args, outs);
bool is_over = m_priv->pull(std::move(outs));
return std::make_tuple(is_over, run_args);
......
......@@ -91,8 +91,6 @@ namespace opencv_test
}
};
// NB: Check an apply specifically designed to be called from Python,
// but can also be used from C++
struct GComputationPythonApplyTest: public ::testing::Test
{
cv::Size sz;
......@@ -103,22 +101,28 @@ namespace opencv_test
GComputationPythonApplyTest() : sz(cv::Size(300,300)), type(CV_8UC1),
in_mat1(sz, type), in_mat2(sz, type), out_mat_ocv(sz, type),
m_c([&](){
cv::GMat in1, in2;
cv::GMat out = in1 + in2;
return cv::GComputation(cv::GIn(in1, in2), cv::GOut(out));
})
cv::GMat in1, in2;
cv::GMat out = in1 + in2;
return cv::GComputation(cv::GIn(in1, in2), cv::GOut(out));
})
{
cv::randu(in_mat1, cv::Scalar::all(0), cv::Scalar::all(255));
cv::randu(in_mat2, cv::Scalar::all(0), cv::Scalar::all(255));
out_mat_ocv = in_mat1 + in_mat2;
}
};
}
TEST_F(GComputationPythonApplyTest, WithoutSerialization)
{
auto output = m_c.apply(cv::gin(in_mat1, in_mat2));
auto output = m_c.apply(cv::detail::ExtractArgsCallback{[this](const cv::GTypesInfo& info)
{
GAPI_Assert(info[0].shape == cv::GShape::GMAT);
GAPI_Assert(info[1].shape == cv::GShape::GMAT);
return cv::GRunArgs{in_mat1, in_mat2};
}
});
EXPECT_EQ(1u, output.size());
const auto& out_mat_gapi = cv::util::get<cv::Mat>(output[0]);
......@@ -130,7 +134,14 @@ namespace opencv_test
auto p = cv::gapi::serialize(m_c);
auto c = cv::gapi::deserialize<cv::GComputation>(p);
auto output = c.apply(cv::gin(in_mat1, in_mat2));
auto output = c.apply(cv::detail::ExtractArgsCallback{[this](const cv::GTypesInfo& info)
{
GAPI_Assert(info[0].shape == cv::GShape::GMAT);
GAPI_Assert(info[1].shape == cv::GShape::GMAT);
return cv::GRunArgs{in_mat1, in_mat2};
}
});
EXPECT_EQ(1u, output.size());
const auto& out_mat_gapi = cv::util::get<cv::Mat>(output[0]);
......
......@@ -1282,7 +1282,7 @@ TEST(Streaming, Python_Pull_Overload)
cv::Mat in_mat(sz, CV_8UC3);
cv::randu(in_mat, cv::Scalar::all(0), cv::Scalar(255));
auto ccomp = c.compileStreaming(cv::descr_of(in_mat));
auto ccomp = c.compileStreaming();
EXPECT_TRUE(ccomp);
EXPECT_FALSE(ccomp.running());
......@@ -1895,4 +1895,54 @@ TEST(GAPI_Streaming, AccessBGRFromNV12Frame)
}
}
TEST(GAPI_Streaming, TestPythonAPI)
{
    // Prepare a random input frame and the crop area.
    const cv::Size frame_sz(200, 200);
    const auto crop_rc = cv::Rect(13, 75, 100, 100);
    cv::Mat input(frame_sz, CV_8UC3);
    cv::randu(input, cv::Scalar::all(0), cv::Scalar(255));

    // OpenCV reference image
    cv::Mat ref_mat = input(crop_rc);

    // Graph under test: a single crop operation.
    cv::GMat in;
    cv::GComputation comp(cv::GIn(in), cv::GOut(cv::gapi::crop(in, crop_rc)));

    // NB: Used by python bridge -- the input metas come from a callback
    // instead of being passed explicitly to compileStreaming().
    auto cc = comp.compileStreaming(cv::detail::ExtractMetaCallback{[&](const cv::GTypesInfo& info)
    {
        GAPI_Assert(info.size() == 1u);
        GAPI_Assert(info[0].shape == cv::GShape::GMAT);
        return cv::GMetaArgs{cv::GMetaArg{cv::descr_of(input)}};
    }});

    // NB: Used by python bridge -- the input arguments come from a callback, too.
    cc.setSource(cv::detail::ExtractArgsCallback{[&](const cv::GTypesInfo& info)
    {
        GAPI_Assert(info.size() == 1u);
        GAPI_Assert(info[0].shape == cv::GShape::GMAT);
        return cv::GRunArgs{input};
    }});

    cc.start();

    // NB: Used by python bridge -- argument-less pull() allocates the outputs.
    bool is_over = false;
    cv::GRunArgs out_args;
    std::tie(is_over, out_args) = cc.pull();

    ASSERT_EQ(1u, out_args.size());
    ASSERT_TRUE(cv::util::holds_alternative<cv::Mat>(out_args[0]));
    EXPECT_EQ(0, cvtest::norm(ref_mat, cv::util::get<cv::Mat>(out_args[0]), NORM_INF));
    EXPECT_TRUE(is_over);

    cc.stop();
}
} // namespace opencv_test
......@@ -1608,13 +1608,53 @@ template<typename _Tp> static inline bool pyopencv_to_generic_vec(PyObject* obj,
return true;
}
// Converts a python sequence into std::vector<bool>.
// Specialization is needed because vector<bool> is a packed-bit container:
// its operator[] returns a proxy, so elements must be read through a local
// bool instead of being passed to pyopencv_to() by reference.
// Returns true on success (None/null is treated as "leave value unchanged").
template<> inline bool pyopencv_to_generic_vec(PyObject* obj, std::vector<bool>& value, const ArgInfo& info)
{
    if(!obj || obj == Py_None)
        return true;
    if (!PySequence_Check(obj))
        return false;
    // PySequence_Size() returns -1 on error (e.g. __len__ raised); the
    // previous code stored it straight into a size_t, turning the error
    // into a gigantic resize() request.
    const Py_ssize_t n = PySequence_Size(obj);
    if (n < 0)
        return false;
    value.resize(static_cast<size_t>(n));
    for(Py_ssize_t i = 0; i < n; i++ )
    {
        SafeSeqItem item_wrap(obj, i);
        bool elem{};
        if(!pyopencv_to(item_wrap.item, elem, info))
            return false;
        value[i] = elem;
    }
    return true;
}
// Converts std::vector<_Tp> into a new python list (new reference), or
// returns 0 with a python error set / on allocation failure.
// NB: the rendered diff left both the old and the new element-conversion
// lines in place, declaring `item` twice; this resolves to the intended
// version that copies the element first (so proxy-returning containers
// still bind to pyopencv_from's const& parameter).
template<typename _Tp> static inline PyObject* pyopencv_from_generic_vec(const std::vector<_Tp>& value)
{
    int i, n = (int)value.size();
    PyObject* seq = PyList_New(n);
    if (!seq)
        return 0;  // allocation failed; python error is already set
    for( i = 0; i < n; i++ )
    {
        _Tp elem = value[i];
        PyObject* item = pyopencv_from(elem);
        if(!item)
            break;
        // PyList_SetItem steals the reference to item.
        PyList_SetItem(seq, i, item);
    }
    if( i < n )
    {
        Py_DECREF(seq);
        return 0;
    }
    return seq;
}
template<> inline PyObject* pyopencv_from_generic_vec(const std::vector<bool>& value)
{
int i, n = (int)value.size();
PyObject* seq = PyList_New(n);
for( i = 0; i < n; i++ )
{
bool elem = value[i];
PyObject* item = pyopencv_from(elem);
if(!item)
break;
PyList_SetItem(seq, i, item);
......@@ -2160,7 +2200,8 @@ static PyMethodDef special_methods[] = {
#ifdef HAVE_OPENCV_GAPI
{"GIn", CV_PY_FN_WITH_KW(pyopencv_cv_GIn), "GIn(...) -> GInputProtoArgs"},
{"GOut", CV_PY_FN_WITH_KW(pyopencv_cv_GOut), "GOut(...) -> GOutputProtoArgs"},
{"gin", CV_PY_FN_WITH_KW(pyopencv_cv_gin), "gin(...) -> GRunArgs"},
{"gin", CV_PY_FN_WITH_KW(pyopencv_cv_gin), "gin(...) -> ExtractArgsCallback"},
{"descr_of", CV_PY_FN_WITH_KW(pyopencv_cv_descr_of), "descr_of(...) -> ExtractMetaCallback"},
#endif
{NULL, NULL},
};
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册