Commit 5590aea4 authored by Alexander Alekhin

Merge pull request #8998 from abratchik:DNN.java.wrappers.fix

......
@@ -9,7 +9,7 @@ endif()
set(the_description "Deep neural network module. It allows to load models from different frameworks and to make forward pass")
ocv_add_module(dnn opencv_core opencv_imgproc WRAP python matlab)
ocv_add_module(dnn opencv_core opencv_imgproc WRAP python matlab java)
ocv_warnings_disable(CMAKE_CXX_FLAGS -Wno-shadow -Wno-parentheses -Wmaybe-uninitialized -Wsign-promo
-Wmissing-declarations -Wmissing-prototypes
)
......
......
@@ -39,15 +39,15 @@
//
//M*/
#ifndef OPENCV_DNN_DNN_DICT_HPP
#define OPENCV_DNN_DNN_DICT_HPP
#include <opencv2/core.hpp>
#include <map>
#include <ostream>
#include <opencv2/dnn/dnn.hpp>
#ifndef OPENCV_DNN_DNN_DICT_HPP
#define OPENCV_DNN_DNN_DICT_HPP
namespace cv {
namespace dnn {
CV__DNN_EXPERIMENTAL_NS_BEGIN
......
@@ -57,14 +57,14 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
/** @brief This struct stores the scalar value (or array) of one of the following types: double, cv::String or int64.
* @todo Maybe int64 is useless because double type exactly stores at least 2^52 integers.
*/
struct DictValue
struct CV_EXPORTS_W DictValue
{
DictValue(const DictValue &r);
DictValue(int64 i = 0) : type(Param::INT), pi(new AutoBuffer<int64,1>) { (*pi)[0] = i; } //!< Constructs integer scalar
DictValue(int i) : type(Param::INT), pi(new AutoBuffer<int64,1>) { (*pi)[0] = i; } //!< Constructs integer scalar
CV_WRAP DictValue(int i) : type(Param::INT), pi(new AutoBuffer<int64,1>) { (*pi)[0] = i; } //!< Constructs integer scalar
DictValue(unsigned p) : type(Param::INT), pi(new AutoBuffer<int64,1>) { (*pi)[0] = p; } //!< Constructs integer scalar
DictValue(double p) : type(Param::REAL), pd(new AutoBuffer<double,1>) { (*pd)[0] = p; } //!< Constructs floating point scalar
DictValue(const String &s) : type(Param::STRING), ps(new AutoBuffer<String,1>) { (*ps)[0] = s; } //!< Constructs string scalar
CV_WRAP DictValue(double p) : type(Param::REAL), pd(new AutoBuffer<double,1>) { (*pd)[0] = p; } //!< Constructs floating point scalar
CV_WRAP DictValue(const String &s) : type(Param::STRING), ps(new AutoBuffer<String,1>) { (*ps)[0] = s; } //!< Constructs string scalar
DictValue(const char *s) : type(Param::STRING), ps(new AutoBuffer<String,1>) { (*ps)[0] = s; } //!< @overload
template<typename TypeIter>
......
@@ -79,9 +79,13 @@ struct DictValue
int size() const;
bool isInt() const;
bool isString() const;
bool isReal() const;
CV_WRAP bool isInt() const;
CV_WRAP bool isString() const;
CV_WRAP bool isReal() const;
CV_WRAP int getIntValue(int idx = -1) const;
CV_WRAP double getRealValue(int idx = -1) const;
CV_WRAP String getStringValue(int idx = -1) const;
DictValue &operator=(const DictValue &r);
......
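The CV_WRAP markers above are what expose DictValue's constructors and the new isInt/isReal/isString and getIntValue/getRealValue/getStringValue accessors to the wrapper generator. A minimal sketch of the resulting Java-side usage (the wrapper class here is hypothetical; method names follow the generated org.opencv.dnn bindings):

import org.opencv.core.Core;
import org.opencv.dnn.DictValue;

public class DictValueSketch {
    public static void main(String[] args) {
        // Assumes OpenCV was built with the java wrapper for the dnn module enabled.
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);

        DictValue i = new DictValue(42);        // CV_WRAP DictValue(int)
        DictValue s = new DictValue("conv1");   // CV_WRAP DictValue(const String&)

        if (i.isInt())    System.out.println(i.getIntValue());     // prints 42
        if (s.isString()) System.out.println(s.getStringValue());  // prints conv1
    }
}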
......
@@ -46,11 +46,10 @@
#include <opencv2/core.hpp>
#if !defined CV_DOXYGEN && !defined CV_DNN_DONT_ADD_EXPERIMENTAL_NS
#define CV__DNN_EXPERIMENTAL_NS_USE using namespace experimental_dnn_v1;
#define CV__DNN_EXPERIMENTAL_NS_BEGIN namespace experimental_dnn_v1 {
#define CV__DNN_EXPERIMENTAL_NS_END }
namespace cv { namespace dnn { namespace experimental_dnn_v1 { } using namespace experimental_dnn_v1; }}
#else
#define CV__DNN_EXPERIMENTAL_NS_USE
#define CV__DNN_EXPERIMENTAL_NS_BEGIN
#define CV__DNN_EXPERIMENTAL_NS_END
#endif
......
@@ -59,7 +58,6 @@
namespace cv {
namespace dnn {
CV__DNN_EXPERIMENTAL_NS_USE
CV__DNN_EXPERIMENTAL_NS_BEGIN
//! @addtogroup dnn
//! @{
......
@@ -160,7 +158,7 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
* Each class, derived from Layer, must implement allocate() methods to declare its own outputs and forward() to compute outputs.
* Also, before using the new layer in networks you must register your layer by using one of @ref dnnLayerFactory "LayerFactory" macros.
*/
class CV_EXPORTS_W Layer
class CV_EXPORTS_W Layer : public Algorithm
{
public:
......
@@ -329,7 +327,7 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
/** @brief Container for strings and integers. */
typedef DictValue LayerId;
/** @brief Returns pointer to layer with specified name which the network uses. */
/** @brief Returns pointer to layer with specified id or name which the network uses. */
CV_WRAP Ptr<Layer> getLayer(LayerId layerId);
/** @brief Returns pointers to input layers of specific layer. */
......
@@ -517,7 +515,7 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
/** @brief Returns the list of types for layers used in the model.
* @param layersTypes output parameter for returning types.
*/
CV_WRAP void getLayerTypes(std::vector<String>& layersTypes) const;
CV_WRAP void getLayerTypes(CV_OUT std::vector<String>& layersTypes) const;
/** @brief Returns count of layers of specified type.
* @param layerType type.
......
@@ -532,18 +530,18 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
* @param blobs output parameter to store resulting bytes for intermediate blobs.
*/
CV_WRAP void getMemoryConsumption(const std::vector<MatShape>& netInputShapes,
size_t& weights, size_t& blobs) const;
CV_OUT size_t& weights, CV_OUT size_t& blobs) const;
/** @overload */
CV_WRAP void getMemoryConsumption(const MatShape& netInputShape,
size_t& weights, size_t& blobs) const;
CV_OUT size_t& weights, CV_OUT size_t& blobs) const;
/** @overload */
CV_WRAP void getMemoryConsumption(const int layerId,
const std::vector<MatShape>& netInputShapes,
size_t& weights, size_t& blobs) const;
CV_OUT size_t& weights, CV_OUT size_t& blobs) const;
/** @overload */
CV_WRAP void getMemoryConsumption(const int layerId,
const MatShape& netInputShape,
size_t& weights, size_t& blobs) const;
CV_OUT size_t& weights, CV_OUT size_t& blobs) const;
/** @brief Computes the number of bytes required to store
* all weights and intermediate blobs for each layer.
......
@@ -553,12 +551,12 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
* @param blobs output parameter to store resulting bytes for intermediate blobs.
*/
CV_WRAP void getMemoryConsumption(const std::vector<MatShape>& netInputShapes,
std::vector<int>& layerIds, std::vector<size_t>& weights,
std::vector<size_t>& blobs) const;
CV_OUT std::vector<int>& layerIds, CV_OUT std::vector<size_t>& weights,
CV_OUT std::vector<size_t>& blobs) const;
/** @overload */
CV_WRAP void getMemoryConsumption(const MatShape& netInputShape,
std::vector<int>& layerIds, std::vector<size_t>& weights,
std::vector<size_t>& blobs) const;
CV_OUT std::vector<int>& layerIds, CV_OUT std::vector<size_t>& weights,
CV_OUT std::vector<size_t>& blobs) const;
private:
struct Impl;
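The added CV_OUT annotations tell the generator to treat these reference parameters as output arguments. Together with the type_dict mappings introduced further below (MatShape and vector<int> cross JNI as MatOfInt, vector<size_t> as MatOfDouble), the vector overload should surface in Java roughly as sketched here; the signature is inferred from those mappings, not quoted from the patch:

// net is a populated org.opencv.dnn.Net (see the TensorFlow test below).
MatOfInt netInputShape = new MatOfInt(1, 3, 224, 224); // MatShape input as MatOfInt
MatOfInt layerIds = new MatOfInt();                    // CV_OUT std::vector<int>
MatOfDouble weights = new MatOfDouble();               // CV_OUT std::vector<size_t>
MatOfDouble blobs = new MatOfDouble();
net.getMemoryConsumption(netInputShape, layerIds, weights, blobs);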
......
@@ -566,7 +564,7 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
};
/** @brief Small interface class for loading trained serialized models of different dnn-frameworks. */
class CV_EXPORTS_W Importer
class CV_EXPORTS_W Importer : public Algorithm
{
public:
......
@@ -602,7 +600,7 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
* @param model path to the .pb file with binary protobuf description of the network architecture.
* @returns Pointer to the created importer, NULL in failure cases.
*/
CV_EXPORTS Ptr<Importer> createTensorflowImporter(const String &model);
CV_EXPORTS_W Ptr<Importer> createTensorflowImporter(const String &model);
/** @brief Creates the importer of <a href="http://torch.ch">Torch7</a> framework network.
* @param filename path to the file, dumped from Torch by using torch.save() function.
......
@@ -676,4 +674,4 @@ CV__DNN_EXPERIMENTAL_NS_END
#include <opencv2/dnn/layer.hpp>
#include <opencv2/dnn/dnn.inl.hpp>
#endif /* __OPENCV_DNN_DNN_HPP__ */
#endif /* OPENCV_DNN_DNN_HPP */
......
@@ -115,6 +115,11 @@ inline int DictValue::get<int>(int idx) const
return (int)get<int64>(idx);
}
inline int DictValue::getIntValue(int idx) const
{
return (int)get<int64>(idx);
}
template<>
inline unsigned DictValue::get<unsigned>(int idx) const
{
......
@@ -148,6 +153,11 @@ inline double DictValue::get<double>(int idx) const
}
}
inline double DictValue::getRealValue(int idx) const
{
return get<double>(idx);
}
template<>
inline float DictValue::get<float>(int idx) const
{
......
@@ -162,6 +172,12 @@ inline String DictValue::get<String>(int idx) const
return (*ps)[(idx == -1) ? 0 : idx];
}
inline String DictValue::getStringValue(int idx) const
{
return get<String>(idx);
}
inline void DictValue::release()
{
switch (type)
......
misc/java/src/cpp/dnn_converters.hpp
{
"type_dict": {
"MatShape": {
"j_type": "MatOfInt",
"jn_type": "long",
"jni_type": "jlong",
"jni_var": "MatShape %(n)s",
"suffix": "J",
"v_type": "Mat",
"j_import": "org.opencv.core.MatOfInt"
},
"vector_MatShape": {
"j_type": "List<MatOfInt>",
"jn_type": "List<MatOfInt>",
"jni_type": "jobject",
"jni_var": "std::vector< MatShape > %(n)s",
"suffix": "Ljava_util_List",
"v_type": "vector_MatShape",
"j_import": "org.opencv.core.MatOfInt"
},
"vector_size_t": {
"j_type": "MatOfDouble",
"jn_type": "long",
"jni_type": "jlong",
"jni_var": "std::vector<size_t> %(n)s",
"suffix": "J",
"v_type": "Mat",
"j_import": "org.opencv.core.MatOfDouble"
},
"vector_Ptr_Layer": {
"j_type": "List<Layer>",
"jn_type": "List<Layer>",
"jni_type": "jobject",
"jni_var": "std::vector< Ptr<cv::dnn::Layer> > %(n)s",
"suffix": "Ljava_util_List",
"v_type": "vector_Layer",
"j_import": "org.opencv.dnn.Layer"
},
"LayerId": {
"j_type": "DictValue",
"jn_type": "long",
"jn_args": [
[
"__int64",
".getNativeObjAddr()"
]
],
"jni_name": "(*(cv::dnn::DictValue*)%(n)s_nativeObj)",
"jni_type": "jlong",
"suffix": "J",
"j_import": "org.opencv.dnn.DictValue"
}
}
}
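Roughly, j_type is the type exposed in the generated Java API, jn_type/jni_type describe how the value crosses the native boundary, jni_var is the local variable the generated JNI stub declares, v_type selects the converter that fills it, and j_import is added to the generated Java source. For LayerId this means a DictValue argument travels as the jlong from getNativeObjAddr(), which is invisible at the Java call site:

DictValue layerId = new DictValue("conv1");  // any layer name or integer id
Layer layer = net.getLayer(layerId);         // the wrapper passes layerId.getNativeObjAddr()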
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
// Author: abratchik
#include "dnn_converters.hpp"
void Mat_to_MatShape(cv::Mat& mat, MatShape& matshape)
{
matshape.clear();
CHECK_MAT(mat.type()==CV_32SC1 && mat.cols==1);
matshape = (MatShape) mat;
}
void MatShape_to_Mat(MatShape& matshape, cv::Mat& mat)
{
mat = cv::Mat(matshape, true);
}
void Mat_to_vector_size_t(cv::Mat& mat, std::vector<size_t>& v_size_t)
{
v_size_t.clear();
CHECK_MAT(mat.type()==CV_32SC1 && mat.cols==1);
v_size_t = (std::vector<size_t>) mat;
}
void vector_size_t_to_Mat(std::vector<size_t>& v_size_t, cv::Mat& mat)
{
mat = cv::Mat(v_size_t, true);
}
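// Unpacks a java.util.List<MatOfInt> into std::vector<MatShape>,
// dereferencing each element's native Mat handle.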
std::vector<MatShape> List_to_vector_MatShape(JNIEnv* env, jobject list)
{
static jclass juArrayList = ARRAYLIST(env);
jmethodID m_size = LIST_SIZE(env, juArrayList);
jmethodID m_get = LIST_GET(env, juArrayList);
static jclass jMatOfInt = MATOFINT(env);
jint len = env->CallIntMethod(list, m_size);
std::vector<MatShape> result;
result.reserve(len);
for (jint i=0; i<len; i++)
{
jobject element = static_cast<jobject>(env->CallObjectMethod(list, m_get, i));
cv::Mat& mat = *((cv::Mat*) GETNATIVEOBJ(env, jMatOfInt, element) );
MatShape matshape = (MatShape) mat;
result.push_back(matshape);
env->DeleteLocalRef(element);
}
return result;
}
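// Wraps each native Ptr<Layer> in a new org.opencv.dnn.Layer object
// and collects them into a java.util.ArrayList.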
jobject vector_Ptr_Layer_to_List(JNIEnv* env, std::vector<cv::Ptr<cv::dnn::Layer> >& vs)
{
static jclass juArrayList = ARRAYLIST(env);
static jmethodID m_create = CONSTRUCTOR(env, juArrayList);
jmethodID m_add = LIST_ADD(env, juArrayList);
static jclass jLayerClass = LAYER(env);
static jmethodID m_create_layer = LAYER_CONSTRUCTOR(env, jLayerClass);
jobject result = env->NewObject(juArrayList, m_create, vs.size());
for (std::vector< cv::Ptr<cv::dnn::Layer> >::iterator it = vs.begin(); it != vs.end(); ++it) {
jobject element = env->NewObject(jLayerClass, m_create_layer, (*it).get());
env->CallBooleanMethod(result, m_add, element);
env->DeleteLocalRef(element);
}
return result;
}
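// Reverse direction: reads the native Ptr<Layer> handle back out of
// each org.opencv.dnn.Layer element in the list.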
std::vector<cv::Ptr<cv::dnn::Layer> > List_to_vector_Ptr_Layer(JNIEnv* env, jobject list)
{
static jclass juArrayList = ARRAYLIST(env);
jmethodID m_size = LIST_SIZE(env, juArrayList);
jmethodID m_get = LIST_GET(env, juArrayList);
static jclass jLayerClass = LAYER(env);
jint len = env->CallIntMethod(list, m_size);
std::vector< cv::Ptr<cv::dnn::Layer> > result;
result.reserve(len);
for (jint i=0; i<len; i++)
{
jobject element = static_cast<jobject>(env->CallObjectMethod(list, m_get, i));
cv::Ptr<cv::dnn::Layer>* layer_ptr = (cv::Ptr<cv::dnn::Layer>*) GETNATIVEOBJ(env, jLayerClass, element) ;
cv::Ptr<cv::dnn::Layer> layer = *(layer_ptr);
result.push_back(layer);
env->DeleteLocalRef(element);
}
return result;
}
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
// Author: abratchik
#ifndef DNN_CONVERTERS_HPP
#define DNN_CONVERTERS_HPP
#include <jni.h>
#include "opencv2/java.hpp"
#include "opencv2/core.hpp"
#include "opencv2/dnn/dnn.hpp"
#define LAYER(ENV) static_cast<jclass>(ENV->NewGlobalRef(ENV->FindClass("org/opencv/dnn/Layer")))
#define LAYER_CONSTRUCTOR(ENV, CLS) ENV->GetMethodID(CLS, "<init>", "(J)V")
using namespace cv::dnn;
void Mat_to_MatShape(cv::Mat& mat, MatShape& matshape);
void MatShape_to_Mat(MatShape& matshape, cv::Mat& mat);
void Mat_to_vector_size_t(cv::Mat& mat, std::vector<size_t>& v_size_t);
void vector_size_t_to_Mat(std::vector<size_t>& v_size_t, cv::Mat& mat);
std::vector<MatShape> List_to_vector_MatShape(JNIEnv* env, jobject list);
jobject vector_Ptr_Layer_to_List(JNIEnv* env, std::vector<cv::Ptr<cv::dnn::Layer> >& vs);
std::vector<cv::Ptr<cv::dnn::Layer> > List_to_vector_Ptr_Layer(JNIEnv* env, jobject list);
#endif /* DNN_CONVERTERS_HPP */
package org.opencv.test.dnn;
import java.io.File;
import java.util.ArrayList;
import java.util.List;
import org.opencv.core.Core;
import org.opencv.core.Mat;
import org.opencv.core.Scalar;
import org.opencv.core.Size;
import org.opencv.dnn.DictValue;
import org.opencv.dnn.Dnn;
import org.opencv.dnn.Importer;
import org.opencv.dnn.Layer;
import org.opencv.dnn.Net;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.imgproc.Imgproc;
import org.opencv.test.OpenCVTestCase;
public class DnnTensorFlowTest extends OpenCVTestCase {
private final static String ENV_OPENCV_DNN_TEST_DATA_PATH = "OPENCV_DNN_TEST_DATA_PATH";
private final static String ENV_OPENCV_TEST_DATA_PATH = "OPENCV_TEST_DATA_PATH";
String modelFileName = "";
String sourceImageFile = "";
Net net;
@Override
protected void setUp() throws Exception {
super.setUp();
String envDnnTestDataPath = System.getenv(ENV_OPENCV_DNN_TEST_DATA_PATH);
if(envDnnTestDataPath == null){
isTestCaseEnabled = false;
return;
}
File dnnTestDataPath = new File(envDnnTestDataPath);
modelFileName = new File(dnnTestDataPath, "dnn/tensorflow_inception_graph.pb").toString();
String envTestDataPath = System.getenv(ENV_OPENCV_TEST_DATA_PATH);
if(envTestDataPath == null) throw new Exception(ENV_OPENCV_TEST_DATA_PATH + " has to be defined!");
File testDataPath = new File(envTestDataPath);
File f = new File(testDataPath, "dnn/space_shuttle.jpg");
sourceImageFile = f.toString();
if(!f.exists()) throw new Exception("Test image is missing: " + sourceImageFile);
net = new Net();
if(net.empty()) {
Importer importer = Dnn.createTensorflowImporter(modelFileName);
importer.populateNet(net);
}
}
public void testGetLayerTypes() {
List<String> layertypes = new ArrayList<String>();
net.getLayerTypes(layertypes);
assertFalse("No layer types returned!", layertypes.isEmpty());
}
public void testGetLayer() {
List<String> layernames = net.getLayerNames();
assertFalse("Test net returned no layers!", layernames.isEmpty());
String testLayerName = layernames.get(0);
DictValue layerId = new DictValue(testLayerName);
assertEquals("DictValue did not return the string, which was used in constructor!", testLayerName, layerId.getStringValue());
Layer layer = net.getLayer(layerId);
assertEquals("Layer name does not match the expected value!", testLayerName, layer.get_name());
}
public void testTestNetForward() {
Mat rawImage = Imgcodecs.imread(sourceImageFile);
assertNotNull("Loading image from file failed!", rawImage);
Mat image = new Mat();
Imgproc.resize(rawImage, image, new Size(224,224));
Mat inputBlob = Dnn.blobFromImage(image);
assertNotNull("Converting image to blob failed!", inputBlob);
Mat inputBlobP = new Mat();
Core.subtract(inputBlob, new Scalar(117.0), inputBlobP);
net.setInput(inputBlobP, "input" );
Mat result = net.forward();
assertNotNull("Net returned no result!", result);
Core.MinMaxLocResult minmax = Core.minMaxLoc(result.reshape(1, 1));
assertTrue("No image recognized!", minmax.maxVal > 0.9);
}
}
......
@@ -1539,9 +1539,7 @@ void Net::deleteLayer(LayerId)
Ptr<Layer> Net::getLayer(LayerId layerId)
{
LayerData &ld = impl->getLayerData(layerId);
if (!ld.layerInstance)
CV_Error(Error::StsNullPtr, format("Requested layer \"%s\" was not initialized", ld.name.c_str()));
return ld.layerInstance;
return ld.getLayerInstance();
}
std::vector<Ptr<Layer> > Net::getLayerInputs(LayerId layerId)
......
......
@@ -386,7 +386,7 @@ endif(ANDROID)
# working around the lack of `__attribute__ ((visibility("default")))` in jni_md.h/JNIEXPORT
string(REPLACE "-fvisibility=hidden" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
ocv_warnings_disable(CMAKE_CXX_FLAGS -Wunused-const-variable)
ocv_warnings_disable(CMAKE_CXX_FLAGS -Wunused-const-variable -Wundef)
ocv_add_library(${the_module} SHARED ${handwritten_h_sources} ${handwritten_cpp_sources} ${generated_cpp_sources}
${copied_files}
......