Unverified commit 5ee7abbe authored by Alexander Alekhin, committed by GitHub

Merge pull request #16088 from alalek:dnn_eltwise_layer_different_src_channels

dnn(eltwise): fix handling of different number of channels

* dnn(test): reproducer for Eltwise layer issue from PR16063

* dnn(eltwise): rework support for inputs with different channels

* dnn(eltwise): get rid of finalize(), variableChannels

* dnn(eltwise): update input sorting by number of channels

- do not swap inputs if the number of channels is the same after truncation

* dnn(test): skip "shortcut" with batch size 2 on MYRIAD targets
Parent f2cce5fd
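For context, a minimal sketch (not part of this diff) of what the fix enables: an Eltwise "sum" over inputs with different channel counts. The layer name and shapes are borrowed from the new test below, and it assumes an OpenCV build that contains this PR.

    #include <opencv2/dnn.hpp>
    using namespace cv;
    using namespace cv::dnn;

    int main()
    {
        Net net;
        LayerParams lp;
        lp.type = "Eltwise";
        lp.name = "testLayer";
        lp.set<std::string>("operation", "sum");
        // Keep input 0's channel count (4); the 5th channel of input 1 is dropped.
        lp.set<std::string>("output_channels_mode", "input_0_truncate");
        int eltwiseId = net.addLayer(lp.name, lp.type, lp);

        const int sz0[4] = {1, 4, 2, 2};
        const int sz1[4] = {1, 5, 2, 2};
        Mat a(4, sz0, CV_32F, Scalar(1));
        Mat b(4, sz1, CV_32F, Scalar(2));

        net.connect(0, 0, eltwiseId, 0);
        net.connect(0, 1, eltwiseId, 1);
        std::vector<String> names;
        names.push_back("input_0");
        names.push_back("input_1");
        net.setInputsNames(names);
        net.setInput(a, "input_0");
        net.setInput(b, "input_1");

        net.setPreferableBackend(DNN_BACKEND_OPENCV);
        Mat out = net.forward();  // shape {1, 4, 2, 2}, every element 1 + 2 = 3
        return 0;
    }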
@@ -58,6 +58,7 @@ ocv_warnings_disable(CMAKE_CXX_FLAGS
/wd4456 /wd4510 /wd4610 /wd4800
/wd4701 /wd4703 # potentially uninitialized local/pointer variable 'value' used
/wd4505 # unreferenced local function has been removed
/wd4458 # declaration of 'x' hides class member. GCC still works, MSVC bug is here: https://developercommunity.visualstudio.com/content/problem/219311/c-c4458-declaration-hides-class-member-warning-iss.html
-wd858 -wd2196
-Winvalid-offsetof # Apple Clang (attr_value.pb.cc)
)
......
@@ -508,6 +508,13 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
static Ptr<Layer> create(const LayerParams &params);
};
/** @brief Element wise operation on inputs
Extra optional parameters:
- "operation" as string. Values are "sum" (default), "prod", "max", "div"
- "coeff" as float array. Specify weights of inputs for SUM operation
- "output_channels_mode" as string. Values are "same" (default, all input must have the same layout), "input_0", "input_0_truncate", "max_input_channels"
*/
class CV_EXPORTS EltwiseLayer : public Layer
{
public:
......
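As a usage note for the doc comment above, a sketch of how these keys go into LayerParams (the layer name and coefficient values are made up for illustration; DictValue::arrayReal is the same helper the new tests below use):

    #include <opencv2/dnn.hpp>
    using namespace cv;
    using namespace cv::dnn;

    int main()
    {
        LayerParams lp;
        lp.type = "Eltwise";
        lp.name = "eltwise_example";                             // illustrative name
        lp.set<std::string>("operation", "sum");                 // "sum" (default), "prod", "max", "div"
        float coeffs[2] = {0.5f, -0.25f};                        // per-input weights; SUM only
        lp.set("coeff", DictValue::arrayReal<float*>(coeffs, 2));
        lp.set<std::string>("output_channels_mode", "input_0");  // or "same", "input_0_truncate",
                                                                 // "max_input_channels"
        return 0;
    }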
@@ -425,6 +425,7 @@ namespace cv {
}
shortcut_param.set<std::string>("op", "sum");
shortcut_param.set<std::string>("output_channels_mode", "input_0_truncate");
darknet::LayerParameter lp;
std::string layer_name = cv::format("shortcut_%d", layer_id);
......
@@ -99,6 +99,7 @@ class Test_Darknet_layers : public DNNTestLayer
public:
void testDarknetLayer(const std::string& name, bool hasWeights = false)
{
SCOPED_TRACE(name);
Mat inp = blobFromNPY(findDataFile("dnn/darknet/" + name + "_in.npy"));
Mat ref = blobFromNPY(findDataFile("dnn/darknet/" + name + "_out.npy"));
@@ -115,6 +116,47 @@
net.setInput(inp);
Mat out = net.forward();
normAssert(out, ref, "", default_l1, default_lInf);
if (inp.size[0] == 1) // test handling of batch size
{
SCOPED_TRACE("batch size 2");
#if defined(INF_ENGINE_RELEASE)
if (target == DNN_TARGET_MYRIAD && name == "shortcut")
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
#endif
std::vector<int> sz2 = shape(inp);
sz2[0] = 2;
Net net2 = readNet(cfg, model);
net2.setPreferableBackend(backend);
net2.setPreferableTarget(target);
Range ranges0[4] = { Range(0, 1), Range::all(), Range::all(), Range::all() };
Range ranges1[4] = { Range(1, 2), Range::all(), Range::all(), Range::all() };
Mat inp2(sz2, inp.type(), Scalar::all(0));
inp.copyTo(inp2(ranges0));
inp.copyTo(inp2(ranges1));
net2.setInput(inp2);
Mat out2 = net2.forward();
EXPECT_EQ(0, cv::norm(out2(ranges0), out2(ranges1), NORM_INF)) << "Batch result is not equal: " << name;
Mat ref2 = ref;
if (ref.dims == 2 && out2.dims == 3)
{
int ref_3d_sizes[3] = {1, ref.rows, ref.cols};
ref2 = Mat(3, ref_3d_sizes, ref.type(), (void*)ref.data);
}
/*else if (ref.dims == 3 && out2.dims == 4)
{
int ref_4d_sizes[4] = {1, ref.size[0], ref.size[1], ref.size[2]};
ref2 = Mat(4, ref_4d_sizes, ref.type(), (void*)ref.data);
}*/
ASSERT_EQ(out2.dims, ref2.dims) << ref.dims;
normAssert(out2(ranges0), ref2, "", default_l1, default_lInf);
normAssert(out2(ranges1), ref2, "", default_l1, default_lInf);
}
}
};
......
@@ -1582,30 +1582,91 @@ TEST(Layer_Test_Convolution, relu_fusion)
}
typedef testing::TestWithParam<tuple<bool, tuple<Backend, Target> > > Layer_Test_Eltwise_unequal;
-TEST_P(Layer_Test_Eltwise_unequal, Accuracy)
TEST_P(Layer_Test_Eltwise_unequal, accuracy_input_0_truncate)
{
bool weighted = get<0>(GetParam());
int backendId = get<0>(get<1>(GetParam()));
int targetId = get<1>(get<1>(GetParam()));
if (backendId == DNN_BACKEND_OPENCV && targetId == DNN_TARGET_OPENCL_FP16)
applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL_FP16);
Net net;
LayerParams lp;
lp.type = "Eltwise";
lp.name = "testLayer";
lp.set<std::string>("output_channels_mode", "input_0_truncate");
const int inpShapes[][4] = {{1, 4, 2, 2}, {1, 5, 2, 2}, {1, 3, 2, 2}};
const int out_channels = inpShapes[0][1];
std::vector<String> inpNames(3);
std::vector<Mat> inputs(3);
std::vector<float> weights(3, 1);
if (weighted)
{
for (int i = 0; i < inputs.size(); ++i)
weights[i] = -0.125f + i * 0.25f;
lp.set("coeff", DictValue::arrayReal<float*>(&weights[0], weights.size()));
}
int eltwiseId = net.addLayer(lp.name, lp.type, lp);
for (int i = 0; i < inputs.size(); ++i)
{
inputs[i].create(4, inpShapes[i], CV_32F);
size_t total = inputs[i].total();
for (size_t j = 0; j < total; j++)
inputs[i].ptr<float>()[j] = j + i * 100;
inpNames[i] = format("input_%d", i);
net.connect(0, i, eltwiseId, i);
}
Mat ref(4, inpShapes[0], CV_32F, Scalar(0));
net.setInputsNames(inpNames);
for (int i = 0; i < inputs.size(); ++i)
{
//std::cout << ref.reshape(1,1) << endl;
net.setInput(inputs[i], inpNames[i]);
for (size_t batchId = 0; batchId < ref.size[0]; batchId++)
{
int input_channels = inputs[i].size[1];
Range ranges[4] = { Range(batchId, batchId + 1), Range(0, std::min(out_channels, input_channels)), Range::all(), Range::all() };
Mat ref_slice = ref(ranges);
Mat input_slice = inputs[i](ranges);
ref_slice += weights[i] * input_slice;
}
}
net.setPreferableBackend(backendId);
net.setPreferableTarget(targetId);
Mat out = net.forward();
normAssert(out, ref);
if (testing::Test::HasFailure())
{
std::cout << out.reshape(1,1) << endl;
std::cout << ref.reshape(1,1) << endl;
}
}
TEST_P(Layer_Test_Eltwise_unequal, accuracy_input_0)
{
bool weighted = get<0>(GetParam());
int backendId = get<0>(get<1>(GetParam()));
int targetId = get<1>(get<1>(GetParam()));
Net net;
LayerParams lp;
lp.type = "Eltwise";
lp.name = "testLayer";
lp.set<std::string>("output_channels_mode", "input_0");
const int inpShapes[][4] = {{1, 4, 2, 2}, {1, 2, 2, 2}, {1, 3, 2, 2}};
const int out_channels = inpShapes[0][1];
std::vector<String> inpNames(3);
std::vector<Mat> inputs(3);
-size_t numOutValues = 1*4*2*2; // By the first input
std::vector<float> weights(3, 1);
if (weighted)
{
for (int i = 0; i < inputs.size(); ++i)
-randu(Mat(1, 1, CV_32F, &weights[i]), -1, 1);
weights[i] = -0.125f + i * 0.25f;
lp.set("coeff", DictValue::arrayReal<float*>(&weights[0], weights.size()));
}
@@ -1613,27 +1674,40 @@ TEST_P(Layer_Test_Eltwise_unequal, Accuracy)
for (int i = 0; i < inputs.size(); ++i)
{
inputs[i].create(4, inpShapes[i], CV_32F);
-randu(inputs[i], 0, 255);
size_t total = inputs[i].total();
for (size_t j = 0; j < total; j++)
inputs[i].ptr<float>()[j] = j + i * 100;
inpNames[i] = format("input_%d", i);
net.connect(0, i, eltwiseId, i);
}
-Mat ref(1, numOutValues, CV_32F, Scalar(0));
Mat ref(4, inpShapes[0], CV_32F, Scalar(0));
net.setInputsNames(inpNames);
for (int i = 0; i < inputs.size(); ++i)
{
//std::cout << ref.reshape(1,1) << endl;
net.setInput(inputs[i], inpNames[i]);
-if (numOutValues >= inputs[i].total())
-ref.colRange(0, inputs[i].total()) += weights[i] * inputs[i].reshape(1, 1);
-else
-ref += weights[i] * inputs[i].reshape(1, 1).colRange(0, numOutValues);
for (size_t batchId = 0; batchId < ref.size[0]; batchId++)
{
int input_channels = inputs[i].size[1];
Range ranges[4] = { Range(batchId, batchId + 1), Range(0, std::min(out_channels, input_channels)), Range::all(), Range::all() };
Mat ref_slice = ref(ranges);
Mat input_slice = inputs[i](ranges);
ref_slice += weights[i] * input_slice;
}
}
net.setPreferableBackend(backendId);
net.setPreferableTarget(targetId);
Mat out = net.forward();
-normAssert(out.reshape(1, 1), ref);
normAssert(out, ref);
if (testing::Test::HasFailure())
{
std::cout << out.reshape(1,1) << endl;
std::cout << ref.reshape(1,1) << endl;
}
}
INSTANTIATE_TEST_CASE_P(/**/, Layer_Test_Eltwise_unequal, Combine(
testing::Bool(),
dnnBackendsAndTargets()
......
@@ -1368,7 +1368,8 @@ double norm(InputArray _src1, InputArray _src2, int normType, InputArray _mask)
int normType0 = normType;
normType = normType == NORM_L2SQR ? NORM_L2 : normType;
-CV_Assert( src1.type() == src2.type() && src1.size == src2.size );
CV_CheckTypeEQ(src1.type(), src2.type(), "");
CV_Assert(src1.size == src2.size);
CV_Assert( mask.empty() || (src1.size == mask.size && mask.type() == CV_8U) );
CV_Assert( normType == NORM_INF || normType == NORM_L1 || normType == NORM_L2 );
const Mat *arrays[]={&src1, &src2, &mask, 0};
......
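The split assertion changes only the diagnostics: CV_CheckTypeEQ (from opencv2/core/check.hpp) reports both actual type values in the exception message, where the old combined CV_Assert echoed only the failed expression text. A small sketch, with a made-up message string:

    #include <iostream>
    #include <opencv2/core.hpp>
    #include <opencv2/core/check.hpp>
    using namespace cv;

    int main()
    {
        Mat a(2, 2, CV_32F), b(2, 2, CV_64F);
        try
        {
            // Throws cv::Exception whose message includes both actual types,
            // making type mismatches in norm()-style checks easier to diagnose.
            CV_CheckTypeEQ(a.type(), b.type(), "inputs must have the same type");
        }
        catch (const Exception& e)
        {
            std::cout << e.what() << std::endl;
        }
        return 0;
    }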