Commit af61a158 authored by Dmitry Kurtaev

Fix Darknet eltwise

Parent adbd6136
@@ -420,7 +420,7 @@ namespace cv {
             if (alpha != 1)
             {
                 std::vector<float> coeffs(2, 1);
-                coeffs[1] = alpha;
+                coeffs[0] = alpha;
                 shortcut_param.set("coeff", DictValue::arrayReal<float*>(&coeffs[0], coeffs.size()));
             }
@@ -431,8 +431,8 @@ namespace cv {
             lp.layer_name = layer_name;
             lp.layer_type = shortcut_param.type;
             lp.layerParams = shortcut_param;
-            lp.bottom_indexes.push_back(fused_layer_names.at(from));
             lp.bottom_indexes.push_back(last_layer);
+            lp.bottom_indexes.push_back(fused_layer_names.at(from));
             last_layer = layer_name;
             net->layers.push_back(lp);
@@ -599,7 +599,7 @@ namespace cv {
                 current_channels = 0;
                 for (size_t k = 0; k < layers_vec.size(); ++k) {
-                    layers_vec[k] = layers_vec[k] > 0 ? layers_vec[k] : (layers_vec[k] + layers_counter);
+                    layers_vec[k] = layers_vec[k] >= 0 ? layers_vec[k] : (layers_vec[k] + layers_counter);
                     current_channels += net->out_channels_vec[layers_vec[k]];
                 }
@@ -645,8 +645,6 @@ namespace cv {
                 int from = std::atoi(bottom_layer.c_str());
                 from = from < 0 ? from + layers_counter : from;
-                current_channels = net->out_channels_vec[from];
                 setParams.setShortcut(from, alpha);
             }
             else if (layer_type == "upsample")
...
@@ -114,11 +114,11 @@ public:
         CV_Assert(op == SUM || coeffs.size() == 0);

         int dims = inputs[0].size();
+        // Number of channels in output shape is determined by the first input tensor.
         int numChannels = inputs[0][1];
         for (int i = 1; i < inputs.size(); i++)
         {
             CV_Assert(inputs[0][0] == inputs[i][0]);
-            numChannels = std::max(numChannels, inputs[i][1]);

             // It's allowed for channels axis to be different.
             for (int j = 2; j < dims; j++)
@@ -177,7 +177,7 @@ public:
             CV_Assert(srcs[i].type() == dst.type() &&
                       srcs[i].isContinuous());
             // Sort srcs and coefficients in the order by number of channels
-            for( int j = i - 1; j >= 1 && p.srcs[j - 1]->size[1] < p.srcs[j]->size[1]; j++ )
+            for( int j = i; j >= 1 && p.srcs[j - 1]->size[1] < p.srcs[j]->size[1]; j-- )
             {
                 std::swap(p.srcs[j - 1], p.srcs[j]);
                 if (!p.coeffs.empty())
...
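A note on the second hunk above: the corrected inner loop is an ordinary insertion-sort pass that keeps the source blobs ordered by channel count, largest first, while keeping the coefficients aligned with them (the old loop walked j upward from i - 1 instead of downward from i). Below is a minimal standalone sketch of just that ordering step, using made-up channel counts and coefficients rather than the layer's real data structures:

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    int main()
    {
        // Hypothetical channel counts and coefficients of three eltwise inputs.
        std::vector<int> channels = {4, 5, 3};
        std::vector<float> coeffs = {0.1f, 0.2f, 0.3f};

        for (size_t i = 1; i < channels.size(); ++i)
        {
            // Same idea as the fixed loop: walk j downwards and swap while the
            // previous element has fewer channels, i.e. sort descending.
            for (size_t j = i; j >= 1 && channels[j - 1] < channels[j]; --j)
            {
                std::swap(channels[j - 1], channels[j]);
                std::swap(coeffs[j - 1], coeffs[j]);
            }
        }

        for (size_t i = 0; i < channels.size(); ++i)
            std::printf("channels=%d coeff=%g\n", channels[i], coeffs[i]);
        return 0;
    }

With the inputs above it prints 5, 4, 3 with each coefficient still attached to its original input, which is the invariant the fixed loop restores.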
@@ -445,6 +445,8 @@ TEST_P(Test_Darknet_layers, shortcut)
 {
     testDarknetLayer("shortcut");
     testDarknetLayer("shortcut_leaky");
+    testDarknetLayer("shortcut_unequal");
+    testDarknetLayer("shortcut_unequal_2");
 }

 TEST_P(Test_Darknet_layers, upsample)
...
@@ -1506,7 +1506,7 @@ TEST_P(Layer_Test_Eltwise_unequal, Accuracy)
     const int inpShapes[][4] = {{1, 4, 2, 2}, {1, 5, 2, 2}, {1, 3, 2, 2}};
     std::vector<String> inpNames(3);
     std::vector<Mat> inputs(3);
-    size_t numValues = 0;
+    size_t numOutValues = 1*4*2*2; // By the first input
     std::vector<float> weights(3, 1);
     if (weighted)
@@ -1520,18 +1520,20 @@ TEST_P(Layer_Test_Eltwise_unequal, Accuracy)
     for (int i = 0; i < inputs.size(); ++i)
     {
         inputs[i].create(4, inpShapes[i], CV_32F);
-        numValues = std::max(numValues, inputs[i].total());
         randu(inputs[i], 0, 255);
         inpNames[i] = format("input_%d", i);
         net.connect(0, i, eltwiseId, i);
     }
-    Mat ref(1, numValues, CV_32F, Scalar(0));
+    Mat ref(1, numOutValues, CV_32F, Scalar(0));
     net.setInputsNames(inpNames);
     for (int i = 0; i < inputs.size(); ++i)
     {
         net.setInput(inputs[i], inpNames[i]);
-        ref.colRange(0, inputs[i].total()) += weights[i] * inputs[i].reshape(1, 1);
+        if (numOutValues >= inputs[i].total())
+            ref.colRange(0, inputs[i].total()) += weights[i] * inputs[i].reshape(1, 1);
+        else
+            ref += weights[i] * inputs[i].reshape(1, 1).colRange(0, numOutValues);
     }
     net.setPreferableBackend(backendId);
...
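For readers of the last test hunk: after this commit the reference output is sized after the first input rather than the largest one, a larger input is truncated to that size, and a smaller input only adds to a prefix of it. Below is a minimal sketch of that reference computation. The helper name eltwiseSumFirstInputShape and the flat std::vector<float> representation are illustrative assumptions, not OpenCV API; the real test works on cv::Mat with colRange:

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    // Illustrative helper (assumed name): weighted element-wise sum where the
    // output has exactly as many values as the first input.
    std::vector<float> eltwiseSumFirstInputShape(const std::vector<std::vector<float> >& inputs,
                                                 const std::vector<float>& weights)
    {
        std::vector<float> out(inputs[0].size(), 0.f);
        for (size_t i = 0; i < inputs.size(); ++i)
        {
            // A larger input is truncated to the output length; a smaller one
            // contributes only over its own prefix of the output.
            size_t n = std::min(out.size(), inputs[i].size());
            for (size_t j = 0; j < n; ++j)
                out[j] += weights[i] * inputs[i][j];
        }
        return out;
    }

    int main()
    {
        // Channel counts 4, 5 and 3 over a 2x2 spatial grid, matching the
        // shapes used in the test above (batch size 1).
        std::vector<std::vector<float> > inputs(3);
        inputs[0].assign(4 * 2 * 2, 1.f);
        inputs[1].assign(5 * 2 * 2, 2.f);
        inputs[2].assign(3 * 2 * 2, 3.f);
        std::vector<float> weights(3, 1.f);

        std::vector<float> out = eltwiseSumFirstInputShape(inputs, weights);
        std::printf("output has %zu values, same as the first input (%zu)\n",
                    out.size(), inputs[0].size());
        return 0;
    }

Running it reports 16 output values (1*4*2*2), i.e. the first input's size, which is what numOutValues encodes in the updated test.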