Commit 38b6387b authored by nihuini

ncnnoptimize and ncnn2mem now accept weight-less custom layers

Parent e93ad408
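With this change, ncnn2mem assigns every layer type it does not recognize an index of the form ncnn::LayerType::CustomBit | j, emits a TYPEINDEX_* constant into the generated id header, and prints the matching register_custom_layer() call to stderr (see the dump_param hunks below). A minimal consumer sketch under assumed names: the model files mynet.param/mynet.bin, the generated header mynet.id.h, its namespace mynet_param_id, and the MyOp layer are all hypothetical here.

#include "layer.h"
#include "net.h"

#include "mynet.id.h" // assumed output of: ncnn2mem mynet.param mynet.bin mynet.id.h mynet.mem.h

// hypothetical custom layer; any ncnn::Layer subclass works here
class MyOp : public ncnn::Layer
{
public:
    MyOp()
    {
        one_blob_only = true;
    }

    virtual int forward(const ncnn::Mat& bottom_blob, ncnn::Mat& top_blob, const ncnn::Option& opt) const
    {
        // placeholder pass-through so the sketch stays self-contained
        top_blob = bottom_blob.clone(opt.blob_allocator);
        return top_blob.empty() ? -100 : 0;
    }
};

DEFINE_LAYER_CREATOR(MyOp)

int main()
{
    ncnn::Net net;

    // bind the creator to the type index ncnn2mem reserved for "MyOp",
    // exactly as the hint printed to stderr suggests
    net.register_custom_layer(mynet_param_id::TYPEINDEX_MyOp, MyOp_layer_creator);

    net.load_param_bin("mynet.param.bin");
    net.load_model("mynet.bin");
    return 0;
}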
@@ -147,10 +147,10 @@ protected:
 #if NCNN_STRING
     int find_blob_index_by_name(const char* name) const;
     int find_layer_index_by_name(const char* name) const;
-    int custom_layer_to_index(const char* type);
-    Layer* create_custom_layer(const char* type);
+    virtual int custom_layer_to_index(const char* type);
+    virtual Layer* create_custom_layer(const char* type);
 #endif // NCNN_STRING
-    Layer* create_custom_layer(int index);
+    virtual Layer* create_custom_layer(int index);
     int forward_layer(int layer_index, std::vector<Mat>& blob_mats, const Option& opt) const;
 #if NCNN_VULKAN
......
@@ -30,6 +30,11 @@ ParamDict::ParamDict()
     clear();
 }
 
+int ParamDict::type(int id) const
+{
+    return params[id].type;
+}
+
 // TODO strict type check
 int ParamDict::get(int id, int def) const
 {
......
@@ -30,6 +30,9 @@ public:
     // empty
     ParamDict();
 
+    // get type
+    int type(int id) const;
+
     // get int
     int get(int id, int def) const;
 
     // get float
......
@@ -13,6 +13,7 @@
 // specific language governing permissions and limitations under the License.
 
 #include "layer.h"
+#include "layer_type.h"
 #include <cstddef>
 #include <ctype.h>
@@ -120,6 +121,8 @@ static int dump_param(const char* parampath, const char* parambinpath, const cha
     layer_names.resize(layer_count);
     blob_names.resize(blob_count);
 
+    std::vector<std::string> custom_layer_index;
+
     int blob_index = 0;
     for (int i = 0; i < layer_count; i++)
     {
@@ -137,6 +140,26 @@ static int dump_param(const char* parampath, const char* parambinpath, const cha
         sanitize_name(layer_name);
 
         int typeindex = ncnn::layer_to_index(layer_type);
+        if (typeindex == -1)
+        {
+            // lookup custom_layer_index
+            for (size_t j = 0; j < custom_layer_index.size(); j++)
+            {
+                if (custom_layer_index[j] == layer_type)
+                {
+                    typeindex = ncnn::LayerType::CustomBit | j;
+                    break;
+                }
+            }
+
+            if (typeindex == -1)
+            {
+                // new custom layer type
+                size_t j = custom_layer_index.size();
+                custom_layer_index.push_back(layer_type);
+                typeindex = ncnn::LayerType::CustomBit | j;
+            }
+        }
+
         fwrite(&typeindex, sizeof(int), 1, mp);
         fwrite(&bottom_count, sizeof(int), 1, mp);
@@ -263,6 +286,17 @@ static int dump_param(const char* parampath, const char* parambinpath, const cha
         layer_names[i] = std::string(layer_name);
     }
 
+    // dump custom layer index
+    for (size_t j = 0; j < custom_layer_index.size(); j++)
+    {
+        const std::string& layer_type = custom_layer_index[j];
+
+        int typeindex = ncnn::LayerType::CustomBit | j;
+        fprintf(ip, "const int TYPEINDEX_%s = %d;\n", layer_type.c_str(), typeindex);
+
+        fprintf(stderr, "net.register_custom_layer(%s_id::TYPEINDEX_%s, %s_layer_creator);\n", param_var.c_str(), layer_type.c_str(), layer_type.c_str());
+    }
+
     fprintf(ip, "} // namespace %s_id\n", param_var.c_str());
     fprintf(ip, "#endif // NCNN_INCLUDE_GUARD_%s\n", include_guard_var.c_str());
......
@@ -24,6 +24,7 @@
 // ncnn public header
 #include "datareader.h"
 #include "layer.h"
+#include "layer_type.h"
 #include "net.h"
 
 // ncnn private header
@@ -134,8 +135,71 @@ public:
     std::map<void*, size_t> bookkeeper;
 };
 
+class CustomLayer : public ncnn::Layer
+{
+public:
+    virtual int load_param(const ncnn::ParamDict& pd)
+    {
+        mpd = pd;
+        return 0;
+    }
+
+    void write_param(FILE* pp)
+    {
+        for (int i = 0; i < NCNN_MAX_PARAM_COUNT; i++)
+        {
+            int type = mpd.type(i);
+            if (type == 0)
+                continue;
+
+            if (type == 2)
+            {
+                fprintf(pp, " %d=%d", i, mpd.get(i, 0));
+            }
+            if (type == 3)
+            {
+                fprintf(pp, " %d=%e", i, mpd.get(i, 0.f));
+            }
+            if (type == 5)
+            {
+                ncnn::Mat v = mpd.get(i, ncnn::Mat());
+                int len = v.w;
+                fprintf(pp, " %d=%d", -i - 23300, len);
+
+                const int* p = v;
+                for (int j = 0; j < len; j++)
+                {
+                    fprintf(pp, ",%d", p[j]);
+                }
+            }
+            if (type == 6)
+            {
+                ncnn::Mat v = mpd.get(i, ncnn::Mat());
+                int len = v.w;
+                fprintf(pp, " %d=%d", -i - 23300, len);
+
+                const float* p = v;
+                for (int j = 0; j < len; j++)
+                {
+                    fprintf(pp, ",%e", p[j]);
+                }
+            }
+        }
+    }
+
+public:
+    ncnn::ParamDict mpd;
+};
+
 class NetOptimize : public ncnn::Net
 {
+public:
+    NetOptimize();
+
+    virtual int custom_layer_to_index(const char* type);
+    virtual ncnn::Layer* create_custom_layer(const char* type);
+    virtual ncnn::Layer* create_custom_layer(int index);
+
+    int custom_layer_index;
+
 public:
     // 0=fp32 1=fp16
     int storage_type;
@@ -190,6 +254,51 @@ public:
     int save(const char* parampath, const char* binpath);
 };
 
+NetOptimize::NetOptimize()
+{
+    custom_layer_index = 0;
+}
+
+int NetOptimize::custom_layer_to_index(const char* type)
+{
+    int index = Net::custom_layer_to_index(type);
+    if (index != -1)
+        return index;
+
+    fprintf(stderr, "custom_layer_to_index %s\n", type);
+
+    index = ncnn::LayerType::CustomBit | custom_layer_index;
+    custom_layer_index++;
+
+    return index;
+}
+
+ncnn::Layer* NetOptimize::create_custom_layer(const char* type)
+{
+    ncnn::Layer* layer = Net::create_custom_layer(type);
+    if (layer)
+        return layer;
+
+    fprintf(stderr, "create_custom_layer %s\n", type);
+
+    layer = new CustomLayer;
+    layer->type = type;
+    layer->typeindex = custom_layer_to_index(type);
+
+    return layer;
+}
+
+ncnn::Layer* NetOptimize::create_custom_layer(int index)
+{
+    ncnn::Layer* layer = Net::create_custom_layer(index);
+    if (layer)
+        return layer;
+
+    fprintf(stderr, "create_custom_layer %d\n", index);
+
+    layer = new CustomLayer;
+    layer->typeindex = index;
+
+    return layer;
+}
+
 int NetOptimize::fuse_batchnorm_scale()
 {
     const size_t layer_count = layers.size();
@@ -2650,6 +2759,12 @@ int NetOptimize::replace_convolution_with_innerproduct_after_innerproduct()
 
 int NetOptimize::shape_inference()
 {
+    if (custom_layer_index)
+    {
+        fprintf(stderr, "model has %d custom layer, shape_inference aborted\n", custom_layer_index);
+        return -1;
+    }
+
     const size_t layer_count = layers.size();
     const size_t blob_count = blobs.size();
@@ -2763,6 +2878,12 @@ int NetOptimize::shape_inference()
 
 int NetOptimize::estimate_memory_footprint()
 {
+    if (custom_layer_index)
+    {
+        fprintf(stderr, "model has %d custom layer, estimate_memory_footprint aborted\n", custom_layer_index);
+        return -1;
+    }
+
     const size_t layer_count = layers.size();
     const size_t blob_count = blobs.size();
@@ -3001,6 +3122,16 @@ int NetOptimize::save(const char* parampath, const char* binpath)
             }
         }
 
+        // custom op
+        if (layer->typeindex & ncnn::LayerType::CustomBit)
+        {
+            ((CustomLayer*)layer)->write_param(pp);
+            fprintf(pp, "\n");
+
+            continue;
+        }
+
         ncnn::Layer* layer_default = ncnn::create_layer(layer->typeindex);
 
         ncnn::ParamDict pd;
......
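For context on the ncnnoptimize side: CustomLayer above simply captures the ParamDict in load_param(), and write_param() re-emits it, so a weight-less custom layer survives a load/save round-trip even though ncnn has no creator registered for it. Assuming a hypothetical layer type MyOp carrying int param 0, float param 1, and a three-element int array param 3, the saved param line would look like:

MyOp             myop0            1 1 in out 0=7 1=2.500000e-01 -23303=3,1,2,3

The -23303 key is ncnn's existing array-parameter encoding, reproduced by the type 5/6 branches: the id is written as -i - 23300 (here i = 3) and the value is the element count followed by the comma-separated elements, while plain int and float params (types 2 and 3) keep their id unchanged.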