From 38b6387bc50b618993f216d563651866a1611eaa Mon Sep 17 00:00:00 2001
From: nihuini
Date: Thu, 17 Dec 2020 15:54:33 +0800
Subject: [PATCH] ncnnoptimize and ncnn2mem now accept weight-less custom
 layers

---
 src/net.h              |   6 +-
 src/paramdict.cpp      |   5 ++
 src/paramdict.h        |   3 +
 tools/ncnn2mem.cpp     |  34 +++++++++++
 tools/ncnnoptimize.cpp | 131 +++++++++++++++++++++++++++++++++++++++++
 5 files changed, 176 insertions(+), 3 deletions(-)

diff --git a/src/net.h b/src/net.h
index 896e6e59..a7c05bd0 100644
--- a/src/net.h
+++ b/src/net.h
@@ -147,10 +147,10 @@ protected:
 #if NCNN_STRING
     int find_blob_index_by_name(const char* name) const;
     int find_layer_index_by_name(const char* name) const;
-    int custom_layer_to_index(const char* type);
-    Layer* create_custom_layer(const char* type);
+    virtual int custom_layer_to_index(const char* type);
+    virtual Layer* create_custom_layer(const char* type);
 #endif // NCNN_STRING
-    Layer* create_custom_layer(int index);
+    virtual Layer* create_custom_layer(int index);
 
     int forward_layer(int layer_index, std::vector<Mat>& blob_mats, const Option& opt) const;
 #if NCNN_VULKAN
diff --git a/src/paramdict.cpp b/src/paramdict.cpp
index 1614df10..8f9dedb5 100644
--- a/src/paramdict.cpp
+++ b/src/paramdict.cpp
@@ -30,6 +30,11 @@ ParamDict::ParamDict()
     clear();
 }
 
+int ParamDict::type(int id) const
+{
+    return params[id].type;
+}
+
 // TODO strict type check
 int ParamDict::get(int id, int def) const
 {
diff --git a/src/paramdict.h b/src/paramdict.h
index 7809a6b2..db6f757e 100644
--- a/src/paramdict.h
+++ b/src/paramdict.h
@@ -30,6 +30,9 @@ public:
     // empty
     ParamDict();
 
+    // get type
+    int type(int id) const;
+
     // get int
     int get(int id, int def) const;
     // get float
diff --git a/tools/ncnn2mem.cpp b/tools/ncnn2mem.cpp
index b9e1ccad..a5147217 100644
--- a/tools/ncnn2mem.cpp
+++ b/tools/ncnn2mem.cpp
@@ -13,6 +13,7 @@
 // specific language governing permissions and limitations under the License.
 
 #include "layer.h"
+#include "layer_type.h"
 
 #include <stdio.h>
 #include <string.h>
@@ -120,6 +121,8 @@ static int dump_param(const char* parampath, const char* parambinpath, const cha
     layer_names.resize(layer_count);
     blob_names.resize(blob_count);
 
+    std::vector<std::string> custom_layer_index;
+
     int blob_index = 0;
     for (int i = 0; i < layer_count; i++)
     {
@@ -137,6 +140,26 @@ static int dump_param(const char* parampath, const char* parambinpath, const cha
         sanitize_name(layer_name);
 
         int typeindex = ncnn::layer_to_index(layer_type);
+        if (typeindex == -1)
+        {
+            // lookup custom_layer_index
+            for (size_t j = 0; j < custom_layer_index.size(); j++)
+            {
+                if (custom_layer_index[j] == layer_type)
+                {
+                    typeindex = ncnn::LayerType::CustomBit | j;
+                    break;
+                }
+            }
+
+            if (typeindex == -1)
+            {
+                // new custom layer type
+                size_t j = custom_layer_index.size();
+                custom_layer_index.push_back(layer_type);
+                typeindex = ncnn::LayerType::CustomBit | j;
+            }
+        }
 
         fwrite(&typeindex, sizeof(int), 1, mp);
         fwrite(&bottom_count, sizeof(int), 1, mp);
@@ -263,6 +286,17 @@ static int dump_param(const char* parampath, const char* parambinpath, const cha
         layer_names[i] = std::string(layer_name);
     }
 
+    // dump custom layer index
+    for (size_t j = 0; j < custom_layer_index.size(); j++)
+    {
+        const std::string& layer_type = custom_layer_index[j];
+        int typeindex = ncnn::LayerType::CustomBit | j;
+
+        fprintf(ip, "const int TYPEINDEX_%s = %d;\n", layer_type.c_str(), typeindex);
+
+        fprintf(stderr, "net.register_custom_layer(%s_id::TYPEINDEX_%s, %s_layer_creator);\n", param_var.c_str(), layer_type.c_str(), layer_type.c_str());
+    }
+
     fprintf(ip, "} // namespace %s_id\n", param_var.c_str());
     fprintf(ip, "#endif // NCNN_INCLUDE_GUARD_%s\n", include_guard_var.c_str());
diff --git a/tools/ncnnoptimize.cpp b/tools/ncnnoptimize.cpp
index 84fe748c..f135726a 100644
--- a/tools/ncnnoptimize.cpp
+++ b/tools/ncnnoptimize.cpp
@@ -24,6 +24,7 @@
 // ncnn public header
 #include "datareader.h"
 #include "layer.h"
+#include "layer_type.h"
 #include "net.h"
 
 // ncnn private header
@@ -134,8 +135,71 @@ public:
     std::map<void*, size_t> bookkeeper;
 };
 
+class CustomLayer : public ncnn::Layer
+{
+public:
+    virtual int load_param(const ncnn::ParamDict& pd)
+    {
+        mpd = pd;
+        return 0;
+    }
+
+    void write_param(FILE* pp)
+    {
+        for (int i = 0; i < NCNN_MAX_PARAM_COUNT; i++)
+        {
+            int type = mpd.type(i);
+            if (type == 0)
+                continue;
+
+            if (type == 2)
+            {
+                fprintf(pp, " %d=%d", i, mpd.get(i, 0));
+            }
+            if (type == 3)
+            {
+                fprintf(pp, " %d=%e", i, mpd.get(i, 0.f));
+            }
+            if (type == 5)
+            {
+                ncnn::Mat v = mpd.get(i, ncnn::Mat());
+                int len = v.w;
+                fprintf(pp, " %d=%d", -i - 23300, len);
+                const int* p = v;
+                for (int j = 0; j < len; j++)
+                {
+                    fprintf(pp, ",%d", p[j]);
+                }
+            }
+            if (type == 6)
+            {
+                ncnn::Mat v = mpd.get(i, ncnn::Mat());
+                int len = v.w;
+                fprintf(pp, " %d=%d", -i - 23300, len);
+                const float* p = v;
+                for (int j = 0; j < len; j++)
+                {
+                    fprintf(pp, ",%e", p[j]);
+                }
+            }
+        }
+    }
+
+public:
+    ncnn::ParamDict mpd;
+};
+
 class NetOptimize : public ncnn::Net
 {
+public:
+    NetOptimize();
+
+    virtual int custom_layer_to_index(const char* type);
+    virtual ncnn::Layer* create_custom_layer(const char* type);
+    virtual ncnn::Layer* create_custom_layer(int index);
+
+    int custom_layer_index;
+
 public:
     // 0=fp32 1=fp16
     int storage_type;
@@ -190,6 +254,51 @@ public:
     int save(const char* parampath, const char* binpath);
 };
 
+NetOptimize::NetOptimize()
+{
+    custom_layer_index = 0;
+}
+
+int NetOptimize::custom_layer_to_index(const char* type)
+{
+    int index = Net::custom_layer_to_index(type);
+    if (index != -1)
+        return index;
+
+    fprintf(stderr, "custom_layer_to_index %s\n", type);
+
+    index = ncnn::LayerType::CustomBit | custom_layer_index;
+    custom_layer_index++;
+    return index;
+}
+
+ncnn::Layer* NetOptimize::create_custom_layer(const char* type)
+{
+    ncnn::Layer* layer = Net::create_custom_layer(type);
+    if (layer)
+        return layer;
+
+    fprintf(stderr, "create_custom_layer %s\n", type);
+
+    layer = new CustomLayer;
+    layer->type = type;
+    layer->typeindex = custom_layer_to_index(type);
+    return layer;
+}
+
+ncnn::Layer* NetOptimize::create_custom_layer(int index)
+{
+    ncnn::Layer* layer = Net::create_custom_layer(index);
+    if (layer)
+        return layer;
+
+    fprintf(stderr, "create_custom_layer %d\n", index);
+
+    layer = new CustomLayer;
+    layer->typeindex = index;
+    return layer;
+}
+
 int NetOptimize::fuse_batchnorm_scale()
 {
     const size_t layer_count = layers.size();
@@ -2650,6 +2759,12 @@ int NetOptimize::replace_convolution_with_innerproduct_after_innerproduct()
 
 int NetOptimize::shape_inference()
 {
+    if (custom_layer_index)
+    {
+        fprintf(stderr, "model has %d custom layer, shape_inference aborted\n", custom_layer_index);
+        return -1;
+    }
+
     const size_t layer_count = layers.size();
     const size_t blob_count = blobs.size();
 
@@ -2763,6 +2878,12 @@ int NetOptimize::shape_inference()
 
 int NetOptimize::estimate_memory_footprint()
 {
+    if (custom_layer_index)
+    {
+        fprintf(stderr, "model has %d custom layer, estimate_memory_footprint aborted\n", custom_layer_index);
+        return -1;
+    }
+
     const size_t layer_count = layers.size();
     const size_t blob_count = blobs.size();
 
@@ -3001,6 +3122,16 @@ int NetOptimize::save(const char* parampath, const char* binpath)
             }
         }
 
+        // custom op
+        if (layer->typeindex & ncnn::LayerType::CustomBit)
+        {
+            ((CustomLayer*)layer)->write_param(pp);
+
+            fprintf(pp, "\n");
+
+            continue;
+        }
+
         ncnn::Layer* layer_default = ncnn::create_layer(layer->typeindex);
 
         ncnn::ParamDict pd;
--
GitLab
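
Usage sketch (editorial note, not part of the commit): for every custom
layer it encounters, ncnn2mem now prints a ready-made registration hint to
stderr. The application-side wiring would look roughly like the following,
assuming a hypothetical weight-less custom layer type MySwish and generated
files mynet.id.h / mynet.param.bin / mynet.bin (all of these names are
illustrative, not from the patch):

    #include "layer.h"
    #include "net.h"

    #include "mynet.id.h" // emitted by ncnn2mem; declares TYPEINDEX_MySwish

    // application-side implementation of the weight-less custom layer
    class MySwish : public ncnn::Layer
    {
    public:
        MySwish()
        {
            one_blob_only = true;
        }
        // forward() omitted for brevity; a real layer must override it,
        // since the ncnn::Layer base implementation just returns an error
    };

    DEFINE_LAYER_CREATOR(MySwish)

    int main()
    {
        ncnn::Net net;

        // register by the generated type index so the binary param stream
        // written by ncnn2mem resolves to our creator
        net.register_custom_layer(mynet_param_id::TYPEINDEX_MySwish, MySwish_layer_creator);

        net.load_param_bin("mynet.param.bin");
        net.load_model("mynet.bin");
        return 0;
    }

On the ncnnoptimize side, the CustomLayer shim captures the layer's
ParamDict in load_param() and re-emits every populated entry (int, float,
int array, float array) in write_param(), so a weight-less custom layer
line survives optimization verbatim; shape_inference() and
estimate_memory_footprint() bail out instead, since the tool cannot infer
shapes for layers it cannot run.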