Unverified commit 520736d0, authored by Jason, committed by GitHub

Merge pull request #61 from jiangjiajun/export_onnx

Export onnx
......@@ -3,3 +3,4 @@
This directory contains the PaddleX model deployment code. For build and usage tutorials, see:
- [C++ deployment documentation](../docs/deploy/deploy.md#C部署)
- [OpenVINO deployment documentation](../docs/deploy/deploy.md#openvino部署)
cmake_minimum_required(VERSION 3.0)
project(PaddleX CXX C)
option(WITH_STATIC_LIB "Compile demo with static/shared library, default use static." OFF)
SET(CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake" ${CMAKE_MODULE_PATH})
SET(OPENVINO_DIR "" CACHE PATH "Location of libraries")
SET(OPENCV_DIR "" CACHE PATH "Location of libraries")
SET(GFLAGS_DIR "" CACHE PATH "Location of libraries")
SET(NGRAPH_LIB "" CACHE PATH "Location of libraries")
include(cmake/yaml-cpp.cmake)
include_directories("${CMAKE_SOURCE_DIR}/")
link_directories("${CMAKE_CURRENT_BINARY_DIR}")
include_directories("${CMAKE_CURRENT_BINARY_DIR}/ext/yaml-cpp/src/ext-yaml-cpp/include")
link_directories("${CMAKE_CURRENT_BINARY_DIR}/ext/yaml-cpp/lib")
macro(safe_set_static_flag)
foreach(flag_var
CMAKE_CXX_FLAGS CMAKE_CXX_FLAGS_DEBUG CMAKE_CXX_FLAGS_RELEASE
CMAKE_CXX_FLAGS_MINSIZEREL CMAKE_CXX_FLAGS_RELWITHDEBINFO)
if(${flag_var} MATCHES "/MD")
string(REGEX REPLACE "/MD" "/MT" ${flag_var} "${${flag_var}}")
endif(${flag_var} MATCHES "/MD")
endforeach(flag_var)
endmacro()
if (NOT DEFINED OPENVINO_DIR OR ${OPENVINO_DIR} STREQUAL "")
message(FATAL_ERROR "please set OPENVINO_DIR with -DOPENVINO_DIR=/path/to/inference_engine")
endif()
if (NOT DEFINED OPENCV_DIR OR ${OPENCV_DIR} STREQUAL "")
message(FATAL_ERROR "please set OPENCV_DIR with -DOPENCV_DIR=/path/to/opencv")
endif()
if (NOT DEFINED GFLAGS_DIR OR ${GFLAGS_DIR} STREQUAL "")
message(FATAL_ERROR "please set GFLAGS_DIR with -DGFLAGS_DIR=/path/to/gflags")
endif()
if (NOT DEFINED NGRAPH_LIB OR ${NGRAPH_LIB} STREQUAL "")
message(FATAL_ERROR "please set NGRAPH_LIB with -DNGRAPH_LIB=/path/to/ngraph/lib")
endif()
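# For reference, a typical configure step supplying these four required paths
# looks roughly like the following (illustrative values; scripts/build.sh
# further below wires them up the same way):
#   cmake .. -DOPENVINO_DIR=/path/to/inference_engine \
#            -DGFLAGS_DIR=/path/to/gflags \
#            -DNGRAPH_LIB=/path/to/ngraph/lib \
#            -DOPENCV_DIR=/path/to/opencv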
include_directories("${OPENVINO_DIR}")
link_directories("${OPENVINO_DIR}/lib")
include_directories("${OPENVINO_DIR}/include")
link_directories("${OPENVINO_DIR}/external/tbb/lib")
include_directories("${OPENVINO_DIR}/external/tbb/include/tbb")
link_directories("${GFLAGS_DIR}/lib")
include_directories("${GFLAGS_DIR}/include")
link_directories("${NGRAPH_LIB}")
link_directories("${NGRAPH_LIB}/lib")
if (WIN32)
find_package(OpenCV REQUIRED PATHS ${OPENCV_DIR}/build/ NO_DEFAULT_PATH)
unset(OpenCV_DIR CACHE)
else ()
find_package(OpenCV REQUIRED PATHS ${OPENCV_DIR}/share/OpenCV NO_DEFAULT_PATH)
endif ()
include_directories(${OpenCV_INCLUDE_DIRS})
if (WIN32)
add_definitions("/DGOOGLE_GLOG_DLL_DECL=")
set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} /bigobj /MTd")
set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} /bigobj /MT")
set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /bigobj /MTd")
set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /bigobj /MT")
if (WITH_STATIC_LIB)
safe_set_static_flag()
add_definitions(-DSTATIC_LIB)
endif()
else()
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -g -O2 -fopenmp -std=c++11")
set(CMAKE_STATIC_LIBRARY_PREFIX "")
endif()
if(WITH_STATIC_LIB)
set(DEPS ${OPENVINO_DIR}/lib/intel64/libinference_engine${CMAKE_STATIC_LIBRARY_SUFFIX})
set(DEPS ${DEPS} ${OPENVINO_DIR}/lib/intel64/libinference_engine_legacy${CMAKE_STATIC_LIBRARY_SUFFIX})
else()
set(DEPS ${OPENVINO_DIR}/lib/intel64/libinference_engine${CMAKE_SHARED_LIBRARY_SUFFIX})
set(DEPS ${DEPS} ${OPENVINO_DIR}/lib/intel64/libinference_engine_legacy${CMAKE_SHARED_LIBRARY_SUFFIX})
endif()
if (NOT WIN32)
set(DEPS ${DEPS}
glog gflags z yaml-cpp
)
else()
set(DEPS ${DEPS}
glog gflags_static libprotobuf zlibstatic xxhash libyaml-cppmt)
set(DEPS ${DEPS} libcmt shlwapi)
endif(NOT WIN32)
if (NOT WIN32)
set(EXTERNAL_LIB "-ldl -lrt -lgomp -lz -lm -lpthread")
set(DEPS ${DEPS} ${EXTERNAL_LIB})
endif()
set(DEPS ${DEPS} ${OpenCV_LIBS})
add_executable(classifier src/classifier.cpp src/transforms.cpp src/paddlex.cpp)
ADD_DEPENDENCIES(classifier ext-yaml-cpp)
target_link_libraries(classifier ${DEPS})
{
"configurations": [
{
"name": "x64-Release",
"generator": "Ninja",
"configurationType": "RelWithDebInfo",
"inheritEnvironments": [ "msvc_x64_x64" ],
"buildRoot": "${projectDir}\\out\\build\\${name}",
"installRoot": "${projectDir}\\out\\install\\${name}",
"cmakeCommandArgs": "",
"buildCommandArgs": "-v",
"ctestCommandArgs": "",
"variables": [
{
"name": "OPENCV_DIR",
"value": "C:/projects/opencv",
"type": "PATH"
},
{
"name": "OPENVINO_LIB",
"value": "C:/projetcs/inference_engine",
"type": "PATH"
}
]
}
]
}
find_package(Git REQUIRED)
include(ExternalProject)
message("${CMAKE_BUILD_TYPE}")
ExternalProject_Add(
ext-yaml-cpp
URL https://bj.bcebos.com/paddlex/deploy/deps/yaml-cpp.zip
URL_MD5 9542d6de397d1fbd649ed468cb5850e6
CMAKE_ARGS
-DYAML_CPP_BUILD_TESTS=OFF
-DYAML_CPP_BUILD_TOOLS=OFF
-DYAML_CPP_INSTALL=OFF
-DYAML_CPP_BUILD_CONTRIB=OFF
-DMSVC_SHARED_RT=OFF
-DBUILD_SHARED_LIBS=OFF
-DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}
-DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS}
-DCMAKE_CXX_FLAGS_DEBUG=${CMAKE_CXX_FLAGS_DEBUG}
-DCMAKE_CXX_FLAGS_RELEASE=${CMAKE_CXX_FLAGS_RELEASE}
-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=${CMAKE_BINARY_DIR}/ext/yaml-cpp/lib
-DCMAKE_ARCHIVE_OUTPUT_DIRECTORY=${CMAKE_BINARY_DIR}/ext/yaml-cpp/lib
PREFIX "${CMAKE_BINARY_DIR}/ext/yaml-cpp"
# Disable install step
INSTALL_COMMAND ""
LOG_DOWNLOAD ON
LOG_BUILD 1
)
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <iostream>
#include <map>
#include <string>
#include <vector>
#include "yaml-cpp/yaml.h"
#ifdef _WIN32
#define OS_PATH_SEP "\\"
#else
#define OS_PATH_SEP "/"
#endif
namespace PaddleX {
// Inference model configuration parser
class ConfigPaser {
public:
ConfigPaser() {}
~ConfigPaser() {}
bool load_config(const std::string& model_dir,
const std::string& cfg = "model.yml") {
// Load as a YAML::Node
YAML::Node config;
config = YAML::LoadFile(model_dir + OS_PATH_SEP + cfg);
if (config["Transforms"].IsDefined()) {
Transforms_ = config["Transforms"];
} else {
std::cerr << "There's no field 'Transforms' in model.yml" << std::endl;
return false;
}
return true;
}
YAML::Node Transforms_;
};
}  // namespace PaddleX
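A minimal usage sketch for this parser (illustrative, not part of the original sources; assumes a model directory containing a model.yml with a Transforms field):

#include <iostream>
#include "include/paddlex/config_parser.h"

int main() {
  PaddleX::ConfigPaser parser;
  // load_config() reads <model_dir>/model.yml and fails if 'Transforms' is missing.
  if (!parser.load_config("/path/to/model_dir")) {
    std::cerr << "Failed to parse model configuration" << std::endl;
    return 1;
  }
  return 0;
}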
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <functional>
#include <iostream>
#include <numeric>
#include "yaml-cpp/yaml.h"
#ifdef _WIN32
#define OS_PATH_SEP "\\"
#else
#define OS_PATH_SEP "/"
#endif
#include <inference_engine.hpp>
#include "include/paddlex/config_parser.h"
#include "include/paddlex/results.h"
#include "include/paddlex/transforms.h"
using namespace InferenceEngine;
namespace PaddleX {
class Model {
public:
void Init(const std::string& model_dir,
const std::string& cfg_dir,
std::string device) {
create_predictor(model_dir, cfg_dir, device);
}
void create_predictor(const std::string& model_dir,
const std::string& cfg_dir,
std::string device);
bool load_config(const std::string& model_dir);
bool preprocess(cv::Mat* input_im);
bool predict(const cv::Mat& im, ClsResult* result);
std::string type;
std::string name;
std::vector<std::string> labels;
Transforms transforms_;
Blob::Ptr inputs_;
Blob::Ptr output_;
CNNNetwork network_;
ExecutableNetwork executable_network_;
};
}  // namespace PaddleX
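The intended call sequence for Model, condensed from src/classifier.cpp further below (a sketch with placeholder paths; model_dir is the OpenVINO IR .xml file exported from PaddleX, and the matching .bin is located automatically by create_predictor):

#include <iostream>
#include <opencv2/opencv.hpp>
#include "include/paddlex/paddlex.h"

int main() {
  PaddleX::Model model;
  // Load the IR pair plus the PaddleX model.yml, targeting the CPU plugin.
  model.Init("/path/to/model.xml", "/path/to/model.yml", "CPU");
  cv::Mat im = cv::imread("/path/to/test.jpg", 1);
  PaddleX::ClsResult result;
  model.predict(im, &result);
  std::cout << result.category << ": " << result.score << std::endl;
  return 0;
}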
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <iostream>
#include <string>
#include <vector>
namespace PaddleX {
template <class T>
struct Mask {
std::vector<T> data;
std::vector<int> shape;
void clear() {
data.clear();
shape.clear();
}
};
struct Box {
int category_id;
std::string category;
float score;
std::vector<float> coordinate;
Mask<float> mask;
};
class BaseResult {
public:
std::string type = "base";
};
class ClsResult : public BaseResult {
public:
int category_id;
std::string category;
float score;
std::string type = "cls";
};
class DetResult : public BaseResult {
public:
std::vector<Box> boxes;
int mask_resolution;
std::string type = "det";
void clear() { boxes.clear(); }
};
class SegResult : public BaseResult {
public:
Mask<int64_t> label_map;
Mask<float> score_map;
void clear() {
label_map.clear();
score_map.clear();
}
};
}  // namespace PaddleX
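As an illustration of how these structures are consumed, a small hypothetical helper that prints a detection result (field meanings as declared above; the exact coordinate convention depends on the model's postprocessing):

#include <iostream>
#include "include/paddlex/results.h"

void PrintDetResult(const PaddleX::DetResult& result) {
  for (const auto& box : result.boxes) {
    std::cout << box.category << " (id=" << box.category_id
              << ", score=" << box.score << "), coordinate:";
    for (float v : box.coordinate) {
      std::cout << " " << v;
    }
    std::cout << std::endl;
  }
}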
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <yaml-cpp/yaml.h>
#include <memory>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <inference_engine.hpp>
using namespace InferenceEngine;
namespace PaddleX {
// Abstract base class for preprocessing operations
class Transform {
public:
virtual void Init(const YAML::Node& item) = 0;
virtual bool Run(cv::Mat* im) = 0;
};
class Normalize : public Transform {
public:
virtual void Init(const YAML::Node& item) {
mean_ = item["mean"].as<std::vector<float>>();
std_ = item["std"].as<std::vector<float>>();
}
virtual bool Run(cv::Mat* im);
private:
std::vector<float> mean_;
std::vector<float> std_;
};
class ResizeByShort : public Transform {
public:
virtual void Init(const YAML::Node& item) {
short_size_ = item["short_size"].as<int>();
if (item["max_size"].IsDefined()) {
max_size_ = item["max_size"].as<int>();
} else {
max_size_ = -1;
}
}
virtual bool Run(cv::Mat* im);
private:
float GenerateScale(const cv::Mat& im);
int short_size_;
int max_size_;
};
class CenterCrop : public Transform {
public:
virtual void Init(const YAML::Node& item) {
if (item["crop_size"].IsScalar()) {
height_ = item["crop_size"].as<int>();
width_ = item["crop_size"].as<int>();
} else if (item["crop_size"].IsSequence()) {
std::vector<int> crop_size = item["crop_size"].as<std::vector<int>>();
width_ = crop_size[0];
height_ = crop_size[1];
}
}
virtual bool Run(cv::Mat* im);
private:
int height_;
int width_;
};
class Transforms {
public:
void Init(const YAML::Node& node, bool to_rgb = true);
std::shared_ptr<Transform> CreateTransform(const std::string& name);
bool Run(cv::Mat* im, Blob::Ptr blob);
private:
std::vector<std::shared_ptr<Transform>> transforms_;
bool to_rgb_ = true;
};
} // namespace PaddleX
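A usage sketch for the pipeline (illustrative values): the inline YAML mirrors the Transforms section of a typical classification model.yml, and `blob` is assumed to be a 1x3x224x224 FP32 Blob::Ptr obtained from an InferRequest.

YAML::Node node = YAML::Load(
    "- ResizeByShort:\n"
    "    short_size: 256\n"
    "- CenterCrop:\n"
    "    crop_size: 224\n"
    "- Normalize:\n"
    "    mean: [0.485, 0.456, 0.406]\n"
    "    std: [0.229, 0.224, 0.225]\n");
PaddleX::Transforms transforms;
transforms.Init(node, true);  // to_rgb = true: convert BGR input to RGB first
cv::Mat im = cv::imread("/path/to/test.jpg", 1);
transforms.Run(&im, blob);  // blob: assumed 1x3x224x224 FP32 input blob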
# download pre-compiled opencv lib
OPENCV_URL=https://paddleseg.bj.bcebos.com/deploy/docker/opencv3gcc4.8.tar.bz2
if [ ! -d "./deps/opencv3gcc4.8" ]; then
mkdir -p deps
cd deps
wget -c ${OPENCV_URL}
tar xvfj opencv3gcc4.8.tar.bz2
rm -rf opencv3gcc4.8.tar.bz2
cd ..
fi
# Path of the pre-compiled OpenVINO inference engine library
OPENVINO_DIR=/path/to/inference_engine/
# Path of the pre-compiled gflags library
GFLAGS_DIR=/path/to/gflags
# Path of the ngraph lib, usually generated when compiling OpenVINO
NGRAPH_LIB=/path/to/ngraph/lib/
# Path of the pre-compiled OpenCV library; no change needed if using the bundled pre-compiled version
OPENCV_DIR=$(pwd)/deps/opencv3gcc4.8/
# Download the bundled pre-compiled OpenCV
sh $(pwd)/scripts/bootstrap.sh
rm -rf build
mkdir -p build
cd build
cmake .. \
-DOPENCV_DIR=${OPENCV_DIR} \
-DGFLAGS_DIR=${GFLAGS_DIR} \
-DOPENVINO_DIR=${OPENVINO_DIR} \
-DNGRAPH_LIB=${NGRAPH_LIB}
make
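# After a successful build, the demo can be run roughly like this
# (flag names come from src/classifier.cpp; paths are placeholders):
#   ./classifier --model_dir=/path/to/model.xml \
#                --cfg_dir=/path/to/model.yml \
#                --device=CPU \
#                --image=/path/to/test.jpg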
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <glog/logging.h>
#include <fstream>
#include <iostream>
#include <string>
#include <vector>
#include "include/paddlex/paddlex.h"
DEFINE_string(model_dir, "", "Path of inference model");
DEFINE_string(cfg_dir, "", "Path of PaddleX model yml file");
DEFINE_string(device, "CPU", "Device name");
DEFINE_string(image, "", "Path of test image file");
DEFINE_string(image_list, "", "Path of test image list file");
int main(int argc, char** argv) {
// Parsing command-line
google::ParseCommandLineFlags(&argc, &argv, true);
if (FLAGS_model_dir == "") {
std::cerr << "--model_dir need to be defined" << std::endl;
return -1;
}
if (FLAGS_cfg_dir == "") {
std::cerr << "--cfg_dir need to be defined" << std::endl;
return -1;
}
if (FLAGS_image == "" & FLAGS_image_list == "") {
std::cerr << "--image or --image_list need to be defined" << std::endl;
return -1;
}
// Load the model
PaddleX::Model model;
model.Init(FLAGS_model_dir, FLAGS_cfg_dir, FLAGS_device);
// Run prediction
if (FLAGS_image_list != "") {
std::ifstream inf(FLAGS_image_list);
if (!inf) {
std::cerr << "Fail to open file " << FLAGS_image_list << std::endl;
return -1;
}
std::string image_path;
while (getline(inf, image_path)) {
PaddleX::ClsResult result;
cv::Mat im = cv::imread(image_path, 1);
model.predict(im, &result);
std::cout << "Predict label: " << result.category
<< ", label_id:" << result.category_id
<< ", score: " << result.score << std::endl;
}
} else {
PaddleX::ClsResult result;
cv::Mat im = cv::imread(FLAGS_image, 1);
model.predict(im, &result);
std::cout << "Predict label: " << result.category
<< ", label_id:" << result.category_id
<< ", score: " << result.score << std::endl;
}
return 0;
}
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "include/paddlex/paddlex.h"
using namespace InferenceEngine;
namespace PaddleX {
void Model::create_predictor(const std::string& model_dir,
const std::string& cfg_dir,
std::string device) {
Core ie;
network_ = ie.ReadNetwork(model_dir, model_dir.substr(0, model_dir.size() - 4) + ".bin");
network_.setBatchSize(1);
InputInfo::Ptr input_info = network_.getInputsInfo().begin()->second;
input_info->getPreProcess().setResizeAlgorithm(RESIZE_BILINEAR);
input_info->setLayout(Layout::NCHW);
input_info->setPrecision(Precision::FP32);
executable_network_ = ie.LoadNetwork(network_, device);
load_config(cfg_dir);
}
bool Model::load_config(const std::string& cfg_dir) {
YAML::Node config = YAML::LoadFile(cfg_dir);
type = config["_Attributes"]["model_type"].as<std::string>();
name = config["Model"].as<std::string>();
bool to_rgb = true;
if (config["TransformsMode"].IsDefined()) {
std::string mode = config["TransformsMode"].as<std::string>();
if (mode == "BGR") {
to_rgb = false;
} else if (mode != "RGB") {
std::cerr << "[Init] Only 'RGB' or 'BGR' is supported for TransformsMode"
<< std::endl;
return false;
}
}
// Build the data preprocessing pipeline
transforms_.Init(config["Transforms"], to_rgb);
// Read the label list
labels.clear();
labels = config["_Attributes"]["labels"].as<std::vector<std::string>>();
return true;
}
bool Model::preprocess(cv::Mat* input_im) {
if (!transforms_.Run(input_im, inputs_)) {
return false;
}
return true;
}
bool Model::predict(const cv::Mat& im, ClsResult* result) {
if (type == "detector") {
std::cerr << "Loading model is a 'detector', DetResult should be passed to "
"function predict()!"
<< std::endl;
return false;
} else if (type == "segmenter") {
std::cerr << "Loading model is a 'segmenter', SegResult should be passed "
"to function predict()!"
<< std::endl;
return false;
}
// Preprocess the input image
InferRequest infer_request = executable_network_.CreateInferRequest();
std::string input_name = network_.getInputsInfo().begin()->first;
inputs_ = infer_request.GetBlob(input_name);
auto im_clone = im.clone();
if (!preprocess(&im_clone)) {
std::cerr << "Preprocess failed!" << std::endl;
return false;
}
infer_request.Infer();
std::string output_name = network_.getOutputsInfo().begin()->first;
output_ = infer_request.GetBlob(output_name);
MemoryBlob::CPtr moutput = as<MemoryBlob>(output_);
auto moutputHolder = moutput->rmap();
float* outputs_data = moutputHolder.as<float *>();
// Post-process the model output: take the arg-max over the class scores
size_t output_size = moutput->size();
auto ptr = std::max_element(outputs_data, outputs_data + output_size);
result->category_id = std::distance(outputs_data, ptr);
result->score = *ptr;
result->category = labels[result->category_id];
return true;
}
}  // namespace PaddleX
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <iostream>
#include <string>
#include <vector>
#include "include/paddlex/transforms.h"
namespace PaddleX {
std::map<std::string, int> interpolations = {{"LINEAR", cv::INTER_LINEAR},
{"NEAREST", cv::INTER_NEAREST},
{"AREA", cv::INTER_AREA},
{"CUBIC", cv::INTER_CUBIC},
{"LANCZOS4", cv::INTER_LANCZOS4}};
bool Normalize::Run(cv::Mat* im) {
for (int h = 0; h < im->rows; h++) {
for (int w = 0; w < im->cols; w++) {
im->at<cv::Vec3f>(h, w)[0] =
(im->at<cv::Vec3f>(h, w)[0] / 255.0 - mean_[0]) / std_[0];
im->at<cv::Vec3f>(h, w)[1] =
(im->at<cv::Vec3f>(h, w)[1] / 255.0 - mean_[1]) / std_[1];
im->at<cv::Vec3f>(h, w)[2] =
(im->at<cv::Vec3f>(h, w)[2] / 255.0 - mean_[2]) / std_[2];
}
}
return true;
}
bool CenterCrop::Run(cv::Mat* im) {
int height = static_cast<int>(im->rows);
int width = static_cast<int>(im->cols);
if (height < height_ || width < width_) {
std::cerr << "[CenterCrop] Image size less than crop size" << std::endl;
return false;
}
int offset_x = static_cast<int>((width - width_) / 2);
int offset_y = static_cast<int>((height - height_) / 2);
cv::Rect crop_roi(offset_x, offset_y, width_, height_);
*im = (*im)(crop_roi);
return true;
}
float ResizeByShort::GenerateScale(const cv::Mat& im) {
int origin_w = im.cols;
int origin_h = im.rows;
int im_size_max = std::max(origin_w, origin_h);
int im_size_min = std::min(origin_w, origin_h);
float scale =
static_cast<float>(short_size_) / static_cast<float>(im_size_min);
if (max_size_ > 0) {
if (round(scale * im_size_max) > max_size_) {
scale = static_cast<float>(max_size_) / static_cast<float>(im_size_max);
}
}
return scale;
}
bool ResizeByShort::Run(cv::Mat* im) {
float scale = GenerateScale(*im);
int width = static_cast<int>(scale * im->cols);
int height = static_cast<int>(scale * im->rows);
cv::resize(*im, *im, cv::Size(width, height), 0, 0, cv::INTER_LINEAR);
return true;
}
void Transforms::Init(const YAML::Node& transforms_node, bool to_rgb) {
transforms_.clear();
to_rgb_ = to_rgb;
for (const auto& item : transforms_node) {
std::string name = item.begin()->first.as<std::string>();
std::cout << "trans name: " << name << std::endl;
std::shared_ptr<Transform> transform = CreateTransform(name);
transform->Init(item.begin()->second);
transforms_.push_back(transform);
}
}
std::shared_ptr<Transform> Transforms::CreateTransform(
const std::string& transform_name) {
if (transform_name == "Normalize") {
return std::make_shared<Normalize>();
} else if (transform_name == "CenterCrop") {
return std::make_shared<CenterCrop>();
} else if (transform_name == "ResizeByShort") {
return std::make_shared<ResizeByShort>();
} else {
std::cerr << "There's unexpected transform(name='" << transform_name
<< "')." << std::endl;
exit(-1);
}
}
bool Transforms::Run(cv::Mat* im, Blob::Ptr blob) {
// Apply the preprocessing operators in the order given in transforms
if (to_rgb_) {
cv::cvtColor(*im, *im, cv::COLOR_BGR2RGB);
}
(*im).convertTo(*im, CV_32FC3);
for (int i = 0; i < transforms_.size(); ++i) {
if (!transforms_[i]->Run(im)) {
std::cerr << "Apply transforms to image failed!" << std::endl;
return false;
}
}
// Convert the image from NHWC to NCHW and
// copy it into the Blob as one contiguous memory block
SizeVector blobSize = blob->getTensorDesc().getDims();
const size_t width = blobSize[3];
const size_t height = blobSize[2];
const size_t channels = blobSize[1];
MemoryBlob::Ptr mblob = InferenceEngine::as<MemoryBlob>(blob);
auto mblobHolder = mblob->wmap();
float *blob_data = mblobHolder.as<float *>();
for (size_t c = 0; c < channels; c++) {
for (size_t h = 0; h < height; h++) {
for (size_t w = 0; w < width; w++) {
blob_data[c * width * height + h * width + w] =
im->at<cv::Vec3f>(h, w)[c];
}
}
}
return true;
}
} // namespace PaddleX
......@@ -27,6 +27,7 @@ from . import det
from . import seg
from . import cls
from . import slim
from . import convertor
from . import tools
try:
......
......@@ -29,6 +29,12 @@ def arg_parser():
action="store_true",
default=False,
help="export inference model for C++/Python deployment")
parser.add_argument(
"--export_onnx",
"-eo",
action="store_true",
default=False,
help="export onnx model for deployment")
parser.add_argument(
"--fixed_input_shape",
"-fs",
......@@ -54,25 +60,34 @@ def main():
print("Repo: https://github.com/PaddlePaddle/PaddleX.git")
print("Email: paddlex@baidu.com")
return
    if args.export_inference:
        assert args.model_dir is not None, "--model_dir should be defined while exporting inference model"
        assert args.save_dir is not None, "--save_dir should be defined to save inference model"
        fixed_input_shape = None
        if args.fixed_input_shape is not None:
            fixed_input_shape = eval(args.fixed_input_shape)
            assert len(
                fixed_input_shape
            ) == 2, "len of fixed input shape must == 2, such as [224,224]"
        model = pdx.load_model(args.model_dir, fixed_input_shape)
        model.export_inference_model(args.save_dir)

    if args.export_onnx:
        assert args.model_dir is not None, "--model_dir should be defined while exporting onnx model"
        assert args.save_dir is not None, "--save_dir should be defined to create onnx model"
        assert args.fixed_input_shape is not None, "--fixed_input_shape should be defined [w,h] to create onnx model, such as [224,224]"
        fixed_input_shape = []
        if args.fixed_input_shape is not None:
            fixed_input_shape = eval(args.fixed_input_shape)
            assert len(
                fixed_input_shape
            ) == 2, "len of fixed input shape must == 2, such as [224,224]"
        model = pdx.load_model(args.model_dir, fixed_input_shape)
        pdx.convertor.export_onnx_model(model, args.save_dir)
if __name__ == "__main__":
......
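Assuming PaddleX's standard paddlex console entry point, the new flag would then be exercised roughly as follows (paths and shape are illustrative):

paddlex --export_onnx --model_dir=./output/best_model --save_dir=./onnx_model --fixed_input_shape=[224,224]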
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import paddle.fluid as fluid
import os
import sys
import paddlex as pdx
import paddlex.utils.logging as logging
__all__ = ['export_onnx']
def export_onnx(model_dir, save_dir, fixed_input_shape):
assert len(fixed_input_shape) == 2, "len of fixed input shape must == 2"
model = pdx.load_model(model_dir, fixed_input_shape)
model_name = os.path.basename(model_dir.strip('/')).split('/')[-1]
export_onnx_model(model, save_dir)
def export_onnx_model(model, save_dir):
support_list = [
'ResNet18', 'ResNet34', 'ResNet50', 'ResNet101', 'ResNet50_vd',
'ResNet101_vd', 'ResNet50_vd_ssld', 'ResNet101_vd_ssld', 'DarkNet53',
'MobileNetV1', 'MobileNetV2', 'DenseNet121', 'DenseNet161',
'DenseNet201'
]
if model.__class__.__name__ not in support_list:
raise Exception("Model: {} unsupport export to ONNX".format(
model.__class__.__name__))
try:
from fluid.utils import op_io_info, init_name_prefix
from onnx import helper, checker
import fluid_onnx.ops as ops
from fluid_onnx.variables import paddle_variable_to_onnx_tensor, paddle_onnx_weight
from debug.model_check import debug_model, Tracker
except Exception as e:
        logging.error(
            "Failed to import required modules. Please install paddle2onnx; "
            "see https://github.com/PaddlePaddle/paddle2onnx for requirements.")
raise e
place = fluid.CPUPlace()
exe = fluid.Executor(place)
inference_scope = fluid.global_scope()
with fluid.scope_guard(inference_scope):
test_input_names = [
var.name for var in list(model.test_inputs.values())
]
inputs_outputs_list = ["fetch", "feed"]
weights, weights_value_info = [], []
global_block = model.test_prog.global_block()
for var_name in global_block.vars:
var = global_block.var(var_name)
if var_name not in test_input_names\
and var.persistable:
weight, val_info = paddle_onnx_weight(
var=var, scope=inference_scope)
weights.append(weight)
weights_value_info.append(val_info)
# Create inputs
inputs = [
paddle_variable_to_onnx_tensor(v, global_block)
for v in test_input_names
]
logging.INFO("load the model parameter done.")
onnx_nodes = []
op_check_list = []
op_trackers = []
nms_first_index = -1
nms_outputs = []
for block in model.test_prog.blocks:
for op in block.ops:
if op.type in ops.node_maker:
# TODO: deal with the corner case that vars in
# different blocks have the same name
node_proto = ops.node_maker[str(op.type)](
operator=op, block=block)
op_outputs = []
last_node = None
if isinstance(node_proto, tuple):
onnx_nodes.extend(list(node_proto))
last_node = list(node_proto)
else:
onnx_nodes.append(node_proto)
last_node = [node_proto]
tracker = Tracker(str(op.type), last_node)
op_trackers.append(tracker)
op_check_list.append(str(op.type))
if op.type == "multiclass_nms" and nms_first_index < 0:
nms_first_index = 0
if nms_first_index >= 0:
_, _, output_op = op_io_info(op)
for output in output_op:
nms_outputs.extend(output_op[output])
else:
if op.type not in ['feed', 'fetch']:
op_check_list.append(op.type)
        logging.info('Operator set used by the model:')
logging.info(set(op_check_list))
# Create outputs
# Get the new names for outputs if they've been renamed in nodes' making
renamed_outputs = op_io_info.get_all_renamed_outputs()
test_outputs = list(model.test_outputs.values())
test_outputs_names = [var.name for var in model.test_outputs.values()]
test_outputs_names = [
name if name not in renamed_outputs else renamed_outputs[name]
for name in test_outputs_names
]
outputs = [
paddle_variable_to_onnx_tensor(v, global_block)
for v in test_outputs_names
]
# Make graph
onnx_name = 'paddlex.onnx'
onnx_graph = helper.make_graph(
nodes=onnx_nodes,
name=onnx_name,
initializer=weights,
inputs=inputs + weights_value_info,
outputs=outputs)
# Make model
onnx_model = helper.make_model(
onnx_graph, producer_name='PaddlePaddle')
# Model check
checker.check_model(onnx_model)
if onnx_model is not None:
onnx_model_file = os.path.join(save_dir, onnx_name)
if not os.path.exists(save_dir):
os.mkdir(save_dir)
with open(onnx_model_file, 'wb') as f:
f.write(onnx_model.SerializeToString())
logging.info("Saved converted model to path: %s" % onnx_model_file)
......@@ -15,6 +15,7 @@
from __future__ import absolute_import
import paddle.fluid as fluid
import os
import sys
import numpy as np
import time
import math
......@@ -251,6 +252,9 @@ class BaseAPI:
del self.init_params['self']
if '__class__' in self.init_params:
del self.init_params['__class__']
if 'model_name' in self.init_params:
del self.init_params['model_name']
info['_init_params'] = self.init_params
info['_Attributes']['num_classes'] = self.num_classes
......
......@@ -38,12 +38,9 @@ def load_model(model_dir, fixed_input_shape=None):
if not hasattr(paddlex.cv.models, info['Model']):
raise Exception("There's no attribute {} in paddlex.cv.models".format(
info['Model']))
    if 'model_name' in info['_init_params']:
        del info['_init_params']['model_name']
    model = getattr(paddlex.cv.models, info['Model'])(**info['_init_params'])
model.fixed_input_shape = fixed_input_shape
if status == "Normal" or \
status == "Prune" or status == "fluid.save":
......