Commit 26466dcd authored by syyxsxx

fix code style

Parent bf07b81c
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <glog/logging.h>
#include <omp.h>
@@ -49,7 +63,7 @@ int main(int argc, char** argv) {
  // Run prediction
  if (FLAGS_image_list != "") {
    std::ifstream inf(FLAGS_image_list);
    if (!inf) {
      std::cerr << "Fail to open file " << FLAGS_image_list << std::endl;
      return -1;
    }
@@ -58,16 +72,16 @@ int main(int argc, char** argv) {
      PaddleX::DetResult result;
      cv::Mat im = cv::imread(image_path, 1);
      model.predict(im, &result);
      if (FLAGS_save_dir != "") {
        cv::Mat vis_img = PaddleX::Visualize(
            im, result, model.labels, colormap, FLAGS_threshold);
        std::string save_path =
            PaddleX::generate_save_path(FLAGS_save_dir, FLAGS_image);
        cv::imwrite(save_path, vis_img);
        std::cout << "Visualized output saved as " << save_path << std::endl;
      }
    }
  } else {
    PaddleX::DetResult result;
    cv::Mat im = cv::imread(FLAGS_image, 1);
    model.predict(im, &result);
@@ -81,10 +95,10 @@ int main(int argc, char** argv) {
                << result.boxes[i].coordinate[2] << ", "
                << result.boxes[i].coordinate[3] << ")" << std::endl;
    }
    if (FLAGS_save_dir != "") {
      // Visualize
      cv::Mat vis_img = PaddleX::Visualize(
          im, result, model.labels, colormap, FLAGS_threshold);
      std::string save_path =
          PaddleX::generate_save_path(FLAGS_save_dir, FLAGS_image);
      cv::imwrite(save_path, vis_img);
@@ -94,5 +108,3 @@ int main(int argc, char** argv) {
  }
  return 0;
}
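The hunk above elides how paths are pulled from FLAGS_image_list. A minimal sketch of the usual pattern, assuming the list file holds one image path per line (the loop variable image_path is the one used inside the hunk; this loop is not part of the diff):

std::string image_path;
while (std::getline(inf, image_path)) {
  PaddleX::DetResult result;
  cv::Mat im = cv::imread(image_path, 1);
  model.predict(im, &result);
  // ... visualize and save as in the hunk above ...
}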
@@ -69,7 +69,7 @@ int main(int argc, char** argv) {
      PaddleX::SegResult result;
      cv::Mat im = cv::imread(image_path, 1);
      model.predict(im, &result);
      if (FLAGS_save_dir != "") {
        cv::Mat vis_img = PaddleX::Visualize(im, result, model.labels, colormap);
        std::string save_path =
            PaddleX::generate_save_path(FLAGS_save_dir, image_path);
@@ -77,13 +77,11 @@ int main(int argc, char** argv) {
        std::cout << "Visualized output saved as " << save_path << std::endl;
      }
    }
  } else {
    PaddleX::SegResult result;
    cv::Mat im = cv::imread(FLAGS_image, 1);
    model.predict(im, &result);
    if (FLAGS_save_dir != "") {
      cv::Mat vis_img = PaddleX::Visualize(im, result, model.labels, colormap);
      std::string save_path =
          PaddleX::generate_save_path(FLAGS_save_dir, FLAGS_image);
......
@@ -54,4 +54,4 @@ class ConfigPaser {
  YAML::Node Transforms_;
};
}  // namespace PaddleX
@@ -17,7 +17,8 @@
#include <functional>
#include <iostream>
#include <numeric>
#include <map>
#include <string>
#include "yaml-cpp/yaml.h"
@@ -31,7 +32,7 @@
#include "include/paddlex/config_parser.h"
#include "include/paddlex/results.h"
#include "include/paddlex/transforms.h"

namespace PaddleX {
@@ -63,8 +64,8 @@ class Model {
  std::map<int, std::string> labels;
  Transforms transforms_;
  ImageBlob inputs_;
  InferenceEngine::Blob::Ptr output_;
  InferenceEngine::CNNNetwork network_;
  InferenceEngine::ExecutableNetwork executable_network_;
};
}  // namespace PaddleX
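The substance of this hunk is dropping "using namespace InferenceEngine;" from the header: a header-level using-directive injects the whole namespace into every translation unit that includes it. A minimal sketch of the resulting convention (the names here are illustrative, not part of the diff):

// In headers: always fully qualify Inference Engine types.
InferenceEngine::Blob::Ptr output;

// In a .cpp file, a local namespace alias is still fine when brevity helps:
namespace IE = InferenceEngine;
IE::Blob::Ptr also_fine;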
@@ -68,4 +68,4 @@ class SegResult : public BaseResult {
    score_map.clear();
  }
};
}  // namespace PaddleX
@@ -16,18 +16,17 @@
#include <yaml-cpp/yaml.h>

#include <memory>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>

#include <inference_engine.hpp>

namespace PaddleX {
@@ -38,8 +37,7 @@ namespace PaddleX {
class ImageBlob {
 public:
  // Original image height and width
  InferenceEngine::Blob::Ptr ori_im_size_;
  // Newest image height and width after process
  std::vector<int> new_im_size_ = std::vector<int>(2);
@@ -50,7 +48,7 @@ class ImageBlob {
  // Resize scale
  float scale = 1.0;
  // Buffer for image data after preprocessing
  InferenceEngine::Blob::Ptr blob;
  void clear() {
    im_size_before_resize_.clear();
@@ -90,7 +88,7 @@ class ResizeByShort : public Transform {
    } else {
      max_size_ = -1;
    }
  }
  virtual bool Run(cv::Mat* im, ImageBlob* data);

 private:
@@ -196,8 +194,7 @@ class Padding : public Transform {
    }
    if (item["im_padding_value"].IsDefined()) {
      im_value_ = item["im_padding_value"].as<std::vector<float>>();
    } else {
      im_value_ = {0, 0, 0};
    }
  }
......
@@ -16,36 +16,37 @@
#include <iostream>
#include <fstream>

namespace PaddleX {

void Model::create_predictor(const std::string& model_dir,
                             const std::string& cfg_dir,
                             std::string device) {
  InferenceEngine::Core ie;
  network_ = ie.ReadNetwork(
      model_dir, model_dir.substr(0, model_dir.size() - 4) + ".bin");
  network_.setBatchSize(1);
  InferenceEngine::InputsDataMap inputInfo(network_.getInputsInfo());
  std::string imageInputName;
  for (const auto & inputInfoItem : inputInfo) {
    if (inputInfoItem.second->getTensorDesc().getDims().size() == 4) {
      imageInputName = inputInfoItem.first;
      inputInfoItem.second->setPrecision(Precision::FP32);
      inputInfoItem.second->getPreProcess().setResizeAlgorithm(
          RESIZE_BILINEAR);
      inputInfoItem.second->setLayout(Layout::NCHW);
    }
    if (inputInfoItem.second->getTensorDesc().getDims().size() == 2) {
      imageInputName = inputInfoItem.first;
      inputInfoItem.second->setPrecision(Precision::FP32);
    }
  }
  if (device == "MYRIAD") {
    std::map<std::string, std::string> networkConfig;
    networkConfig["VPU_HW_STAGES_OPTIMIZATION"] = "ON";
    executable_network_ = ie.LoadNetwork(network_, device, networkConfig);
  } else {
    executable_network_ = ie.LoadNetwork(network_, device);
  }
  load_config(cfg_dir);
@@ -98,7 +99,8 @@ bool Model::predict(const cv::Mat& im, ClsResult* result) {
    return false;
  }
  // Preprocess the input image
  InferenceEngine::InferRequest infer_request =
      executable_network_.CreateInferRequest();
  std::string input_name = network_.getInputsInfo().begin()->first;
  inputs_.blob = infer_request.GetBlob(input_name);
  cv::Mat im_clone = im.clone();
@@ -107,13 +109,12 @@ bool Model::predict(const cv::Mat& im, ClsResult* result) {
    return false;
  }
  infer_request.Infer();

  std::string output_name = network_.getOutputsInfo().begin()->first;
  output_ = infer_request.GetBlob(output_name);
  InferenceEngine::MemoryBlob::CPtr moutput =
      as<InferenceEngine::MemoryBlob>(output_);
  auto moutputHolder = moutput->rmap();
  float* outputs_data = moutputHolder.as<float *>();
@@ -122,7 +123,7 @@ bool Model::predict(const cv::Mat& im, ClsResult* result) {
  result->category_id = std::distance(outputs_data, ptr);
  result->score = *ptr;
  result->category = labels[result->category_id];
  return true;
}

bool Model::predict(const cv::Mat& im, DetResult* result) {
@@ -137,15 +138,16 @@ bool Model::predict(const cv::Mat& im, DetResult* result) {
                 "to function predict()!" << std::endl;
    return false;
  }
  InferenceEngine::InferRequest infer_request =
      executable_network_.CreateInferRequest();
  InferenceEngine::InputsDataMap input_maps = network_.getInputsInfo();
  std::string inputName;
  for (const auto & input_map : input_maps) {
    if (input_map.second->getTensorDesc().getDims().size() == 4) {
      inputName = input_map.first;
      inputs_.blob = infer_request.GetBlob(inputName);
    }
    if (input_map.second->getTensorDesc().getDims().size() == 2) {
      inputName = input_map.first;
      inputs_.ori_im_size_ = infer_request.GetBlob(inputName);
    }
@@ -156,27 +158,25 @@ bool Model::predict(const cv::Mat& im, DetResult* result) {
    return false;
  }
  infer_request.Infer();
  InferenceEngine::OutputsDataMap out_map = network_.getOutputsInfo();
  auto iter = out_map.begin();
  std::string outputName = iter->first;
  InferenceEngine::Blob::Ptr output = infer_request.GetBlob(outputName);
  InferenceEngine::MemoryBlob::CPtr moutput =
      as<InferenceEngine::MemoryBlob>(output);
  InferenceEngine::TensorDesc blob_output = moutput->getTensorDesc();
  std::vector<size_t> output_shape = blob_output.getDims();
  auto moutputHolder = moutput->rmap();
  float* data = moutputHolder.as<float *>();
  int size = 1;
  for (auto& i : output_shape) {
    size *= static_cast<int>(i);
  }
  int num_boxes = size / 6;
  for (int i = 0; i < num_boxes; ++i) {
    if (data[i * 6] > 0) {
      Box box;
      box.category_id = static_cast<int>(data[i * 6]);
      box.category = labels[box.category_id];
@@ -207,7 +207,8 @@ bool Model::predict(const cv::Mat& im, SegResult* result) {
    return false;
  }
  //
  InferenceEngine::InferRequest infer_request =
      executable_network_.CreateInferRequest();
  std::string input_name = network_.getInputsInfo().begin()->first;
  inputs_.blob = infer_request.GetBlob(input_name);
@@ -220,19 +221,19 @@ bool Model::predict(const cv::Mat& im, SegResult* result) {
  //
  infer_request.Infer();
  if (count_num_ >= 20) {
    total_time_ = total_time_ + time_used.count();
  }
  InferenceEngine::OutputsDataMap out_map = network_.getOutputsInfo();
  auto iter = out_map.begin();
  iter++;
  std::string output_name_score = iter->first;
  InferenceEngine::Blob::Ptr output_score =
      infer_request.GetBlob(output_name_score);
  InferenceEngine::MemoryBlob::CPtr moutput_score =
      as<InferenceEngine::MemoryBlob>(output_score);
  InferenceEngine::TensorDesc blob_score = moutput_score->getTensorDesc();
  std::vector<size_t> output_score_shape = blob_score.getDims();
  int size = 1;
  for (auto& i : output_score_shape) {
@@ -242,14 +243,15 @@ bool Model::predict(const cv::Mat& im, SegResult* result) {
  result->score_map.data.resize(size);
  auto moutputHolder_score = moutput_score->rmap();
  float* score_data = moutputHolder_score.as<float *>();
  memcpy(result->score_map.data.data(), score_data, moutput_score->byteSize());
  iter++;
  std::string output_name_label = iter->first;
  InferenceEngine::Blob::Ptr output_label =
      infer_request.GetBlob(output_name_label);
  InferenceEngine::MemoryBlob::CPtr moutput_label =
      as<InferenceEngine::MemoryBlob>(output_label);
  InferenceEngine::TensorDesc blob_label = moutput_label->getTensorDesc();
  std::vector<size_t> output_label_shape = blob_label.getDims();
  size = 1;
  for (auto& i : output_label_shape) {
@@ -259,7 +261,7 @@ bool Model::predict(const cv::Mat& im, SegResult* result) {
  result->label_map.data.resize(size);
  auto moutputHolder_label = moutput_label->rmap();
  int* label_data = moutputHolder_label.as<int *>();
  memcpy(result->label_map.data.data(), label_data, moutput_label->byteSize());
@@ -315,5 +317,4 @@ bool Model::predict(const cv::Mat& im, SegResult* result) {
  result->score_map.shape = {mask_score.rows, mask_score.cols};
  return true;
}
}  // namespace PaddleX
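Taken together, the calls in this file follow the standard Inference Engine round trip. A condensed, self-contained sketch of that sequence, assuming an IR model pair model.xml/model.bin with a single input and output (file names are illustrative; error handling omitted):

#include <inference_engine.hpp>
#include <string>

int run() {
  InferenceEngine::Core ie;
  InferenceEngine::CNNNetwork network =
      ie.ReadNetwork("model.xml", "model.bin");
  network.setBatchSize(1);
  InferenceEngine::ExecutableNetwork executable =
      ie.LoadNetwork(network, "CPU");
  InferenceEngine::InferRequest request = executable.CreateInferRequest();
  std::string input_name = network.getInputsInfo().begin()->first;
  std::string output_name = network.getOutputsInfo().begin()->first;
  // ... fill request.GetBlob(input_name) with NCHW float data ...
  request.Infer();
  InferenceEngine::MemoryBlob::CPtr moutput =
      InferenceEngine::as<InferenceEngine::MemoryBlob>(
          request.GetBlob(output_name));
  auto holder = moutput->rmap();  // read-only mapping of the output
  const float* scores = holder.as<const float*>();
  return scores == nullptr ? 1 : 0;
}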
@@ -12,13 +12,15 @@
// See the License for the specific language governing permissions and
// limitations under the License.

#include "include/paddlex/transforms.h"

#include <math.h>

#include <iostream>
#include <fstream>
#include <string>
#include <vector>

namespace PaddleX {
@@ -28,7 +30,7 @@ std::map<std::string, int> interpolations = {{"LINEAR", cv::INTER_LINEAR},
                                             {"CUBIC", cv::INTER_CUBIC},
                                             {"LANCZOS4", cv::INTER_LANCZOS4}};

bool Normalize::Run(cv::Mat* im, ImageBlob* data) {
  for (int h = 0; h < im->rows; h++) {
    for (int w = 0; w < im->cols; w++) {
      im->at<cv::Vec3f>(h, w)[0] =
@@ -62,16 +64,13 @@ float ResizeByShort::GenerateScale(const cv::Mat& im) {
bool ResizeByShort::Run(cv::Mat* im, ImageBlob* data) {
  data->im_size_before_resize_.push_back({im->rows, im->cols});
  data->reshape_order_.push_back("resize");
  float scale = GenerateScale(*im);
  int width = static_cast<int>(round(scale * im->cols));
  int height = static_cast<int>(round(scale * im->rows));
  cv::resize(*im, *im, cv::Size(width, height), 0, 0, cv::INTER_LINEAR);
  data->new_im_size_[0] = im->rows;
  data->new_im_size_[1] = im->cols;
  data->scale = scale;
  return true;
}
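GenerateScale itself lies outside this hunk. In PaddleX-style preprocessing it conventionally scales the short side up to short_size_ while capping the long side at max_size_; a hedged sketch of that convention (assumed, not taken from this diff):

float ResizeByShort::GenerateScale(const cv::Mat& im) {
  int im_size_min = std::min(im.cols, im.rows);   // short side
  int im_size_max = std::max(im.cols, im.rows);   // long side
  float scale =
      static_cast<float>(short_size_) / static_cast<float>(im_size_min);
  // If a max size is set, never let the long side exceed it.
  if (max_size_ > 0 && round(scale * im_size_max) > max_size_) {
    scale = static_cast<float>(max_size_) / static_cast<float>(im_size_max);
  }
  return scale;
}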
@@ -166,7 +165,8 @@ bool Resize::Run(cv::Mat* im, ImageBlob* data) {
  return true;
}

void Transforms::Init(
    const YAML::Node& transforms_node, std::string type, bool to_rgb) {
  transforms_.clear();
  to_rgb_ = to_rgb;
  type_ = type;
@@ -206,20 +206,18 @@ bool Transforms::Run(cv::Mat* im, ImageBlob* data) {
    cv::cvtColor(*im, *im, cv::COLOR_BGR2RGB);
  }
  (*im).convertTo(*im, CV_32FC3);
  if (type_ == "detector") {
    LockedMemory<void> input2Mapped = as<MemoryBlob>(
        data->ori_im_size_)->wmap();
    float *p = input2Mapped.as<float*>();
    p[0] = im->rows;
    p[1] = im->cols;
  }
  data->new_im_size_[0] = im->rows;
  data->new_im_size_[1] = im->cols;
  for (int i = 0; i < transforms_.size(); ++i) {
    if (!transforms_[i]->Run(im, data)) {
      std::cerr << "Apply transforms to image failed!" << std::endl;
      return false;
    }
@@ -227,15 +225,14 @@ bool Transforms::Run(cv::Mat* im, ImageBlob* data) {
  // Convert the image from NHWC to NCHW layout
  // and store it in the Blob as one contiguous memory block
  InferenceEngine::SizeVector blobSize = data->blob->getTensorDesc().getDims();
  const size_t width = blobSize[3];
  const size_t height = blobSize[2];
  const size_t channels = blobSize[1];
  MemoryBlob::Ptr mblob = InferenceEngine::as<InferenceEngine::MemoryBlob>(
      data->blob);
  auto mblobHolder = mblob->wmap();
  float *blob_data = mblobHolder.as<float *>();
  for (size_t c = 0; c < channels; c++) {
    for (size_t h = 0; h < height; h++) {
      for (size_t w = 0; w < width; w++) {
......
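The hunk is truncated inside the layout-conversion loop; the conventional body (assumed here, using the variables declared just above) writes pixel (h, w) of channel c to the planar NCHW offset c*H*W + h*W + w:

for (size_t c = 0; c < channels; c++) {
  for (size_t h = 0; h < height; h++) {
    for (size_t w = 0; w < width; w++) {
      // Interleaved HWC pixel -> planar CHW position in the blob.
      blob_data[c * width * height + h * width + w] =
          im->at<cv::Vec3f>(h, w)[c];
    }
  }
}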
@@ -63,7 +63,7 @@ int main(int argc, char** argv) {
  // Run prediction
  if (FLAGS_image_list != "") {
    std::ifstream inf(FLAGS_image_list);
    if (!inf) {
      std::cerr << "Fail to open file " << FLAGS_image_list << std::endl;
      return -1;
    }
@@ -73,16 +73,16 @@ int main(int argc, char** argv) {
      PaddleX::DetResult result;
      cv::Mat im = cv::imread(image_path, 1);
      model.predict(im, &result);
      if (FLAGS_save_dir != "") {
        cv::Mat vis_img = PaddleX::Visualize(
            im, result, model.labels, colormap, FLAGS_threshold);
        std::string save_path =
            PaddleX::generate_save_path(FLAGS_save_dir, FLAGS_image);
        cv::imwrite(save_path, vis_img);
        std::cout << "Visualized output saved as " << save_path << std::endl;
      }
    }
  } else {
    PaddleX::DetResult result;
    cv::Mat im = cv::imread(FLAGS_image, 1);
    model.predict(im, &result);
@@ -96,10 +96,10 @@ int main(int argc, char** argv) {
                << result.boxes[i].coordinate[2] << ", "
                << result.boxes[i].coordinate[3] << ")" << std::endl;
    }
    if (FLAGS_save_dir != "") {
      // Visualize
      cv::Mat vis_img = PaddleX::Visualize(
          im, result, model.labels, colormap, FLAGS_threshold);
      std::string save_path =
          PaddleX::generate_save_path(FLAGS_save_dir, FLAGS_image);
      cv::imwrite(save_path, vis_img);
@@ -109,5 +109,3 @@ int main(int argc, char** argv) {
  }
  return 0;
}
@@ -54,7 +54,6 @@ int main(int argc, char** argv) {
  std::cout << "init done" << std::endl;
  int imgs = 1;
  auto colormap = PaddleX::GenerateColorMap(model.labels.size());
  if (FLAGS_image_list != "") {
    std::ifstream inf(FLAGS_image_list);
    if (!inf) {
@@ -67,23 +66,19 @@ int main(int argc, char** argv) {
      PaddleX::SegResult result;
      cv::Mat im = cv::imread(image_path, 1);
      model.predict(im, &result);
      if (FLAGS_save_dir != "") {
        cv::Mat vis_img = PaddleX::Visualize(im, result, model.labels, colormap);
        std::string save_path =
            PaddleX::generate_save_path(FLAGS_save_dir, image_path);
        cv::imwrite(save_path, vis_img);
        std::cout << "Visualized output saved as " << save_path << std::endl;
      }
    }
  } else {
    PaddleX::SegResult result;
    cv::Mat im = cv::imread(FLAGS_image, 1);
    model.predict(im, &result);
    if (FLAGS_save_dir != "") {
      cv::Mat vis_img = PaddleX::Visualize(im, result, model.labels, colormap);
      std::string save_path =
          PaddleX::generate_save_path(FLAGS_save_dir, FLAGS_image);
......
@@ -54,4 +54,4 @@ class ConfigPaser {
  YAML::Node Transforms_;
};
}  // namespace PaddleX
@@ -14,26 +14,35 @@
#pragma once

#include <arm_neon.h>
#include <paddle_api.h>

#include <functional>
#include <iostream>
#include <numeric>
#include <map>
#include <string>
#include <memory>

#include "include/paddlex/config_parser.h"
#include "include/paddlex/results.h"
#include "include/paddlex/transforms.h"

#include "yaml-cpp/yaml.h"

#ifdef _WIN32
#define OS_PATH_SEP "\\"
#else
#define OS_PATH_SEP "/"
#endif

namespace PaddleX {
@@ -65,7 +74,6 @@ class Model {
  std::map<int, std::string> labels;
  Transforms transforms_;
  ImageBlob inputs_;
  std::shared_ptr<paddle::lite_api::PaddlePredictor> predictor_;
};
}  // namespace PaddleX
@@ -68,4 +68,4 @@ class SegResult : public BaseResult {
    score_map.clear();
  }
};
}  // namespace PaddleX
@@ -15,6 +15,7 @@
#pragma once

#include <yaml-cpp/yaml.h>
#include <paddle_api.h>

#include <memory>
#include <string>
@@ -26,9 +27,7 @@
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>

namespace PaddleX {
@@ -90,7 +89,7 @@ class ResizeByShort : public Transform {
    } else {
      max_size_ = -1;
    }
  }
  virtual bool Run(cv::Mat* im, ImageBlob* data);

 private:
@@ -196,8 +195,7 @@ class Padding : public Transform {
    }
    if (item["im_padding_value"].IsDefined()) {
      im_value_ = item["im_padding_value"].as<std::vector<float>>();
    } else {
      im_value_ = {0, 0, 0};
    }
  }
......
@@ -16,18 +16,17 @@
#include <iostream>
#include <fstream>

namespace PaddleX {

void Model::create_predictor(const std::string& model_dir,
                             const std::string& cfg_dir,
                             int thread_num) {
  paddle::lite_api::MobileConfig config;
  config.set_model_from_file(model_dir);
  config.set_threads(thread_num);
  load_config(cfg_dir);
  predictor_ = CreatePaddlePredictor<paddle::lite_api::MobileConfig>(config);
}
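For reference, a minimal usage sketch of the Paddle-Lite setup above, assuming a model.nb file produced by the opt converter (the file name and thread count are illustrative):

paddle::lite_api::MobileConfig config;
config.set_model_from_file("model.nb");
config.set_threads(2);
std::shared_ptr<paddle::lite_api::PaddlePredictor> predictor =
    paddle::lite_api::CreatePaddlePredictor<paddle::lite_api::MobileConfig>(
        config);
// Inputs and outputs are then obtained via predictor->GetInput(i) and
// predictor->GetOutput(i), as the predict() methods below demonstrate.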
bool Model::load_config(const std::string& cfg_dir) {
@@ -84,11 +83,10 @@ bool Model::predict(const cv::Mat& im, ClsResult* result) {
    return false;
  }
  predictor_->Run();
  std::unique_ptr<const paddle::lite_api::Tensor> output_tensor(
      std::move(predictor_->GetOutput(0)));
  const float *outputs_data = output_tensor->mutable_data<float>();
@@ -97,10 +95,6 @@ bool Model::predict(const cv::Mat& im, ClsResult* result) {
  result->category_id = std::distance(outputs_data, ptr);
  result->score = *ptr;
  result->category = labels[result->category_id];
  return true;  // added: predict() is declared bool and must return a value
}

bool Model::predict(const cv::Mat& im, DetResult* result) {
@@ -115,7 +109,6 @@ bool Model::predict(const cv::Mat& im, DetResult* result) {
                 "to function predict()!" << std::endl;
    return false;
  }
  inputs_.input_tensor_ = std::move(predictor_->GetInput(0));
  cv::Mat im_clone = im.clone();
@@ -126,18 +119,14 @@ bool Model::predict(const cv::Mat& im, DetResult* result) {
  int h = inputs_.new_im_size_[0];
  int w = inputs_.new_im_size_[1];
  if (name == "YOLOv3") {
    std::unique_ptr<paddle::lite_api::Tensor> im_size_tensor(
        std::move(predictor_->GetInput(1)));
    const std::vector<int64_t> IM_SIZE_SHAPE = {1, 2};
    im_size_tensor->Resize(IM_SIZE_SHAPE);
    auto *im_size_data = im_size_tensor->mutable_data<int>();
    memcpy(im_size_data, inputs_.ori_im_size_.data(), 1*2*sizeof(int));
  }
  predictor_->Run();
  auto output_names = predictor_->GetOutputNames();
  auto output_box_tensor = predictor_->GetTensor(output_names[0]);
  const float *output_box = output_box_tensor->mutable_data<float>();
@@ -177,27 +166,17 @@ bool Model::predict(const cv::Mat& im, SegResult* result) {
                 "function predict()!" << std::endl;
    return false;
  }
  inputs_.input_tensor_ = std::move(predictor_->GetInput(0));
  cv::Mat im_clone = im.clone();
  if (!preprocess(&im_clone, &inputs_)) {
    std::cerr << "Preprocess failed!" << std::endl;
    return false;
  }
  std::cout << "Preprocess is done" << std::endl;
  predictor_->Run();
  auto output_names = predictor_->GetOutputNames();
  auto output_label_tensor = predictor_->GetTensor(output_names[0]);
  const int64_t *label_data = output_label_tensor->mutable_data<int64_t>();
  std::vector<int64_t> output_label_shape = output_label_tensor->shape();
  int size = 1;
@@ -272,5 +251,4 @@ bool Model::predict(const cv::Mat& im, SegResult* result) {
  result->score_map.shape = {mask_score.rows, mask_score.cols};
  return true;
}
}  // namespace PaddleX
@@ -12,12 +12,16 @@
// See the License for the specific language governing permissions and
// limitations under the License.

#include "include/paddlex/transforms.h"

#include <math.h>

#include <iostream>
#include <string>
#include <vector>

namespace PaddleX {
@@ -27,7 +31,7 @@ std::map<std::string, int> interpolations = {{"LINEAR", cv::INTER_LINEAR},
                                             {"CUBIC", cv::INTER_CUBIC},
                                             {"LANCZOS4", cv::INTER_LANCZOS4}};

bool Normalize::Run(cv::Mat* im, ImageBlob* data) {
  for (int h = 0; h < im->rows; h++) {
    for (int w = 0; w < im->cols; w++) {
      im->at<cv::Vec3f>(h, w)[0] =
@@ -66,11 +70,9 @@ bool ResizeByShort::Run(cv::Mat* im, ImageBlob* data) {
  int width = static_cast<int>(round(scale * im->cols));
  int height = static_cast<int>(round(scale * im->rows));
  cv::resize(*im, *im, cv::Size(width, height), 0, 0, cv::INTER_LINEAR);
  data->new_im_size_[0] = im->rows;
  data->new_im_size_[1] = im->cols;
  data->scale = scale;
  return true;
}
@@ -204,7 +206,6 @@ bool Transforms::Run(cv::Mat* im, ImageBlob* data) {
    cv::cvtColor(*im, *im, cv::COLOR_BGR2RGB);
  }
  (*im).convertTo(*im, CV_32FC3);
  data->ori_im_size_[0] = im->rows;
  data->ori_im_size_[1] = im->cols;
  data->new_im_size_[0] = im->rows;
@@ -219,14 +220,12 @@ bool Transforms::Run(cv::Mat* im, ImageBlob* data) {
  // Convert the image from NHWC to NCHW layout
  // and store it in the input tensor as one contiguous memory block
  int height = im->rows;
  int width = im->cols;
  int channels = im->channels();
  const std::vector<int64_t> INPUT_SHAPE = {1, channels, height, width};
  data->input_tensor_->Resize(INPUT_SHAPE);
  auto *input_data = data->input_tensor_->mutable_data<float>();
  for (size_t c = 0; c < channels; c++) {
    for (size_t h = 0; h < height; h++) {
      for (size_t w = 0; w < width; w++) {
......
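As in the OpenVINO version, the diff cuts off inside the copy loop; the conventional body (assumed here) fills the freshly resized lite input tensor in planar NCHW order:

for (size_t c = 0; c < channels; c++) {
  for (size_t h = 0; h < height; h++) {
    for (size_t w = 0; w < width; w++) {
      // Interleaved HWC pixel -> planar CHW position in the tensor.
      input_data[c * width * height + h * width + w] =
          im->at<cv::Vec3f>(h, w)[c];
    }
  }
}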