Commit adac158d authored by: J joey12300

change utils.h, seg_conf_parser.h to google code style.

Parent 0fc9b582
@@ -13,14 +13,13 @@
// limitations under the License.
#pragma once
#include <yaml-cpp/yaml.h>
#include <iostream>
#include <vector>
#include <string>
#include <yaml-cpp/yaml.h>
namespace PaddleSolution {
class PaddleSegModelConfigPaser {
class PaddleSegModelConfigPaser {
public:
PaddleSegModelConfigPaser()
:_class_num(0),
@@ -70,7 +69,6 @@ namespace PaddleSolution {
}
bool load_config(const std::string& conf_file) {
reset();
YAML::Node config = YAML::LoadFile(conf_file);
@@ -97,7 +95,8 @@ namespace PaddleSolution {
// 8. get model file_name
_model_file_name = config["DEPLOY"]["MODEL_FILENAME"].as<std::string>();
// 9. get model param file name
_param_file_name = config["DEPLOY"]["PARAMS_FILENAME"].as<std::string>();
_param_file_name =
config["DEPLOY"]["PARAMS_FILENAME"].as<std::string>();
// 10. get pre_processor
_pre_processor = config["DEPLOY"]["PRE_PROCESSOR"].as<std::string>();
// 11. use_gpu
@@ -112,9 +111,9 @@ namespace PaddleSolution {
}
void debug() const {
std::cout << "EVAL_CROP_SIZE: (" << _resize[0] << ", " << _resize[1] << ")" << std::endl;
std::cout << "EVAL_CROP_SIZE: ("
<< _resize[0] << ", " << _resize[1]
<< ")" << std::endl;
std::cout << "MEAN: [";
for (int i = 0; i < _mean.size(); ++i) {
if (i != _mean.size() - 1) {
@@ -129,8 +128,7 @@ namespace PaddleSolution {
for (int i = 0; i < _std.size(); ++i) {
if (i != _std.size() - 1) {
std::cout << _std[i] << ", ";
}
else {
} else {
std::cout << _std[i];
}
}
@@ -141,7 +139,8 @@ namespace PaddleSolution {
std::cout << "DEPLOY.CHANNELS: " << _channels << std::endl;
std::cout << "DEPLOY.MODEL_PATH: " << _model_path << std::endl;
std::cout << "DEPLOY.MODEL_FILENAME: " << _model_file_name << std::endl;
std::cout << "DEPLOY.PARAMS_FILENAME: " << _param_file_name << std::endl;
std::cout << "DEPLOY.PARAMS_FILENAME: "
<< _param_file_name << std::endl;
std::cout << "DEPLOY.PRE_PROCESSOR: " << _pre_processor << std::endl;
std::cout << "DEPLOY.USE_GPU: " << _use_gpu << std::endl;
std::cout << "DEPLOY.PREDICTOR_MODE: " << _predictor_mode << std::endl;
@@ -174,6 +173,6 @@ namespace PaddleSolution {
std::string _predictor_mode;
// DEPLOY.BATCH_SIZE
int _batch_size;
};
};
}
} // namespace PaddleSolution
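
The hunks above are from seg_conf_parser.h. A minimal usage sketch of the parser (not part of this commit): the include path, the main() wrapper, and the config path "conf/seg_deploy.yaml" are assumed for illustration.

#include <string>
#include "seg_conf_parser.h"  // include path assumed

int main() {
    PaddleSolution::PaddleSegModelConfigPaser parser;
    // load_config() fills the parser fields from the DEPLOY section of the
    // YAML deploy config; check its bool result before using the values.
    if (!parser.load_config("conf/seg_deploy.yaml")) {  // hypothetical path
        return -1;
    }
    // debug() prints EVAL_CROP_SIZE, MEAN, STD and the DEPLOY.* fields.
    parser.debug();
    return 0;
}
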
@@ -30,8 +30,9 @@
#endif
namespace PaddleSolution {
namespace utils {
inline std::string path_join(const std::string& dir, const std::string& path) {
namespace utils {
inline std::string path_join(const std::string& dir,
const std::string& path) {
std::string seperator = "/";
#ifdef _WIN32
seperator = "\\";
@@ -40,8 +41,8 @@ namespace PaddleSolution {
}
#ifndef _WIN32
// scan a directory and get all files with input extensions
inline std::vector<std::string> get_directory_images(const std::string& path, const std::string& exts)
{
inline std::vector<std::string> get_directory_images(
const std::string& path, const std::string& exts) {
std::vector<std::string> imgs;
struct dirent *entry;
DIR *dir = opendir(path.c_str());
@@ -64,13 +65,15 @@ namespace PaddleSolution {
}
#else
// scan a directory and get all files with input extensions
inline std::vector<std::string> get_directory_images(const std::string& path, const std::string& exts)
{
inline std::vector<std::string> get_directory_images(
const std::string& path, const std::string& exts) {
std::vector<std::string> imgs;
for (const auto& item : std::experimental::filesystem::directory_iterator(path)) {
for (const auto& item :
std::experimental::filesystem::directory_iterator(path)) {
auto suffix = item.path().extension().string();
if (exts.find(suffix) != std::string::npos && suffix.size() > 0) {
auto fullname = path_join(path, item.path().filename().string());
auto fullname = path_join(path,
item.path().filename().string());
imgs.push_back(item.path().string());
}
}
@@ -79,11 +82,12 @@ namespace PaddleSolution {
#endif
// normalize and HWC_BGR -> CHW_RGB
inline void normalize(cv::Mat& im, float* data, std::vector<float>& fmean, std::vector<float>& fstd) {
inline void normalize(cv::Mat& im, float* data, std::vector<float>& fmean,
std::vector<float>& fstd) {
int rh = im.rows;
int rw = im.cols;
int rc = im.channels();
double normf = (double)1.0 / 255.0;
double normf = static_cast<double>(1.0) / 255.0;
#pragma omp parallel for
for (int h = 0; h < rh; ++h) {
const uchar* ptr = im.ptr<uchar>(h);
@@ -100,7 +104,8 @@ namespace PaddleSolution {
}
// argmax
inline void argmax(float* out, std::vector<int>& shape, std::vector<uchar>& mask, std::vector<uchar>& scoremap) {
inline void argmax(float* out, std::vector<int>& shape,
std::vector<uchar>& mask, std::vector<uchar>& scoremap) {
int out_img_len = shape[1] * shape[2];
int blob_out_len = out_img_len * shape[0];
/*
@@ -130,5 +135,5 @@ namespace PaddleSolution {
scoremap[i] = uchar(max_value * 255);
}
}
}
}
} // namespace utils
} // namespace PaddleSolution
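
The remaining hunks are from utils.h. A minimal sketch of how these helpers are typically combined for pre-processing (not part of this commit): the include path, the main() wrapper, the "./images" directory, the extension string, and the mean/std values are illustrative; argmax() would then turn the network output into a mask and score map.

#include <opencv2/opencv.hpp>
#include <iostream>
#include <string>
#include <vector>
#include "utils.h"  // include path assumed

int main() {
    // Collect image files under ./images; the extension string is matched
    // by substring lookup inside get_directory_images().
    auto files = PaddleSolution::utils::get_directory_images(
        "./images", ".jpeg|.jpg|.png");
    std::vector<float> mean = {0.5f, 0.5f, 0.5f};   // illustrative values
    std::vector<float> scale = {0.5f, 0.5f, 0.5f};
    for (const auto& file : files) {
        cv::Mat im = cv::imread(file);
        if (im.empty()) {
            continue;
        }
        // normalize(): HWC BGR uchar -> CHW RGB float, scaled to [0, 1]
        // and standardized with the given mean/std.
        std::vector<float> input(im.rows * im.cols * im.channels());
        PaddleSolution::utils::normalize(im, input.data(), mean, scale);
        std::cout << file << ": " << input.size() << " floats" << std::endl;
    }
    return 0;
}
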