Commit 01e54a1f authored by: J jack

use google style

Parent afb8620c
......@@ -13,18 +13,18 @@
// limitations under the License.
#include <glog/logging.h>
#include <omp.h>
#include <algorithm>
#include <chrono>
#include <chrono> // NOLINT
#include <fstream>
#include <iostream>
#include <string>
#include <vector>
#include <utility>
#include <omp.h>
#include "include/paddlex/paddlex.h"
using namespace std::chrono;
using namespace std::chrono; // NOLINT
DEFINE_string(model_dir, "", "Path of inference model");
DEFINE_bool(use_gpu, false, "Inferring with GPU or CPU");
......@@ -34,7 +34,9 @@ DEFINE_string(key, "", "key of encryption");
DEFINE_string(image, "", "Path of test image file");
DEFINE_string(image_list, "", "Path of test image list file");
DEFINE_int32(batch_size, 1, "Batch size of inferring");
DEFINE_int32(thread_num, omp_get_num_procs(), "Number of preprocessing threads");
DEFINE_int32(thread_num,
omp_get_num_procs(),
"Number of preprocessing threads");
int main(int argc, char** argv) {
// Parse command-line flags
......@@ -51,7 +53,12 @@ int main(int argc, char** argv) {
// Load the model
PaddleX::Model model;
model.Init(FLAGS_model_dir, FLAGS_use_gpu, FLAGS_use_trt, FLAGS_gpu_id, FLAGS_key, FLAGS_batch_size);
model.Init(FLAGS_model_dir,
FLAGS_use_gpu,
FLAGS_use_trt,
FLAGS_gpu_id,
FLAGS_key,
FLAGS_batch_size);
// Run prediction
double total_running_time_s = 0.0;
......@@ -70,32 +77,38 @@ int main(int argc, char** argv) {
image_paths.push_back(image_path);
}
imgs = image_paths.size();
for(int i = 0; i < image_paths.size(); i += FLAGS_batch_size) {
for (int i = 0; i < image_paths.size(); i += FLAGS_batch_size) {
auto start = system_clock::now();
// Read images
int im_vec_size = std::min((int)image_paths.size(), i + FLAGS_batch_size);
// Read images
int im_vec_size =
std::min(static_cast<int>(image_paths.size()), i + FLAGS_batch_size);
std::vector<cv::Mat> im_vec(im_vec_size - i);
std::vector<PaddleX::ClsResult> results(im_vec_size - i, PaddleX::ClsResult());
std::vector<PaddleX::ClsResult> results(im_vec_size - i,
PaddleX::ClsResult());
int thread_num = std::min(FLAGS_thread_num, im_vec_size - i);
#pragma omp parallel for num_threads(thread_num)
for(int j = i; j < im_vec_size; ++j){
for (int j = i; j < im_vec_size; ++j) {
im_vec[j - i] = std::move(cv::imread(image_paths[j], 1));
}
auto imread_end = system_clock::now();
model.predict(im_vec, results, thread_num);
model.predict(im_vec, &results, thread_num);
auto imread_duration = duration_cast<microseconds>(imread_end - start);
total_imread_time_s += double(imread_duration.count()) * microseconds::period::num / microseconds::period::den;
total_imread_time_s += static_cast<double>(imread_duration.count()) *
microseconds::period::num /
microseconds::period::den;
auto end = system_clock::now();
auto duration = duration_cast<microseconds>(end - start);
total_running_time_s += double(duration.count()) * microseconds::period::num / microseconds::period::den;
for(int j = i; j < im_vec_size; ++j) {
std::cout << "Path:" << image_paths[j]
<< ", predict label: " << results[j - i].category
<< ", label_id:" << results[j - i].category_id
<< ", score: " << results[j - i].score << std::endl;
}
total_running_time_s += static_cast<double>(duration.count()) *
microseconds::period::num /
microseconds::period::den;
for (int j = i; j < im_vec_size; ++j) {
std::cout << "Path:" << image_paths[j]
<< ", predict label: " << results[j - i].category
<< ", label_id:" << results[j - i].category_id
<< ", score: " << results[j - i].score << std::endl;
}
}
} else {
auto start = system_clock::now();
......@@ -104,21 +117,17 @@ int main(int argc, char** argv) {
model.predict(im, &result);
auto end = system_clock::now();
auto duration = duration_cast<microseconds>(end - start);
total_running_time_s += double(duration.count()) * microseconds::period::num / microseconds::period::den;
total_running_time_s += static_cast<double>(duration.count()) *
microseconds::period::num /
microseconds::period::den;
std::cout << "Predict label: " << result.category
<< ", label_id:" << result.category_id
<< ", score: " << result.score << std::endl;
}
std::cout << "Total running time: "
<< total_running_time_s
<< " s, average running time: "
<< total_running_time_s / imgs
<< " s/img, total read img time: "
<< total_imread_time_s
<< " s, average read time: "
<< total_imread_time_s / imgs
<< " s/img, batch_size = "
<< FLAGS_batch_size
<< std::endl;
std::cout << "Total running time: " << total_running_time_s
<< " s, average running time: " << total_running_time_s / imgs
<< " s/img, total read img time: " << total_imread_time_s
<< " s, average read time: " << total_imread_time_s / imgs
<< " s/img, batch_size = " << FLAGS_batch_size << std::endl;
return 0;
}
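
Aside: the seconds conversion that each timing block above repeats inline can be captured in one small helper. A minimal sketch, assuming C++11 <chrono>; the helper name to_seconds is illustrative, not part of the PaddleX sources:

// Sketch only: factor out the duration-to-seconds conversion used above.
// to_seconds is an illustrative name, not a PaddleX API.
#include <chrono>

static double to_seconds(const std::chrono::microseconds& d) {
  return static_cast<double>(d.count()) *
         std::chrono::microseconds::period::num /
         std::chrono::microseconds::period::den;
}

// e.g. total_running_time_s += to_seconds(duration_cast<microseconds>(end - start));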
......@@ -13,20 +13,20 @@
// limitations under the License.
#include <glog/logging.h>
#include <omp.h>
#include <algorithm>
#include <chrono>
#include <chrono> // NOLINT
#include <fstream>
#include <iostream>
#include <string>
#include <vector>
#include <utility>
#include <omp.h>
#include "include/paddlex/paddlex.h"
#include "include/paddlex/visualize.h"
using namespace std::chrono;
using namespace std::chrono; // NOLINT
DEFINE_string(model_dir, "", "Path of inference model");
DEFINE_bool(use_gpu, false, "Inferring with GPU or CPU");
......@@ -37,13 +37,17 @@ DEFINE_string(image, "", "Path of test image file");
DEFINE_string(image_list, "", "Path of test image list file");
DEFINE_string(save_dir, "output", "Path to save visualized image");
DEFINE_int32(batch_size, 1, "Batch size of inferring");
DEFINE_double(threshold, 0.5, "The minimum score of target boxes that are shown");
DEFINE_int32(thread_num, omp_get_num_procs(), "Number of preprocessing threads");
DEFINE_double(threshold,
0.5,
"The minimum scores of target boxes which are shown");
DEFINE_int32(thread_num,
omp_get_num_procs(),
"Number of preprocessing threads");
int main(int argc, char** argv) {
// Parse command-line flags
google::ParseCommandLineFlags(&argc, &argv, true);
if (FLAGS_model_dir == "") {
std::cerr << "--model_dir needs to be defined" << std::endl;
return -1;
......@@ -55,7 +59,12 @@ int main(int argc, char** argv) {
std::cout << "Thread num: " << FLAGS_thread_num << std::endl;
// Load the model
PaddleX::Model model;
model.Init(FLAGS_model_dir, FLAGS_use_gpu, FLAGS_use_trt, FLAGS_gpu_id, FLAGS_key, FLAGS_batch_size);
model.Init(FLAGS_model_dir,
FLAGS_use_gpu,
FLAGS_use_trt,
FLAGS_gpu_id,
FLAGS_key,
FLAGS_batch_size);
double total_running_time_s = 0.0;
double total_imread_time_s = 0.0;
......@@ -75,41 +84,47 @@ int main(int argc, char** argv) {
image_paths.push_back(image_path);
}
imgs = image_paths.size();
for(int i = 0; i < image_paths.size(); i += FLAGS_batch_size) {
for (int i = 0; i < image_paths.size(); i += FLAGS_batch_size) {
auto start = system_clock::now();
int im_vec_size = std::min((int)image_paths.size(), i + FLAGS_batch_size);
int im_vec_size =
std::min(static_cast<int>(image_paths.size()), i + FLAGS_batch_size);
std::vector<cv::Mat> im_vec(im_vec_size - i);
std::vector<PaddleX::DetResult> results(im_vec_size - i, PaddleX::DetResult());
std::vector<PaddleX::DetResult> results(im_vec_size - i,
PaddleX::DetResult());
int thread_num = std::min(FLAGS_thread_num, im_vec_size - i);
#pragma omp parallel for num_threads(thread_num)
for(int j = i; j < im_vec_size; ++j){
for (int j = i; j < im_vec_size; ++j) {
im_vec[j - i] = std::move(cv::imread(image_paths[j], 1));
}
auto imread_end = system_clock::now();
model.predict(im_vec, results, thread_num);
model.predict(im_vec, &results, thread_num);
auto imread_duration = duration_cast<microseconds>(imread_end - start);
total_imread_time_s += double(imread_duration.count()) * microseconds::period::num / microseconds::period::den;
total_imread_time_s += static_cast<double>(imread_duration.count()) *
microseconds::period::num /
microseconds::period::den;
auto end = system_clock::now();
auto duration = duration_cast<microseconds>(end - start);
total_running_time_s += double(duration.count()) * microseconds::period::num / microseconds::period::den;
// Output predicted bounding boxes
for(int j = 0; j < im_vec_size - i; ++j) {
for(int k = 0; k < results[j].boxes.size(); ++k) {
std::cout << "image file: " << image_paths[i + j] << ", ";// << std::endl;
total_running_time_s += static_cast<double>(duration.count()) *
microseconds::period::num /
microseconds::period::den;
// Output predicted bounding boxes
for (int j = 0; j < im_vec_size - i; ++j) {
for (int k = 0; k < results[j].boxes.size(); ++k) {
std::cout << "image file: " << image_paths[i + j] << ", ";
std::cout << "predict label: " << results[j].boxes[k].category
<< ", label_id:" << results[j].boxes[k].category_id
<< ", score: " << results[j].boxes[k].score << ", box(xmin, ymin, w, h):("
<< ", score: " << results[j].boxes[k].score
<< ", box(xmin, ymin, w, h):("
<< results[j].boxes[k].coordinate[0] << ", "
<< results[j].boxes[k].coordinate[1] << ", "
<< results[j].boxes[k].coordinate[2] << ", "
<< results[j].boxes[k].coordinate[3] << ")" << std::endl;
}
}
// Visualization
for(int j = 0; j < im_vec_size - i; ++j) {
cv::Mat vis_img =
PaddleX::Visualize(im_vec[j], results[j], model.labels, colormap, FLAGS_threshold);
for (int j = 0; j < im_vec_size - i; ++j) {
cv::Mat vis_img = PaddleX::Visualize(
im_vec[j], results[j], model.labels, colormap, FLAGS_threshold);
std::string save_path =
PaddleX::generate_save_path(FLAGS_save_dir, image_paths[i + j]);
cv::imwrite(save_path, vis_img);
......@@ -121,12 +136,12 @@ int main(int argc, char** argv) {
cv::Mat im = cv::imread(FLAGS_image, 1);
model.predict(im, &result);
for (int i = 0; i < result.boxes.size(); ++i) {
std::cout << "image file: " << FLAGS_image << std::endl;
std::cout << "image file: " << FLAGS_image << std::endl;
std::cout << ", predict label: " << result.boxes[i].category
<< ", label_id:" << result.boxes[i].category_id
<< ", score: " << result.boxes[i].score << ", box(xmin, ymin, w, h):("
<< result.boxes[i].coordinate[0] << ", "
<< result.boxes[i].coordinate[1] << ", "
<< ", score: " << result.boxes[i].score
<< ", box(xmin, ymin, w, h):(" << result.boxes[i].coordinate[0]
<< ", " << result.boxes[i].coordinate[1] << ", "
<< result.boxes[i].coordinate[2] << ", "
<< result.boxes[i].coordinate[3] << ")" << std::endl;
}
......@@ -140,18 +155,12 @@ int main(int argc, char** argv) {
result.clear();
std::cout << "Visualized output saved as " << save_path << std::endl;
}
std::cout << "Total running time: "
<< total_running_time_s
<< " s, average running time: "
<< total_running_time_s / imgs
<< " s/img, total read img time: "
<< total_imread_time_s
<< " s, average read img time: "
<< total_imread_time_s / imgs
<< " s, batch_size = "
<< FLAGS_batch_size
<< std::endl;
std::cout << "Total running time: " << total_running_time_s
<< " s, average running time: " << total_running_time_s / imgs
<< " s/img, total read img time: " << total_imread_time_s
<< " s, average read img time: " << total_imread_time_s / imgs
<< " s, batch_size = " << FLAGS_batch_size << std::endl;
return 0;
}
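
The OpenMP batch-read pattern shared by all three demos, isolated as a standalone sketch; read_batch is our name for it, not a repository function:

// Sketch only: the demos' parallel image read, as one reusable function.
#include <omp.h>
#include <algorithm>
#include <string>
#include <vector>
#include <opencv2/opencv.hpp>

static std::vector<cv::Mat> read_batch(const std::vector<std::string>& paths,
                                       int begin, int end, int max_threads) {
  std::vector<cv::Mat> ims(end - begin);
  // Use at most one thread per image, mirroring the demos' std::min clamp.
  int thread_num = std::min(max_threads, end - begin);
  #pragma omp parallel for num_threads(thread_num)
  for (int j = begin; j < end; ++j) {
    ims[j - begin] = cv::imread(paths[j], 1);  // flag 1: load as 3-channel BGR
  }
  return ims;
}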
......@@ -13,19 +13,19 @@
// limitations under the License.
#include <glog/logging.h>
#include <omp.h>
#include <algorithm>
#include <chrono>
#include <chrono> // NOLINT
#include <fstream>
#include <iostream>
#include <string>
#include <vector>
#include <utility>
#include <omp.h>
#include "include/paddlex/paddlex.h"
#include "include/paddlex/visualize.h"
using namespace std::chrono;
using namespace std::chrono; // NOLINT
DEFINE_string(model_dir, "", "Path of inference model");
DEFINE_bool(use_gpu, false, "Inferring with GPU or CPU");
......@@ -36,7 +36,9 @@ DEFINE_string(image, "", "Path of test image file");
DEFINE_string(image_list, "", "Path of test image list file");
DEFINE_string(save_dir, "output", "Path to save visualized image");
DEFINE_int32(batch_size, 1, "Batch size of inferring");
DEFINE_int32(thread_num, omp_get_num_procs(), "Number of preprocessing threads");
DEFINE_int32(thread_num,
omp_get_num_procs(),
"Number of preprocessing threads");
int main(int argc, char** argv) {
// Parse command-line flags
......@@ -53,7 +55,12 @@ int main(int argc, char** argv) {
// Load the model
PaddleX::Model model;
model.Init(FLAGS_model_dir, FLAGS_use_gpu, FLAGS_use_trt, FLAGS_gpu_id, FLAGS_key, FLAGS_batch_size);
model.Init(FLAGS_model_dir,
FLAGS_use_gpu,
FLAGS_use_trt,
FLAGS_gpu_id,
FLAGS_key,
FLAGS_batch_size);
double total_running_time_s = 0.0;
double total_imread_time_s = 0.0;
......@@ -72,25 +79,31 @@ int main(int argc, char** argv) {
image_paths.push_back(image_path);
}
imgs = image_paths.size();
for(int i = 0; i < image_paths.size(); i += FLAGS_batch_size){
for (int i = 0; i < image_paths.size(); i += FLAGS_batch_size) {
auto start = system_clock::now();
int im_vec_size = std::min((int)image_paths.size(), i + FLAGS_batch_size);
int im_vec_size =
std::min(static_cast<int>(image_paths.size()), i + FLAGS_batch_size);
std::vector<cv::Mat> im_vec(im_vec_size - i);
std::vector<PaddleX::SegResult> results(im_vec_size - i, PaddleX::SegResult());
std::vector<PaddleX::SegResult> results(im_vec_size - i,
PaddleX::SegResult());
int thread_num = std::min(FLAGS_thread_num, im_vec_size - i);
#pragma omp parallel for num_threads(thread_num)
for(int j = i; j < im_vec_size; ++j){
for (int j = i; j < im_vec_size; ++j) {
im_vec[j - i] = std::move(cv::imread(image_paths[j], 1));
}
auto imread_end = system_clock::now();
model.predict(im_vec, results, thread_num);
model.predict(im_vec, &results, thread_num);
auto imread_duration = duration_cast<microseconds>(imread_end - start);
total_imread_time_s += double(imread_duration.count()) * microseconds::period::num / microseconds::period::den;
total_imread_time_s += static_cast<double>(imread_duration.count()) *
microseconds::period::num /
microseconds::period::den;
auto end = system_clock::now();
auto duration = duration_cast<microseconds>(end - start);
total_running_time_s += double(duration.count()) * microseconds::period::num / microseconds::period::den;
total_running_time_s += static_cast<double>(duration.count()) *
microseconds::period::num /
microseconds::period::den;
// Visualization
for(int j = 0; j < im_vec_size - i; ++j) {
for (int j = 0; j < im_vec_size - i; ++j) {
cv::Mat vis_img =
PaddleX::Visualize(im_vec[j], results[j], model.labels, colormap);
std::string save_path =
......@@ -106,7 +119,9 @@ int main(int argc, char** argv) {
model.predict(im, &result);
auto end = system_clock::now();
auto duration = duration_cast<microseconds>(end - start);
total_running_time_s += double(duration.count()) * microseconds::period::num / microseconds::period::den;
total_running_time_s += static_cast<double>(duration.count()) *
microseconds::period::num /
microseconds::period::den;
// Visualization
cv::Mat vis_img = PaddleX::Visualize(im, result, model.labels, colormap);
std::string save_path =
......@@ -115,17 +130,11 @@ int main(int argc, char** argv) {
result.clear();
std::cout << "Visualized output saved as " << save_path << std::endl;
}
std::cout << "Total running time: "
<< total_running_time_s
<< " s, average running time: "
<< total_running_time_s / imgs
<< " s/img, total read img time: "
<< total_imread_time_s
<< " s, average read img time: "
<< total_imread_time_s / imgs
<< " s, batch_size = "
<< FLAGS_batch_size
<< std::endl;
std::cout << "Total running time: " << total_running_time_s
<< " s, average running time: " << total_running_time_s / imgs
<< " s/img, total read img time: " << total_imread_time_s
<< " s, average read img time: " << total_imread_time_s / imgs
<< " s, batch_size = " << FLAGS_batch_size << std::endl;
return 0;
}
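
For clarity, the im_vec_size arithmetic used by all three batch loops, shown in isolation; the numbers are a made-up example:

// Sketch only: last-batch clamping as in the loops above. With 5 images and
// batch_size = 2, the batches cover [0, 2), [2, 4) and the partial [4, 5).
#include <algorithm>

void iterate_batches(int total_images, int batch_size) {
  for (int i = 0; i < total_images; i += batch_size) {
    int im_vec_size = std::min(total_images, i + batch_size);
    // process images in [i, im_vec_size); this batch holds im_vec_size - i images
  }
}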
......@@ -54,4 +54,4 @@ class ConfigPaser {
YAML::Node Transforms_;
};
} // namespace PaddleDetection
} // namespace PaddleX
......@@ -16,8 +16,11 @@
#include <functional>
#include <iostream>
#include <map>
#include <memory>
#include <numeric>
#include <string>
#include <vector>
#include "yaml-cpp/yaml.h"
#ifdef _WIN32
......@@ -28,21 +31,21 @@
#include "paddle_inference_api.h" // NOLINT
#include "config_parser.h"
#include "results.h"
#include "transforms.h"
#include "config_parser.h" // NOLINT
#include "results.h" // NOLINT
#include "transforms.h" // NOLINT
#ifdef WITH_ENCRYPTION
#include "paddle_model_decrypt.h"
#include "model_code.h"
#include "paddle_model_decrypt.h" // NOLINT
#include "model_code.h" // NOLINT
#endif
namespace PaddleX {
/*
* @brief
* This class encapsulates all necessary process steps of model inference, which
* include image matrix preprocessing, model predicting and results postprocessing.
* This class encapsulates all necessary process steps of model inference, which
* include image matrix preprocessing, model predicting and results postprocessing.
* The entire process of model inference can be simplified as below:
* 1. preprocess image matrix (resize, padding, ......)
* 2. model infer
......@@ -63,11 +66,11 @@ class Model {
/*
* @brief
* This method aims to initialize the model configuration
*
*
* @param model_dir: the directory which contains model.yml
* @param use_gpu: whether to use gpu when inferring
* @param use_trt: whether to use TensorRT when inferring
* @param gpu_id: the id of the gpu used when inferring with gpu
* @param gpu_id: the id of the gpu used when inferring with gpu
* @param key: the encryption key when using an encrypted model
* @param batch_size: batch size of inferring
* */
......@@ -76,7 +79,7 @@ class Model {
bool use_trt = false,
int gpu_id = 0,
std::string key = "",
int batch_size = 1) {
int batch_size = 1) {
create_predictor(model_dir, use_gpu, use_trt, gpu_id, key, batch_size);
}
......@@ -85,11 +88,11 @@ class Model {
bool use_trt = false,
int gpu_id = 0,
std::string key = "",
int batch_size = 1);
int batch_size = 1);
/*
* @brief
* This method aims to load model configurations which include
* @brief
* This method aims to load model configurations which include
* transform steps and label list
*
* @param model_dir: the directory which contains model.yml
......@@ -107,7 +110,7 @@ class Model {
* @return true if the image matrix is preprocessed successfully
* */
bool preprocess(const cv::Mat& input_im, ImageBlob* blob);
/*
* @brief
* This method aims to transform multiple image matrices, the result will be
......@@ -115,15 +118,17 @@ class Model {
*
* @param input_im_batch: a batch of image matrices to be transformed
* @param blob_batch: raw data of a batch of image matrices after transformation
* @param thread_num: the number of preprocessing threads,
* @param thread_num: the number of preprocessing threads,
* each thread runs preprocessing on a single image matrix
* @return true if a batch of image matrices is preprocessed successfully
* */
bool preprocess(const std::vector<cv::Mat> &input_im_batch, std::vector<ImageBlob> &blob_batch, int thread_num = 1);
bool preprocess(const std::vector<cv::Mat> &input_im_batch,
std::vector<ImageBlob> *blob_batch,
int thread_num = 1);
/*
* @brief
* This method aims to execute classification model prediction on a single image matrix,
* This method aims to execute classification model prediction on a single image matrix,
* the result will be returned via the second parameter.
*
* @param im: a single image matrix to be predicted
......@@ -134,7 +139,7 @@ class Model {
/*
* @brief
* This method aims to execute classification model prediction on a batch of image matrices,
* This method aims to execute classification model prediction on a batch of image matrices,
* the result will be returned via the second parameter.
*
* @param im: a batch of image matrices to be predicted
......@@ -143,7 +148,9 @@ class Model {
* on a single image matrix
* @return true if the prediction succeeds
* */
bool predict(const std::vector<cv::Mat> &im_batch, std::vector<ClsResult> &results, int thread_num = 1);
bool predict(const std::vector<cv::Mat> &im_batch,
std::vector<ClsResult> *results,
int thread_num = 1);
/*
* @brief
......@@ -167,11 +174,13 @@ class Model {
* on a single image matrix
* @return true if the prediction succeeds
* */
bool predict(const std::vector<cv::Mat> &im_batch, std::vector<DetResult> &result, int thread_num = 1);
bool predict(const std::vector<cv::Mat> &im_batch,
std::vector<DetResult> *result,
int thread_num = 1);
/*
* @brief
* This method aims to execute segmentation model prediction on a single image matrix,
* This method aims to execute segmentation model prediction on a single image matrix,
* the result will be returned via the second parameter.
*
* @param im: single image matrix to be predicted
......@@ -182,7 +191,7 @@ class Model {
/*
* @brief
* This method aims to execute segmentation model prediction on a batch of image matrices,
* This method aims to execute segmentation model prediction on a batch of image matrices,
* the result will be returned via the second parameter.
*
* @param im: a batch of image matrices to be predicted
......@@ -191,8 +200,10 @@ class Model {
* on a single image matrix
* @return true if the prediction succeeds
* */
bool predict(const std::vector<cv::Mat> &im_batch, std::vector<SegResult> &result, int thread_num = 1);
bool predict(const std::vector<cv::Mat> &im_batch,
std::vector<SegResult> *result,
int thread_num = 1);
// model type, one of 3 types: classifier, detector, segmenter
std::string type;
// model name, such as FasterRCNN, YOLOV3 and so on.
......@@ -209,4 +220,4 @@ class Model {
// a predictor which runs the model prediction
std::unique_ptr<paddle::PaddlePredictor> predictor_;
};
} // namespce of PaddleX
} // namespace PaddleX
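
For orientation, a minimal caller sketch against this class after the commit's reference-to-pointer change; the model directory and image paths below are placeholders:

// Sketch only: batch classification via the pointer-based predict() overload.
// "./inference_model", "a.jpg" and "b.jpg" are placeholder paths.
#include <vector>
#include <opencv2/opencv.hpp>
#include "include/paddlex/paddlex.h"

void classify_two_images() {
  PaddleX::Model model;
  model.Init("./inference_model", /*use_gpu=*/false, /*use_trt=*/false,
             /*gpu_id=*/0, /*key=*/"", /*batch_size=*/2);
  std::vector<cv::Mat> batch = {cv::imread("a.jpg", 1), cv::imread("b.jpg", 1)};
  std::vector<PaddleX::ClsResult> results(batch.size(), PaddleX::ClsResult());
  model.predict(batch, &results, /*thread_num=*/2);  // results now passed by pointer
}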
......@@ -66,7 +66,7 @@ class Transform {
* This method executes a preprocessing operation on the image matrix;
* the result will be returned via the second parameter.
* @param im: a single image matrix to be preprocessed
* @param data: the raw data of the single image matrix after preprocessing
* @param data: the raw data of the single image matrix after preprocessing
* @return true if transform successfully
* */
virtual bool Run(cv::Mat* im, ImageBlob* data) = 0;
......@@ -92,10 +92,10 @@ class Normalize : public Transform {
/*
* @brief
* This class executes the resize-by-short operation on the image matrix. At first, it resizes
* This class executes the resize-by-short operation on the image matrix. At first, it resizes
* the short side of the image matrix to the specified length. Accordingly, the long side
* will be resized in the same proportion. If the new length of the long side exceeds the max
* size, the long side will be resized to the max size, and the short side will be
* size, the long side will be resized to the max size, and the short side will be
* resized in the same proportion
* */
class ResizeByShort : public Transform {
......@@ -214,6 +214,7 @@ class Padding : public Transform {
}
}
virtual bool Run(cv::Mat* im, ImageBlob* data);
private:
int coarsest_stride_ = -1;
int width_ = 0;
......@@ -229,6 +230,7 @@ class Transforms {
void Init(const YAML::Node& node, bool to_rgb = true);
std::shared_ptr<Transform> CreateTransform(const std::string& name);
bool Run(cv::Mat* im, ImageBlob* data);
private:
std::vector<std::shared_ptr<Transform>> transforms_;
bool to_rgb_ = true;
......
......@@ -47,7 +47,7 @@ namespace PaddleX {
* @brief
* Generate visualization colormap for each class
*
* @param num_class: number of classes
* @param num_class: number of classes
* @return color map; the size of the vector is 3 * num_class
* */
std::vector<int> GenerateColorMap(int num_class);
......@@ -94,4 +94,4 @@ cv::Mat Visualize(const cv::Mat& img,
* */
std::string generate_save_path(const std::string& save_dir,
const std::string& file_path);
} // namespce of PaddleX
} // namespace PaddleX
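
Typical use of these two helpers, mirroring the detector demo; a sketch assuming model, im and result were produced as in the demos, with placeholder paths and the default 0.5 threshold:

// Sketch only: draw one detection result and save it, as the detector demo does.
// "output" and "a.jpg" are placeholders.
std::vector<int> colormap = PaddleX::GenerateColorMap(model.labels.size());
cv::Mat vis_img =
    PaddleX::Visualize(im, result, model.labels, colormap, 0.5);
std::string save_path = PaddleX::generate_save_path("output", "a.jpg");
cv::imwrite(save_path, vis_img);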
(This file's diff is collapsed.)
......@@ -145,4 +145,4 @@ std::string generate_save_path(const std::string& save_dir,
std::string image_name(file_path.substr(pos + 1));
return save_dir + OS_PATH_SEP + image_name;
}
} // namespace of PaddleX
} // namespace PaddleX