From 5557a2f6f98a218466cd20952da57c013f9f2ff0 Mon Sep 17 00:00:00 2001
From: Wang Guibao
Date: Tue, 21 May 2019 17:45:51 +0800
Subject: [PATCH] Fix image classification bug (#6)

* Fix image classification bug
---
 demo-serving/op/classify_op.h            |  4 +++
 demo-serving/op/reader_op.cpp            | 11 +++----
 demo-serving/op/reader_op.h              |  4 +++
 demo-serving/op/text_classification_op.h |  4 +++
 .../include/fluid_cpu_engine.h           |  4 +++
 .../include/fluid_gpu_engine.h           |  8 +++--
 predictor/src/pdserving.cpp              |  2 ++
 release.bcloud                           | 31 ------------------
 8 files changed, 28 insertions(+), 40 deletions(-)
 delete mode 100644 release.bcloud

diff --git a/demo-serving/op/classify_op.h b/demo-serving/op/classify_op.h
index a93a6b2d..366793cc 100644
--- a/demo-serving/op/classify_op.h
+++ b/demo-serving/op/classify_op.h
@@ -15,7 +15,11 @@
 #pragma once
 #include <vector>
 #ifdef BCLOUD
+#ifdef WITH_GPU
+#include "paddle/paddle_inference_api.h"
+#else
 #include "paddle/fluid/inference/api/paddle_inference_api.h"
+#endif
 #else
 #include "paddle/fluid/inference/paddle_inference_api.h"
 #endif
diff --git a/demo-serving/op/reader_op.cpp b/demo-serving/op/reader_op.cpp
index d50c9ebd..c57e1519 100644
--- a/demo-serving/op/reader_op.cpp
+++ b/demo-serving/op/reader_op.cpp
@@ -63,9 +63,11 @@ int ReaderOp::inference() {
   // tls resource assignment
   size_t dense_capacity = 3 * resize.width * resize.height;
   size_t len = dense_capacity * sizeof(float) * sample_size;
-  float* data =
-      reinterpret_cast<float*>(MempoolWrapper::instance().malloc(len));
-  if (data == NULL) {
+
+  // Allocate buffer in PaddleTensor, so that buffer will be managed by the Tensor
+  in_tensor.data.Resize(len);
+  float *data = reinterpret_cast<float *>(in_tensor.data.data());
+  if (in_tensor.data.data() == NULL) {
     LOG(ERROR) << "Failed create temp float array, "
                << "size=" << dense_capacity * sample_size * sizeof(float);
     return -1;
@@ -144,9 +146,6 @@ int ReaderOp::inference() {
       }
     }
   }
-  paddle::PaddleBuf pbuf(data, len);
-  in_tensor.data = pbuf;
-
   in->push_back(in_tensor);
 
   return 0;
diff --git a/demo-serving/op/reader_op.h b/demo-serving/op/reader_op.h
index 6b47f8e0..484d6f62 100644
--- a/demo-serving/op/reader_op.h
+++ b/demo-serving/op/reader_op.h
@@ -29,7 +29,11 @@
 #include "opencv/highgui.h"
 
 #ifdef BCLOUD
+#ifdef WITH_GPU
+#include "paddle/paddle_inference_api.h"
+#else
 #include "paddle/fluid/inference/api/paddle_inference_api.h"
+#endif
 #else
 #include "paddle/fluid/inference/paddle_inference_api.h"
 #endif
diff --git a/demo-serving/op/text_classification_op.h b/demo-serving/op/text_classification_op.h
index f98bd204..bef8ec52 100644
--- a/demo-serving/op/text_classification_op.h
+++ b/demo-serving/op/text_classification_op.h
@@ -15,7 +15,11 @@
 #pragma once
 #include <vector>
 #ifdef BCLOUD
+#ifdef WITH_GPU
+#include "paddle/paddle_inference_api.h"
+#else
 #include "paddle/fluid/inference/api/paddle_inference_api.h"
+#endif
 #else
 #include "paddle/fluid/inference/paddle_inference_api.h"
 #endif
diff --git a/inferencer-fluid-cpu/include/fluid_cpu_engine.h b/inferencer-fluid-cpu/include/fluid_cpu_engine.h
index 82b924ac..24109ef0 100644
--- a/inferencer-fluid-cpu/include/fluid_cpu_engine.h
+++ b/inferencer-fluid-cpu/include/fluid_cpu_engine.h
@@ -22,7 +22,11 @@
 #include "configure/include/configure_parser.h"
 #include "configure/inferencer_configure.pb.h"
 #ifdef BCLOUD
+#ifdef WITH_GPU
+#include "paddle/paddle_inference_api.h"
+#else
 #include "paddle/fluid/inference/api/paddle_inference_api.h"
+#endif
 #else
 #include "paddle/fluid/inference/paddle_inference_api.h"
 #endif
diff --git a/inferencer-fluid-gpu/include/fluid_gpu_engine.h b/inferencer-fluid-gpu/include/fluid_gpu_engine.h
index 455ee903..b0778930 100644
--- a/inferencer-fluid-gpu/include/fluid_gpu_engine.h
+++ b/inferencer-fluid-gpu/include/fluid_gpu_engine.h
@@ -133,8 +133,9 @@ class FluidGpuAnalysisCore : public FluidFamilyCore {
     analysis_config.SetProgFile(data_path + "/__model__");
     analysis_config.EnableUseGpu(100, FLAGS_gpuid);
     analysis_config.SetCpuMathLibraryNumThreads(1);
+    analysis_config.EnableMemoryOptim(false, false);
     analysis_config.SwitchSpecifyInputNames(true);
-    analysis_config.EnableMemoryOptim();
+
     AutoLock lock(GlobalPaddleCreateMutex::instance());
     _core = paddle::CreatePaddlePredictor(analysis_config);
@@ -191,7 +192,8 @@ class FluidGpuAnalysisDirCore : public FluidFamilyCore {
     analysis_config.EnableUseGpu(100, FLAGS_gpuid);
     analysis_config.SwitchSpecifyInputNames(true);
     analysis_config.SetCpuMathLibraryNumThreads(1);
-    analysis_config.EnableMemoryOptim();
+    analysis_config.EnableMemoryOptim(false, false);
+
     AutoLock lock(GlobalPaddleCreateMutex::instance());
     _core = paddle::CreatePaddlePredictor(analysis_config);
@@ -498,7 +500,7 @@ class FluidGpuAnalysisDirWithSigmoidCore : public FluidGpuWithSigmoidCore {
     analysis_config.EnableUseGpu(100, FLAGS_gpuid);
     analysis_config.SwitchSpecifyInputNames(true);
     analysis_config.SetCpuMathLibraryNumThreads(1);
-    analysis_config.EnableMemoryOptim();
+    analysis_config.EnableMemoryOptim(false, false);
     AutoLock lock(GlobalPaddleCreateMutex::instance());
     _core->_fluid_core = paddle::CreatePaddlePredictor(analysis_config);
diff --git a/predictor/src/pdserving.cpp b/predictor/src/pdserving.cpp
index c9c7be3f..be7f9887 100644
--- a/predictor/src/pdserving.cpp
+++ b/predictor/src/pdserving.cpp
@@ -51,6 +51,8 @@ using baidu::paddle_serving::predictor::FLAGS_port;
 using baidu::paddle_serving::configure::InferServiceConf;
 using baidu::paddle_serving::configure::read_proto_conf;
 
+DECLARE_bool(logtostderr);
+
 void print_revision(std::ostream& os, void*) {
 #if defined(PDSERVING_VERSION)
   os << PDSERVING_VERSION;
diff --git a/release.bcloud b/release.bcloud
deleted file mode 100644
index 3f76ab21..00000000
--- a/release.bcloud
+++ /dev/null
@@ -1,31 +0,0 @@
-mkdir -p demo/serving/bin
-mv bin/serving demo/serving/bin
-
-mkdir -p demo/client/bin
-mv bin/dense_format demo/client/bin/
-mv bin/echo demo/client/bin
-mv bin/int64tensor_format demo/client/bin
-mv bin/sparse_format demo/client/bin
-mv bin/text_classification demo/client/bin
-mv bin/text_classification_press demo/client/bin
-mv bin/ximage demo/client/bin
-
-cp baidu_third-party_mklml/so/* demo/serving/bin/
-rm -rf baidu_third-party_mklml
-
-# Download test model and test dataset
-pushd .
-cd demo/client/data
-mkdir -p text_classification
-cd text_classification
-wget --no-check-certificate https://paddle-serving.bj.bcebos.com/data/text_classification/test_set.tar.gz
-tar zxvf test_set.tar.gz
-
-popd
-
-pushd .
-cd demo/serving/
-mkdir -p data/model/paddle/fluid/
-cd data/model/paddle/fluid/
-wget --no-check-certificate https://paddle-serving.bj.bcebos.com/data/text_classification/text_classification_lstm.tar.gz
-tar zxvf text_classification_lstm.tar.gz
--
GitLab
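
Editor's note on the reader_op.cpp hunks: a paddle::PaddleBuf constructed over an existing pointer, as in the removed paddle::PaddleBuf pbuf(data, len), does not take ownership of that memory, so the tensor kept referencing a MempoolWrapper arena whose lifetime was unrelated to the tensor's. Calling Resize() on the tensor's own PaddleBuf makes the buffer tensor-owned, which is what the patch's comment describes. A minimal sketch of the two patterns, using only the PaddleTensor/PaddleBuf calls visible in the diff (fill_tensor, src, and pool_malloc are illustrative names, not from the source):

    #include <cstring>
    #include "paddle/fluid/inference/api/paddle_inference_api.h"

    // Copies n floats into a PaddleTensor, letting the tensor own its buffer.
    void fill_tensor(paddle::PaddleTensor* t, const float* src, size_t n) {
      size_t len = n * sizeof(float);

      // Before the fix (non-owning): PaddleBuf(ptr, len) merely wraps ptr,
      // so the pool that allocated it must outlive every use of the tensor.
      //   float* ext = reinterpret_cast<float*>(pool_malloc(len));
      //   t->data = paddle::PaddleBuf(ext, len);

      // After the fix (owning): Resize() allocates memory managed by the
      // PaddleBuf itself, released together with the tensor.
      t->data.Resize(len);
      std::memcpy(t->data.data(), src, len);
    }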
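
Similarly for the fluid_gpu_engine.h hunks: the patch replaces the zero-argument EnableMemoryOptim() with EnableMemoryOptim(false, false), explicitly turning off the static memory optimization and the static-cache refresh (the parameter names and meaning are assumptions based on the AnalysisConfig of this Paddle generation; the diff itself only shows the two boolean arguments). A sketch of the resulting predictor setup, with model_dir and gpu_id as placeholder inputs and the serving-side AutoLock around CreatePaddlePredictor omitted:

    #include <memory>
    #include <string>
    #include "paddle/fluid/inference/api/paddle_inference_api.h"

    // Builds a GPU predictor configured the way the patched cores do it.
    std::unique_ptr<paddle::PaddlePredictor> make_gpu_predictor(
        const std::string& model_dir, int gpu_id) {
      paddle::AnalysisConfig analysis_config;
      analysis_config.SetModel(model_dir);
      analysis_config.EnableUseGpu(100 /* initial GPU workspace, MB */, gpu_id);
      analysis_config.SwitchSpecifyInputNames(true);
      analysis_config.SetCpuMathLibraryNumThreads(1);
      // Both flags pinned off, as in the patch; argument names are assumed
      // (static_optim, force_update_static_cache).
      analysis_config.EnableMemoryOptim(false, false);
      return paddle::CreatePaddlePredictor(analysis_config);
    }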