Commit 5557a2f6 authored by Wang Guibao, committed by GitHub

Fix image classification bug (#6)


* Fix image classification bug
Parent 7da09539
@@ -15,7 +15,11 @@
#pragma once
#include <vector>
#ifdef BCLOUD
#ifdef WITH_GPU
#include "paddle/paddle_inference_api.h"
#else
#include "paddle/fluid/inference/api/paddle_inference_api.h"
#endif
#else
#include "paddle/fluid/inference/paddle_inference_api.h"
#endif
...
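Note: the same four-way header selection recurs in four files in this commit. A consolidated sketch of the logic, assuming `BCLOUD` marks Baidu-internal builds and `WITH_GPU` marks GPU-enabled builds (both presumably injected by the build system, e.g. `-DBCLOUD -DWITH_GPU`; the commit does not state the flag semantics):

// Header-selection sketch, consolidating the four hunks in this commit.
#ifdef BCLOUD                    // Baidu-internal build (assumed meaning)
#ifdef WITH_GPU                  // internal GPU build
#include "paddle/paddle_inference_api.h"
#else                            // internal CPU build
#include "paddle/fluid/inference/api/paddle_inference_api.h"
#endif
#else                            // open-source build keeps the original header
#include "paddle/fluid/inference/paddle_inference_api.h"
#endif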
@@ -63,9 +63,11 @@ int ReaderOp::inference() {
// tls resource assignment
size_t dense_capacity = 3 * resize.width * resize.height;
size_t len = dense_capacity * sizeof(float) * sample_size;
float* data =
reinterpret_cast<float*>(MempoolWrapper::instance().malloc(len));
if (data == NULL) {
// Allocate the buffer inside the PaddleTensor so that it is managed by the tensor
in_tensor.data.Resize(len);
float *data = reinterpret_cast<float *>(in_tensor.data.data());
if (in_tensor.data.data() == NULL) {
LOG(ERROR) << "Failed create temp float array, "
<< "size=" << dense_capacity * sample_size * sizeof(float);
return -1;
@@ -144,9 +146,6 @@ int ReaderOp::inference() {
}
}
}
paddle::PaddleBuf pbuf(data, len);
in_tensor.data = pbuf;
in->push_back(in_tensor);
return 0;
...
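Note: this hunk is the core of the fix. The old code (the `pbuf` lines above, removed by this commit) wrapped mempool memory in `paddle::PaddleBuf(data, len)`, which only shares an external buffer and never takes ownership, so the tensor could outlive the thread-local mempool allocation. Calling `in_tensor.data.Resize(len)` instead makes the tensor allocate and own its buffer. A minimal sketch of the corrected pattern (the helper name and the explicit copy step are illustrative, not from the commit):

#include <algorithm>
#include <cstddef>
#include "paddle/fluid/inference/api/paddle_inference_api.h"

// Copy preprocessed pixels into a PaddleTensor that owns its buffer, so the
// memory stays valid for as long as the tensor does.
void fill_image_tensor(paddle::PaddleTensor* t,
                       const float* pixels, size_t n_floats) {
  t->data.Resize(n_floats * sizeof(float));    // tensor-owned allocation
  float* dst = static_cast<float*>(t->data.data());
  std::copy(pixels, pixels + n_floats, dst);
}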
@@ -29,7 +29,11 @@
#include "opencv/highgui.h"
#ifdef BCLOUD
#ifdef WITH_GPU
#include "paddle/paddle_inference_api.h"
#else
#include "paddle/fluid/inference/api/paddle_inference_api.h"
#endif
#else
#include "paddle/fluid/inference/paddle_inference_api.h"
#endif
...
@@ -15,7 +15,11 @@
#pragma once
#include <vector>
#ifdef BCLOUD
#ifdef WITH_GPU
#include "paddle/paddle_inference_api.h"
#else
#include "paddle/fluid/inference/api/paddle_inference_api.h"
#endif
#else
#include "paddle/fluid/inference/paddle_inference_api.h"
#endif
...
@@ -22,7 +22,11 @@
#include "configure/include/configure_parser.h"
#include "configure/inferencer_configure.pb.h"
#ifdef BCLOUD
#ifdef WITH_GPU
#include "paddle/paddle_inference_api.h"
#else
#include "paddle/fluid/inference/api/paddle_inference_api.h"
#endif
#else
#include "paddle/fluid/inference/paddle_inference_api.h"
#endif
...
@@ -133,8 +133,9 @@ class FluidGpuAnalysisCore : public FluidFamilyCore {
analysis_config.SetProgFile(data_path + "/__model__");
analysis_config.EnableUseGpu(100, FLAGS_gpuid);
analysis_config.SetCpuMathLibraryNumThreads(1);
analysis_config.EnableMemoryOptim(false, false);
analysis_config.SwitchSpecifyInputNames(true);
analysis_config.EnableMemoryOptim();
AutoLock lock(GlobalPaddleCreateMutex::instance());
_core =
paddle::CreatePaddlePredictor<paddle::AnalysisConfig>(analysis_config);
@@ -191,7 +192,8 @@ class FluidGpuAnalysisDirCore : public FluidFamilyCore {
analysis_config.EnableUseGpu(100, FLAGS_gpuid);
analysis_config.SwitchSpecifyInputNames(true);
analysis_config.SetCpuMathLibraryNumThreads(1);
analysis_config.EnableMemoryOptim();
analysis_config.EnableMemoryOptim(false, false);
AutoLock lock(GlobalPaddleCreateMutex::instance());
_core =
paddle::CreatePaddlePredictor<paddle::AnalysisConfig>(analysis_config);
@@ -498,7 +500,7 @@ class FluidGpuAnalysisDirWithSigmoidCore : public FluidGpuWithSigmoidCore {
analysis_config.EnableUseGpu(100, FLAGS_gpuid);
analysis_config.SwitchSpecifyInputNames(true);
analysis_config.SetCpuMathLibraryNumThreads(1);
analysis_config.EnableMemoryOptim();
analysis_config.EnableMemoryOptim(false, false);
AutoLock lock(GlobalPaddleCreateMutex::instance());
_core->_fluid_core =
paddle::CreatePaddlePredictor<paddle::AnalysisConfig>(analysis_config);
...
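Note: the three hunks above toggle between `EnableMemoryOptim()` and `EnableMemoryOptim(false, false)`. Under the two-argument signature `AnalysisConfig` had in contemporary Paddle releases, `EnableMemoryOptim(bool static_optim = false, bool force_update_static_cache = false)` (an assumption; the signature later changed), the two spellings select the same defaults. For reference, a minimal sketch of the predictor-construction pattern the three GPU cores share (the helper name and the `__params__` path are illustrative; the real code serializes creation through `GlobalPaddleCreateMutex`):

#include <memory>
#include <string>
#include "paddle/fluid/inference/api/paddle_inference_api.h"

// Build an AnalysisConfig for one GPU core and create the predictor.
std::unique_ptr<paddle::PaddlePredictor> make_gpu_predictor(
    const std::string& data_path, int gpu_id) {
  paddle::AnalysisConfig config;
  config.SetProgFile(data_path + "/__model__");
  config.SetParamsFile(data_path + "/__params__");  // assumed params path
  config.EnableUseGpu(100 /* initial GPU pool, MB */, gpu_id);
  config.SwitchSpecifyInputNames(true);             // feed inputs by tensor name
  config.SetCpuMathLibraryNumThreads(1);
  config.EnableMemoryOptim();                       // defaults equal (false, false)
  return paddle::CreatePaddlePredictor<paddle::AnalysisConfig>(config);
}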
@@ -51,6 +51,8 @@ using baidu::paddle_serving::predictor::FLAGS_port;
using baidu::paddle_serving::configure::InferServiceConf;
using baidu::paddle_serving::configure::read_proto_conf;
DECLARE_bool(logtostderr);
void print_revision(std::ostream& os, void*) {
#if defined(PDSERVING_VERSION)
os << PDSERVING_VERSION;
...
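Note: the truncated `print_revision` above reports a version string baked in at compile time. A complete, runnable sketch of the pattern (the `#else` fallback and the macro's build-system origin are assumptions, since the diff cuts off inside the `#if` branch):

#include <iostream>
#include <ostream>

// Report the revision injected at build time, e.g. via
// -DPDSERVING_VERSION="\"0.1.0\"". The unused void* parameter matches a
// callback signature, suggesting registration with a flag/status library.
void print_revision(std::ostream& os, void*) {
#if defined(PDSERVING_VERSION)
  os << PDSERVING_VERSION;
#else
  os << "unknown";   // fallback when the macro is not defined (assumption)
#endif
}

int main() {
  print_revision(std::cout, nullptr);
  std::cout << "\n";
}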
# Package the demo server and client binaries
mkdir -p demo/serving/bin
mv bin/serving demo/serving/bin
mkdir -p demo/client/bin
mv bin/dense_format demo/client/bin/
mv bin/echo demo/client/bin
mv bin/int64tensor_format demo/client/bin
mv bin/sparse_format demo/client/bin
mv bin/text_classification demo/client/bin
mv bin/text_classification_press demo/client/bin
mv bin/ximage demo/client/bin
cp baidu_third-party_mklml/so/* demo/serving/bin/
rm -rf baidu_third-party_mklml
# Download test model and test dataset
pushd .
mkdir -p demo/client/data   # demo/client/data is not created above; ensure it exists
cd demo/client/data
mkdir -p text_classification
cd text_classification
wget --no-check-certificate https://paddle-serving.bj.bcebos.com/data/text_classification/test_set.tar.gz
tar zxvf test_set.tar.gz
popd
pushd .
cd demo/serving/
mkdir -p data/model/paddle/fluid/
cd data/model/paddle/fluid/
wget --no-check-certificate https://paddle-serving.bj.bcebos.com/data/text_classification/text_classification_lstm.tar.gz
tar zxvf text_classification_lstm.tar.gz