Unverified · Commit 126d3d69 authored by silingtong123, committed by GitHub

support C++ inference shared library on windows (#24672)

* add SetCommandLineOption

* add the print_FLAGS function

* remove the test demo

* modify the location of macro

* add the 'WITH_STATIC_LIB' option on windows

* modify the macro of PD_INFER_DECL

* modify the function name

* modify the unittest

* modify the code style
Parent commit: 23d253e1
@@ -20,6 +20,8 @@ set(FLUID_INFERENCE_INSTALL_DIR "${CMAKE_BINARY_DIR}/fluid_inference_install_dir
"A path setting fluid inference shared and static libraries")
if(WIN32)
#todo: remove the option
option(WITH_STATIC_LIB "Compile demo with static/shared library, default use static." ON)
if(NOT PYTHON_EXECUTABLE)
FIND_PACKAGE(PythonInterp REQUIRED)
endif()
@@ -109,7 +111,7 @@ function(copy_part_of_thrid_party TARGET DST)
copy(${TARGET}
SRCS ${XXHASH_INCLUDE_DIR} ${XXHASH_LIBRARIES}
DSTS ${dst_dir} ${dst_dir}/lib)
if (NOT PROTOBUF_FOUND OR WIN32)
set(dst_dir "${DST}/third_party/install/protobuf")
copy(${TARGET}
@@ -152,14 +154,26 @@ copy_part_of_thrid_party(inference_lib_dist ${FLUID_INFERENCE_INSTALL_DIR})
set(src_dir "${PADDLE_SOURCE_DIR}/paddle/fluid")
if(WIN32)
-    set(paddle_fluid_lib ${PADDLE_BINARY_DIR}/paddle/fluid/inference/${CMAKE_BUILD_TYPE}/*paddle_fluid.*)
+    if(WITH_STATIC_LIB)
+        set(paddle_fluid_lib ${PADDLE_BINARY_DIR}/paddle/fluid/inference/${CMAKE_BUILD_TYPE}/libpaddle_fluid.lib)
+    else()
+        set(paddle_fluid_lib ${PADDLE_BINARY_DIR}/paddle/fluid/inference/${CMAKE_BUILD_TYPE}/paddle_fluid.dll
+            ${PADDLE_BINARY_DIR}/paddle/fluid/inference/${CMAKE_BUILD_TYPE}/paddle_fluid.lib)
+    endif()
else(WIN32)
set(paddle_fluid_lib ${PADDLE_BINARY_DIR}/paddle/fluid/inference/libpaddle_fluid.*)
endif(WIN32)
-copy(inference_lib_dist
-        SRCS ${src_dir}/inference/api/paddle_*.h ${paddle_fluid_lib}
-        DSTS ${FLUID_INFERENCE_INSTALL_DIR}/paddle/include ${FLUID_INFERENCE_INSTALL_DIR}/paddle/lib)
+if(WIN32 AND NOT WITH_STATIC_LIB)
+    copy(inference_lib_dist
+            SRCS ${src_dir}/inference/api/paddle_*.h ${paddle_fluid_lib}
+            DSTS ${FLUID_INFERENCE_INSTALL_DIR}/paddle/include ${FLUID_INFERENCE_INSTALL_DIR}/paddle/lib
+                 ${FLUID_INFERENCE_INSTALL_DIR}/paddle/lib)
+else()
+    copy(inference_lib_dist
+            SRCS ${src_dir}/inference/api/paddle_*.h ${paddle_fluid_lib}
+            DSTS ${FLUID_INFERENCE_INSTALL_DIR}/paddle/include ${FLUID_INFERENCE_INSTALL_DIR}/paddle/lib)
+endif()
copy(inference_lib_dist
SRCS ${CMAKE_BINARY_DIR}/paddle/fluid/framework/framework.pb.h
@@ -187,10 +201,17 @@ add_custom_target(fluid_lib_dist ALL DEPENDS ${fluid_lib_deps})
set(dst_dir "${FLUID_INSTALL_DIR}/paddle/fluid")
set(module "inference")
-copy(fluid_lib_dist
-        SRCS ${src_dir}/${module}/*.h ${src_dir}/${module}/api/paddle_*.h ${paddle_fluid_lib}
-        DSTS ${dst_dir}/${module} ${dst_dir}/${module} ${dst_dir}/${module}
-)
+if(WIN32 AND NOT WITH_STATIC_LIB)
+    copy(fluid_lib_dist
+            SRCS ${src_dir}/${module}/*.h ${src_dir}/${module}/api/paddle_*.h ${paddle_fluid_lib}
+            DSTS ${dst_dir}/${module} ${dst_dir}/${module} ${dst_dir}/${module} ${dst_dir}/${module}
+    )
+else()
+    copy(fluid_lib_dist
+            SRCS ${src_dir}/${module}/*.h ${src_dir}/${module}/api/paddle_*.h ${paddle_fluid_lib}
+            DSTS ${dst_dir}/${module} ${dst_dir}/${module} ${dst_dir}/${module}
+    )
+endif()
set(module "framework")
set(framework_lib_deps framework_proto data_feed_proto trainer_desc_proto)
......
@@ -17,6 +17,10 @@ if(WITH_TESTING)
include(tests/test.cmake) # some generic cmake function for inference
endif()
if(WIN32)
add_definitions(-DPADDLE_DLL_INFERENCE)
endif()
# TODO(panyx0718): Should this be called paddle_fluid_inference_api_internal?
cc_library(paddle_fluid_api
SRCS io.cc
@@ -81,6 +85,9 @@ cc_library(paddle_fluid_shared SHARED SRCS ${SHARED_INFERENCE_SRCS}
get_property(os_dependency_modules GLOBAL PROPERTY OS_DEPENDENCY_MODULES)
target_link_libraries(paddle_fluid_shared ${os_dependency_modules})
if(WIN32)
target_link_libraries(paddle_fluid_shared gflags)
endif()
set_target_properties(paddle_fluid_shared PROPERTIES OUTPUT_NAME paddle_fluid)
if(NOT APPLE AND NOT WIN32)
......
@@ -33,6 +33,10 @@ cc_library(paddle_pass_builder SRCS paddle_pass_builder.cc)
cc_library(paddle_inference_api SRCS api.cc api_impl.cc helper.cc DEPS lod_tensor scope reset_tensor_array
analysis_config zero_copy_tensor trainer_desc_proto)
if(WIN32)
target_link_libraries(paddle_inference_api gflags)
endif()
set(inference_deps ${analysis_deps} paddle_inference_api analysis naive_executor ${GLOB_PASS_LIB})
if(WITH_GPU AND TENSORRT_FOUND)
......
@@ -119,4 +119,12 @@ std::string get_version() {
return ss.str();
}
#if defined(_WIN32) && defined(PADDLE_ON_INFERENCE)
std::string UpdateDllFlag(const char *name, const char *value) {
return google::SetCommandLineOption(name, value);
}
#endif
} // namespace paddle
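The new UpdateDllFlag wrapper exists because gflags registers flags per module: a flag compiled into paddle_fluid.dll cannot be reached by calling google::SetCommandLineOption from a client executable that links its own copy of gflags, so the call has to be forwarded across the DLL boundary. A minimal, hypothetical usage sketch from the client side (the flag name "free_idle_chunk" is illustrative only, not prescribed by this patch):

// update_flag_demo.cc -- hypothetical Windows client of paddle_fluid.dll.
#include <iostream>
#include <string>
#include "paddle_api.h"  // NOLINT

int main() {
  // Forwards to google::SetCommandLineOption inside the DLL; gflags returns
  // an empty string if the flag does not exist or the value was rejected.
  std::string result = paddle::UpdateDllFlag("free_idle_chunk", "1");
  std::cout << "UpdateDllFlag returned: \"" << result << "\"" << std::endl;
  return 0;
}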
@@ -7,6 +7,10 @@ option(USE_TENSORRT "Compile demo with TensorRT." OFF)
if(NOT WITH_STATIC_LIB)
add_definitions("-DPADDLE_WITH_SHARED_LIB")
else()
# PD_INFER_DECL is mainly used to set the dllimport/dllexport attribute in dynamic library mode.
# Set it to empty in static library mode to avoid compilation issues.
add_definitions("/DPD_INFER_DECL=")
endif()
macro(safe_set_static_flag)
@@ -173,8 +177,8 @@ if(WIN32)
)
endif()
if(NOT WITH_STATIC_LIB)
-    add_custom_command(TARGET ${DEMO_NAME} POST_BUILD
-        COMMAND ${CMAKE_COMMAND} -E copy ${PADDLE_LIB}/paddle/lib/paddle_fluid${CMAKE_SHARED_LIBRARY_SUFFIX} ${CMAKE_BINARY_DIR}/Release
-    )
+    add_custom_command(TARGET ${DEMO_NAME} POST_BUILD
+        COMMAND ${CMAKE_COMMAND} -E copy "${PADDLE_LIB}/paddle/lib/paddle_fluid.dll" ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE}
+    )
endif()
endif()
@@ -145,27 +145,35 @@ void ZeroCopyTensor::copy_to_cpu(T *data) {
#endif
}
}
-template void ZeroCopyTensor::copy_from_cpu<float>(const float *data);
-template void ZeroCopyTensor::copy_from_cpu<int64_t>(const int64_t *data);
-template void ZeroCopyTensor::copy_from_cpu<int32_t>(const int32_t *data);
-template void ZeroCopyTensor::copy_from_cpu<uint8_t>(const uint8_t *data);
-template void ZeroCopyTensor::copy_to_cpu<float>(float *data);
-template void ZeroCopyTensor::copy_to_cpu<int64_t>(int64_t *data);
-template void ZeroCopyTensor::copy_to_cpu<int32_t>(int32_t *data);
-template void ZeroCopyTensor::copy_to_cpu<uint8_t>(uint8_t *data);
-template float *ZeroCopyTensor::data<float>(PaddlePlace *place,
-                                            int *size) const;
-template int64_t *ZeroCopyTensor::data<int64_t>(PaddlePlace *place,
-                                                int *size) const;
-template int32_t *ZeroCopyTensor::data<int32_t>(PaddlePlace *place,
-                                                int *size) const;
-template uint8_t *ZeroCopyTensor::data<uint8_t>(PaddlePlace *place,
-                                                int *size) const;
-template float *ZeroCopyTensor::mutable_data<float>(PaddlePlace place);
-template int64_t *ZeroCopyTensor::mutable_data<int64_t>(PaddlePlace place);
-template int32_t *ZeroCopyTensor::mutable_data<int32_t>(PaddlePlace place);
-template uint8_t *ZeroCopyTensor::mutable_data<uint8_t>(PaddlePlace place);
+template PD_INFER_DECL void ZeroCopyTensor::copy_from_cpu<float>(
+    const float *data);
+template PD_INFER_DECL void ZeroCopyTensor::copy_from_cpu<int64_t>(
+    const int64_t *data);
+template PD_INFER_DECL void ZeroCopyTensor::copy_from_cpu<int32_t>(
+    const int32_t *data);
+template PD_INFER_DECL void ZeroCopyTensor::copy_from_cpu<uint8_t>(
+    const uint8_t *data);
+template PD_INFER_DECL void ZeroCopyTensor::copy_to_cpu<float>(float *data);
+template PD_INFER_DECL void ZeroCopyTensor::copy_to_cpu<int64_t>(int64_t *data);
+template PD_INFER_DECL void ZeroCopyTensor::copy_to_cpu<int32_t>(int32_t *data);
+template PD_INFER_DECL void ZeroCopyTensor::copy_to_cpu<uint8_t>(uint8_t *data);
+template PD_INFER_DECL float *ZeroCopyTensor::data<float>(PaddlePlace *place,
+                                                          int *size) const;
+template PD_INFER_DECL int64_t *ZeroCopyTensor::data<int64_t>(
+    PaddlePlace *place, int *size) const;
+template PD_INFER_DECL int32_t *ZeroCopyTensor::data<int32_t>(
+    PaddlePlace *place, int *size) const;
+template PD_INFER_DECL uint8_t *ZeroCopyTensor::data<uint8_t>(
+    PaddlePlace *place, int *size) const;
+template PD_INFER_DECL float *ZeroCopyTensor::mutable_data<float>(
+    PaddlePlace place);
+template PD_INFER_DECL int64_t *ZeroCopyTensor::mutable_data<int64_t>(
+    PaddlePlace place);
+template PD_INFER_DECL int32_t *ZeroCopyTensor::mutable_data<int32_t>(
+    PaddlePlace place);
+template PD_INFER_DECL uint8_t *ZeroCopyTensor::mutable_data<uint8_t>(
+    PaddlePlace place);
void *ZeroCopyTensor::FindTensor() const {
PADDLE_ENFORCE(!name_.empty(),
......
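The explicit instantiations above gain PD_INFER_DECL because MSVC does not export member function template specializations merely because the enclosing class is marked dllexport; each explicit instantiation definition must carry the attribute itself, or clients of paddle_fluid.dll hit unresolved externals at link time. A self-contained sketch of the pattern, using hypothetical names (Tensor and EXAMPLE_DECL are not part of the Paddle API):

// declspec_instantiation_demo.cc -- illustrative only.
#include <cstdint>

#ifdef _WIN32
#define EXAMPLE_DECL __declspec(dllexport)
#else
#define EXAMPLE_DECL __attribute__((visibility("default")))
#endif

class EXAMPLE_DECL Tensor {
 public:
  template <typename T>
  void copy_from_cpu(const T *data);  // body lives in this .cc only
};

template <typename T>
void Tensor::copy_from_cpu(const T *data) { /* copy elided */ }

// Without EXAMPLE_DECL on these lines, MSVC still compiles the
// specializations but does not export them from the DLL, so a client
// calling copy_from_cpu<float> fails to link.
template EXAMPLE_DECL void Tensor::copy_from_cpu<float>(const float *data);
template EXAMPLE_DECL void Tensor::copy_from_cpu<int64_t>(const int64_t *data);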
@@ -28,10 +28,10 @@ T *ZeroCopyTensor::data(PaddlePlace *place, int *size) const {
return nullptr;
}
-template float *ZeroCopyTensor::data<float>(PaddlePlace *place,
-                                            int *size) const;
-template int64_t *ZeroCopyTensor::data<int64_t>(PaddlePlace *place,
-                                                int *size) const;
+template PD_INFER_DECL float *ZeroCopyTensor::data<float>(PaddlePlace *place,
+                                                          int *size) const;
+template PD_INFER_DECL int64_t *ZeroCopyTensor::data<int64_t>(
+    PaddlePlace *place, int *size) const;
template float *ZeroCopyTensor::mutable_data(PaddlePlace place);
template int64_t *ZeroCopyTensor::mutable_data(PaddlePlace place);
......
@@ -31,9 +31,9 @@
#include <unordered_set>
#include <utility>
#include <vector>
#include "paddle_infer_declare.h" // NOLINT
/*! \file */
// Here we include some header files with relative paths, for that in deploy,
// the abstract path of this header file will be changed.
#include "paddle_api.h" // NOLINT
@@ -60,7 +60,7 @@ struct MkldnnQuantizerConfig;
/// AnalysisConfig,
/// and loading it into AnalysisPredictor.
///
-struct AnalysisConfig {
+struct PD_INFER_DECL AnalysisConfig {
AnalysisConfig() = default;
///
/// \brief Construct a new AnalysisConfig from another
......
@@ -27,6 +27,8 @@
#include <memory>
#include <string>
#include <vector>
#include "gflags/gflags.h" // NOLINT
#include "paddle_infer_declare.h" // NOLINT
/*! \namespace paddle
*/
@@ -86,7 +88,7 @@ enum PaddleDType {
/// delete[] external_memory; // manage the memory lifetime outside.
/// \endcode
///
-class PaddleBuf {
+class PD_INFER_DECL PaddleBuf {
public:
///
/// \brief PaddleBuf allocate memory internally, and manage it.
@@ -151,7 +153,7 @@ class PaddleBuf {
///
/// \brief Basic input and output data structure for PaddlePredictor.
///
-struct PaddleTensor {
+struct PD_INFER_DECL PaddleTensor {
PaddleTensor() = default;
std::string name; ///< variable name.
std::vector<int> shape;
@@ -170,7 +172,7 @@ enum class PaddlePlace { kUNK = -1, kCPU, kGPU };
/// AnalysisPredictor.
/// It is obtained through PaddlePredictor::GetinputTensor()
/// and PaddlePredictor::GetOutputTensor() interface.
-class ZeroCopyTensor {
+class PD_INFER_DECL ZeroCopyTensor {
public:
/// \brief Reset the shape of the tensor.
/// Generally it's only used for the input tensor.
@@ -247,7 +249,7 @@ class ZeroCopyTensor {
/// \brief A Predictor for executing inference on a model.
/// Base class for AnalysisPredictor and NativePaddlePredictor.
-class PaddlePredictor {
+class PD_INFER_DECL PaddlePredictor {
public:
struct Config;
PaddlePredictor() = default;
@@ -339,7 +341,7 @@ class PaddlePredictor {
/// During inference procedure, there are many parameters(model/params path,
/// place of inference, etc.)
///
-struct NativeConfig : public PaddlePredictor::Config {
+struct PD_INFER_DECL NativeConfig : public PaddlePredictor::Config {
/// GPU related fields.
bool use_gpu{false};
int device{0};
@@ -388,6 +390,22 @@ struct NativeConfig : public PaddlePredictor::Config {
template <typename ConfigT>
std::unique_ptr<PaddlePredictor> CreatePaddlePredictor(const ConfigT& config);
struct AnalysisConfig;
struct NativeConfig;
struct DemoConfig;
template <>
PD_INFER_DECL std::unique_ptr<PaddlePredictor>
CreatePaddlePredictor<AnalysisConfig>(const AnalysisConfig& config);
template <>
PD_INFER_DECL std::unique_ptr<PaddlePredictor>
CreatePaddlePredictor<NativeConfig>(const NativeConfig& config);
template <>
PD_INFER_DECL std::unique_ptr<PaddlePredictor>
CreatePaddlePredictor<DemoConfig>(const DemoConfig& config);
/// NOTE The following APIs are too trivial, we will discard it in the following
/// versions.
///
@@ -400,8 +418,20 @@ enum class PaddleEngineKind {
template <typename ConfigT, PaddleEngineKind engine>
std::unique_ptr<PaddlePredictor> CreatePaddlePredictor(const ConfigT& config);
-int PaddleDtypeSize(PaddleDType dtype);
+template <>
+PD_INFER_DECL std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<
+    NativeConfig, PaddleEngineKind::kNative>(const NativeConfig& config);
+template <>
+PD_INFER_DECL std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<
+    AnalysisConfig, PaddleEngineKind::kAnalysis>(const AnalysisConfig& config);
+PD_INFER_DECL int PaddleDtypeSize(PaddleDType dtype);
+PD_INFER_DECL std::string get_version();
-std::string get_version();
#if defined(_WIN32) && defined(PADDLE_ON_INFERENCE)
PD_INFER_DECL std::string UpdateDllFlag(const char* name, const char* value);
#endif
} // namespace paddle
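The explicit-specialization declarations added above serve the same purpose at the API boundary: CreatePaddlePredictor is a function template, and only the specializations actually compiled into the library can be exported, so each one is declared with PD_INFER_DECL. A hedged client-side sketch under those declarations (the model path is a placeholder):

// predictor_demo.cc -- sketch of a client resolving an exported
// specialization of CreatePaddlePredictor.
#include <memory>
#include "paddle_api.h"  // NOLINT

std::unique_ptr<paddle::PaddlePredictor> MakePredictor() {
  paddle::NativeConfig config;
  config.model_dir = "path/to/model";  // placeholder path
  config.use_gpu = false;
  // Links against the PD_INFER_DECL-exported specialization
  // CreatePaddlePredictor<NativeConfig> declared above.
  return paddle::CreatePaddlePredictor<paddle::NativeConfig>(config);
}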
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#if defined(_WIN32)
#ifndef PD_INFER_DECL
#ifdef PADDLE_DLL_INFERENCE
#define PD_INFER_DECL __declspec(dllexport)
#else
#define PD_INFER_DECL __declspec(dllimport)
#endif // PADDLE_DLL_INFERENCE
#endif // PD_INFER_DECL
#else
#define PD_INFER_DECL __attribute__((visibility("default")))
#endif // _WIN32
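The new paddle_infer_declare.h above is the single source of truth for the export macro. How PD_INFER_DECL resolves in each build configuration, shown on a hypothetical class (MyClass is not a real Paddle type):

// Building paddle_fluid.dll (PADDLE_DLL_INFERENCE defined by the inference
// CMakeLists above):  class __declspec(dllexport) MyClass { ... };
// Consuming the installed headers on Windows (macro undefined):
//                     class __declspec(dllimport) MyClass { ... };
// Linux/macOS:        class __attribute__((visibility("default"))) MyClass;
// Static demo build:  /DPD_INFER_DECL= on the compiler line makes it empty.
#include "paddle_infer_declare.h"  // NOLINT

class PD_INFER_DECL MyClass {
 public:
  void Run();
};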
@@ -31,7 +31,8 @@
#include <unordered_set>
#include <vector>
#include "paddle_api.h" // NOLINT
#include "paddle_api.h" // NOLINT
#include "paddle_infer_declare.h" // NOLINT
namespace paddle {
@@ -59,7 +60,7 @@ enum class ScaleAlgo {
/// It is not recommended to use this config directly, please refer to
/// AnalysisConfig::mkldnn_quantizer_config()
///
-struct MkldnnQuantizerConfig {
+struct PD_INFER_DECL MkldnnQuantizerConfig {
///
/// \brief Construct a new Mkldnn Quantizer Config object
///
......
@@ -17,6 +17,7 @@
#include <sstream>
#include <string>
#include <vector>
#include "paddle_infer_declare.h" // NOLINT
///
/// \file paddle_pass_builder.h
@@ -43,7 +44,7 @@ namespace paddle {
/// const vector<string> passes(1, "conv_relu_mkldnn_fuse_pass");
/// PaddlePassBuilder builder(passes);
/// \endcode
-class PaddlePassBuilder {
+class PD_INFER_DECL PaddlePassBuilder {
public:
/// \brief Constructor of the class. It stores the input passes.
/// \param[in] passes passes' types.
@@ -113,7 +114,7 @@ class PaddlePassBuilder {
/// \class PassStrategy
/// \brief This class defines the pass strategies like whether to use gpu/cuDNN
/// kernel/MKLDNN.
-class PassStrategy : public PaddlePassBuilder {
+class PD_INFER_DECL PassStrategy : public PaddlePassBuilder {
public:
/// \brief Constructor of PassStrategy class. It works the same as
/// PaddlePassBuilder class. \param[in] passes passes' types.
@@ -148,7 +149,7 @@ class PassStrategy : public PaddlePassBuilder {
/// \class CpuPassStrategy
/// \brief The CPU passes controller, it is used in AnalysisPredictor with CPU
/// mode.
-class CpuPassStrategy : public PassStrategy {
+class PD_INFER_DECL CpuPassStrategy : public PassStrategy {
public:
/// \brief Default constructor of CpuPassStrategy.
CpuPassStrategy();
@@ -182,7 +183,7 @@ class CpuPassStrategy : public PassStrategy {
/// \class GpuPassStrategy
/// \brief The GPU passes controller, it is used in AnalysisPredictor with GPU
/// mode.
-class GpuPassStrategy : public PassStrategy {
+class PD_INFER_DECL GpuPassStrategy : public PassStrategy {
public:
/// \brief Default constructor of GpuPassStrategy.
GpuPassStrategy();
@@ -212,10 +213,11 @@ class GpuPassStrategy : public PassStrategy {
bool use_cudnn_{false};
/// \endcond
};
/// \brief List of tensorRT subgraph passes.
-extern const std::vector<std::string> kTRTSubgraphPasses;
+PD_INFER_DECL extern const std::vector<std::string> kTRTSubgraphPasses;
/// \brief List of lite subgraph passes.
-extern const std::vector<std::string> kLiteSubgraphPasses;
+PD_INFER_DECL extern const std::vector<std::string> kLiteSubgraphPasses;
} // namespace paddle
@@ -19,11 +19,11 @@
#include <stdio.h>
#if defined(_WIN32)
-#ifdef PADDLE_ON_INFERENCE
+#ifdef PADDLE_DLL_INFERENCE
#define PADDLE_CAPI_EXPORT __declspec(dllexport)
#else
#define PADDLE_CAPI_EXPORT __declspec(dllimport)
-#endif // PADDLE_ON_INFERENCE
+#endif // PADDLE_DLL_INFERENCE
#else
#define PADDLE_CAPI_EXPORT __attribute__((visibility("default")))
#endif // _WIN32
......
@@ -441,6 +441,14 @@ inference_analysis_test(test_analyzer_capi_pd_tensor SRCS analyzer_capi_pd_tenso
EXTRA_DEPS ${INFERENCE_EXTRA_DEPS} paddle_fluid_c
ARGS --infer_model=${MOBILENET_INSTALL_DIR}/model)
inference_analysis_test(test_analyzer_zerocopytensor_tensor SRCS analyzer_zerocopy_tensor_tester.cc
EXTRA_DEPS ${INFERENCE_EXTRA_DEPS}
ARGS --infer_model=${OCR_INSTALL_DIR}/model)
inference_analysis_test(test_analyzer_paddletensor_tensor SRCS analyzer_paddle_tensor_tester.cc
EXTRA_DEPS ${INFERENCE_EXTRA_DEPS}
ARGS --infer_model=${OCR_INSTALL_DIR}/model --infer_data=${OCR_INSTALL_DIR}/data.txt --refer_result=${OCR_INSTALL_DIR}/result.txt)
if(WITH_MKLDNN)
inference_analysis_test(test_analyzer_capi_int SRCS analyzer_capi_int_tester.cc
EXTRA_DEPS ${INFERENCE_EXTRA_DEPS} paddle_fluid_c
......
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/framework/block_desc.h"
#include "paddle/fluid/framework/op_desc.h"
#include "paddle/fluid/framework/program_desc.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/inference/utils/singleton.h"
#include "paddle/fluid/inference/tests/api/tester_helper.h"
namespace paddle {
namespace inference {
struct Record {
std::vector<float> data;
std::vector<int32_t> shape;
};
Record ProcessALine(const std::string &line) {
VLOG(3) << "process a line";
std::vector<std::string> columns;
split(line, '\t', &columns);
CHECK_EQ(columns.size(), 2UL)
<< "data format error, should be <data>\t<shape>";
Record record;
std::vector<std::string> data_strs;
split(columns[0], ' ', &data_strs);
for (auto &d : data_strs) {
record.data.push_back(std::stof(d));
}
std::vector<std::string> shape_strs;
split(columns[1], ' ', &shape_strs);
for (auto &s : shape_strs) {
record.shape.push_back(std::stoi(s));
}
VLOG(3) << "data size " << record.data.size();
VLOG(3) << "data shape size " << record.shape.size();
return record;
}
TEST(test_paddle_tensor, paddle_tensor) {
std::unique_ptr<PaddlePredictor> predictor, analysis_predictor;
AnalysisConfig config;
const std::vector<std::string> passes;
PaddlePassBuilder testPassBuilder(passes);
config.SetModel(FLAGS_infer_model + "/__model__",
FLAGS_infer_model + "/__params__");
predictor = CreatePaddlePredictor<NativeConfig>(config.ToNativeConfig());
analysis_predictor = CreatePaddlePredictor(config);
// Just a single batch of data.
std::string line;
std::ifstream file(FLAGS_infer_data);
std::getline(file, line);
auto record = ProcessALine(line);
file.close();
// Inference.
PaddleTensor input;
input.shape = record.shape;
input.data =
PaddleBuf(record.data.data(), record.data.size() * sizeof(float));
input.dtype = PaddleDType::FLOAT32;
std::vector<PaddleTensor> output, analysis_output;
predictor->Run({input}, &output, 1);
analysis_predictor->Run({input}, &analysis_output, 1);
}
} // namespace inference
} // namespace paddle
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/framework/block_desc.h"
#include "paddle/fluid/framework/op_desc.h"
#include "paddle/fluid/framework/program_desc.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/inference/utils/singleton.h"
#include "paddle/fluid/inference/tests/api/tester_helper.h"
namespace paddle {
namespace inference {
TEST(test_zerocopy_tensor, zerocopy_tensor) {
AnalysisConfig config;
config.SetModel(FLAGS_infer_model + "/__model__",
FLAGS_infer_model + "/__params__");
config.SwitchUseFeedFetchOps(false);
auto predictor = CreatePaddlePredictor(config);
int batch_size = 1;
int channels = 1;
int height = 48;
int width = 512;
int nums = batch_size * channels * height * width;
float* input = new float[nums];
for (int i = 0; i < nums; ++i) input[i] = 0;
auto input_names = predictor->GetInputNames();
PaddlePlace p = PaddlePlace::kCPU;
PaddlePlace* place = &p;
int size;
auto input_t = predictor->GetInputTensor(input_names[0]);
input_t->Reshape({batch_size, channels, height, width});
input_t->copy_from_cpu<float>(input);
input_t->data<float>(place, &size);
input_t->mutable_data<float>(p);
predictor->ZeroCopyRun();
std::vector<float> out_data;
auto output_names = predictor->GetOutputNames();
auto output_t = predictor->GetOutputTensor(output_names[0]);
std::vector<int> output_shape = output_t->shape();
int out_num = std::accumulate(output_shape.begin(), output_shape.end(), 1,
std::multiplies<int>());
out_data.resize(out_num);
output_t->copy_to_cpu<float>(out_data.data());
}
} // namespace inference
} // namespace paddle