diff --git a/CMakeLists.txt b/CMakeLists.txt
index 4861e8065a8a70bcc3b859b121cf404461f47ba7..0ab80987b3ad6c4793ceeac1bf3808d2e87fbd5b 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -282,7 +282,3 @@ if(WITH_DOC)
   find_python_module(recommonmark REQUIRED)
   add_subdirectory(doc)
 endif()
-
-if (WITH_CONTRIB)
-  add_subdirectory(paddle/contrib)
-endif()
diff --git a/cmake/inference_lib.cmake b/cmake/inference_lib.cmake
index c6979713231f631f8757e4139d6f685d4554b54e..e2c58cd56055455e7fedc598ca8f56183d4b51dc 100644
--- a/cmake/inference_lib.cmake
+++ b/cmake/inference_lib.cmake
@@ -138,25 +138,24 @@ copy(memory_lib
 set(inference_deps paddle_fluid_shared paddle_fluid)
 
-if(WITH_CONTRIB)
-  message(STATUS "installing contrib")
-  set(contrib_dst_dir "${FLUID_INSTALL_DIR}/contrib/inference")
-  if (WITH_ANAKIN AND WITH_GPU)
-    copy(contrib_anakin_inference_lib DEPS paddle_inference_api inference_anakin_api
-      SRCS
-      ${PADDLE_BINARY_DIR}/paddle/contrib/inference/libinference_anakin_api* # compiled anakin api
-      ${PADDLE_BINARY_DIR}/third_party/install/anakin/*.tar.gz # anakin release
-      DSTS ${contrib_dst_dir}/anakin ${contrib_dst_dir}/anakin)
-    list(APPEND inference_deps contrib_anakin_inference_lib)
-  endif()
-
-  copy(contrib_inference_lib DEPS paddle_inference_api paddle_inference_api_shared
-    SRCS ${PADDLE_SOURCE_DIR}/paddle/contrib/inference/paddle_inference_api.h
-         ${PADDLE_BINARY_DIR}/paddle/contrib/inference/libpaddle_inference_api*
-    DSTS ${contrib_dst_dir} ${contrib_dst_dir})
-  list(APPEND inference_deps contrib_inference_lib)
+set(module "inference/api")
+if (WITH_ANAKIN AND WITH_GPU)
+  copy(anakin_inference_lib DEPS paddle_inference_api inference_anakin_api
+    SRCS
+    ${PADDLE_BINARY_DIR}/paddle/fluid/inference/api/libinference_anakin_api* # compiled anakin api
+    ${PADDLE_BINARY_DIR}/third_party/install/anakin/*.tar.gz # anakin release
+    DSTS ${dst_dir}/inference/anakin ${dst_dir}/inference/anakin)
+  list(APPEND inference_deps anakin_inference_lib)
 endif()
+copy(inference_api_lib DEPS paddle_inference_api paddle_inference_api_shared
+  SRCS ${src_dir}/${module}/paddle_inference_api.h
+       ${src_dir}/${module}/demo_ci
+       ${PADDLE_BINARY_DIR}/paddle/fluid/inference/api/libpaddle_inference_api*
+  DSTS ${dst_dir}/inference ${dst_dir}/inference ${dst_dir}/inference
+)
+list(APPEND inference_deps inference_api_lib)
+
 set(module "inference")
 copy(inference_lib DEPS ${inference_deps}
   SRCS ${src_dir}/${module}/*.h ${PADDLE_BINARY_DIR}/paddle/fluid/inference/libpaddle_fluid.*
diff --git a/paddle/contrib/CMakeLists.txt b/paddle/contrib/CMakeLists.txt
deleted file mode 100644
index 4b19256ef4533a09162edf907f6cd51146517e46..0000000000000000000000000000000000000000
--- a/paddle/contrib/CMakeLists.txt
+++ /dev/null
@@ -1,16 +0,0 @@
-# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-add_subdirectory(inference)
diff --git a/paddle/fluid/inference/CMakeLists.txt b/paddle/fluid/inference/CMakeLists.txt
index 86643b9aa111a9d73c184097e0c43bbec30c6b31..c9eff0fc28c5ff52f902d3d5a0ebb37fa7619e9c 100644
--- a/paddle/fluid/inference/CMakeLists.txt
+++ b/paddle/fluid/inference/CMakeLists.txt
@@ -5,7 +5,7 @@ if (TENSORRT_FOUND)
   add_subdirectory(tensorrt)
 endif()
 
-set(FLUID_CORE_MODULES proto_desc memory lod_tensor executor )
+set(FLUID_CORE_MODULES proto_desc memory lod_tensor executor)
 
 # TODO(panyx0718): Should this be called paddle_fluid_inference_api_internal?
 cc_library(paddle_fluid_api
@@ -38,3 +38,4 @@ if(WITH_TESTING)
   # both tests/book and analysis depends the models that generated by python/paddle/fluid/tests/book
   add_subdirectory(tests/book)
 endif()
+add_subdirectory(api)
diff --git a/paddle/contrib/inference/CMakeLists.txt b/paddle/fluid/inference/api/CMakeLists.txt
similarity index 84%
rename from paddle/contrib/inference/CMakeLists.txt
rename to paddle/fluid/inference/api/CMakeLists.txt
index df470a0752121e44f4305fd3609c5c41985dfaf7..9d63d08dedf6a1bcdacc51bb83d2ed261bca4117 100644
--- a/paddle/contrib/inference/CMakeLists.txt
+++ b/paddle/fluid/inference/api/CMakeLists.txt
@@ -43,21 +43,21 @@ function(inference_api_test TARGET_NAME)
 endfunction(inference_api_test)
 
 cc_library(paddle_inference_api
-  SRCS paddle_inference_api.cc paddle_inference_api_impl.cc
+  SRCS api.cc api_impl.cc
   DEPS ${FLUID_CORE_MODULES} ${GLOB_OP_LIB})
 if(NOT APPLE)
-  set(LINK_FLAGS "-Wl,--retain-symbols-file ${CMAKE_CURRENT_SOURCE_DIR}/paddle_inference_api.sym")
+  set(LINK_FLAGS "-Wl,--retain-symbols-file ${CMAKE_CURRENT_SOURCE_DIR}/api.sym")
   set_target_properties(paddle_inference_api PROPERTIES LINK_FLAGS "${LINK_FLAGS}")
 endif()
 
 # Here the shared library doesn't depend on other fluid libraries, or double free will occur.
 cc_library(paddle_inference_api_shared SHARED
-  SRCS paddle_inference_api.cc paddle_inference_api_impl.cc)
+  SRCS api.cc api_impl.cc)
 add_dependencies(paddle_inference_api_shared ${FLUID_CORE_MODULES} ${GLOB_OP_LIB})
 set_target_properties(paddle_inference_api_shared PROPERTIES OUTPUT_NAME paddle_inference_api)
 if(NOT APPLE)
-  set(LINK_FLAGS "-Wl,--version-script ${CMAKE_CURRENT_SOURCE_DIR}/paddle_inference_api.map")
+  set(LINK_FLAGS "-Wl,--version-script ${CMAKE_CURRENT_SOURCE_DIR}/api.map")
   set_target_properties(paddle_inference_api_shared PROPERTIES LINK_FLAGS "${LINK_FLAGS}")
   FILE(WRITE ${CMAKE_CURRENT_BINARY_DIR}/check_symbol.cmake
     "execute_process(COMMAND bash -c \"${CMAKE_CURRENT_SOURCE_DIR}/check_symbol.sh"
@@ -73,32 +73,32 @@ if(NOT APPLE)
 endif()
 
 cc_test(test_paddle_inference_api
-    SRCS test_paddle_inference_api.cc
+    SRCS test_api.cc
     DEPS paddle_inference_api)
 
-inference_api_test(test_paddle_inference_api_impl
+inference_api_test(test_api_impl
     ARGS test_word2vec test_image_classification)
 
 if(WITH_GPU AND TENSORRT_FOUND)
 cc_library(paddle_inference_tensorrt_subgraph_engine
-    SRCS paddle_inference_api_tensorrt_subgraph_engine.cc
-    DEPS paddle_inference_api analysis tensorrt_engine paddle_inference_api paddle_fluid_api)
+    SRCS api_tensorrt_subgraph_engine.cc
+    DEPS paddle_inference_api analysis tensorrt_engine paddle_fluid_api)
 
-inference_api_test(test_paddle_inference_api_tensorrt_subgraph_engine ARGS test_word2vec)
+inference_api_test(test_api_tensorrt_subgraph_engine ARGS test_word2vec)
 endif()
 
 if (WITH_ANAKIN) # only needed in CI
   # Due to Anakin do not have official library releases and the versions of protobuf and cuda do not match Paddle's,
   # so anakin library will not be merged to our official inference library. To use anakin prediction API, one need to
   # compile the libinference_anakin_api.a and compile with anakin.so.
-  nv_library(inference_anakin_api SRCS paddle_inference_api.cc paddle_inference_api_anakin_engine.cc)
-  nv_library(inference_anakin_api_shared SHARED SRCS paddle_inference_api.cc paddle_inference_api_anakin_engine.cc)
+  nv_library(inference_anakin_api SRCS api.cc api_anakin_engine.cc)
+  nv_library(inference_anakin_api_shared SHARED SRCS api.cc api_anakin_engine.cc)
   target_compile_options(inference_anakin_api BEFORE PUBLIC ${ANAKIN_COMPILE_EXTRA_FLAGS})
   target_compile_options(inference_anakin_api_shared BEFORE PUBLIC ${ANAKIN_COMPILE_EXTRA_FLAGS})
   target_link_libraries(inference_anakin_api anakin anakin_saber_common)
   target_link_libraries(inference_anakin_api_shared anakin anakin_saber_common)
   if (WITH_TESTING)
-    cc_test(inference_anakin_test SRCS paddle_inference_api_anakin_engine_tester.cc
+    cc_test(inference_anakin_test SRCS api_anakin_engine_tester.cc
             ARGS --model=${ANAKIN_INSTALL_DIR}/mobilenet_v2.anakin.bin
             DEPS inference_anakin_api)
     target_compile_options(inference_anakin_test BEFORE PUBLIC ${ANAKIN_COMPILE_EXTRA_FLAGS})
diff --git a/paddle/contrib/inference/README.md b/paddle/fluid/inference/api/README.md
similarity index 100%
rename from paddle/contrib/inference/README.md
rename to paddle/fluid/inference/api/README.md
diff --git a/paddle/contrib/inference/paddle_inference_api.cc b/paddle/fluid/inference/api/api.cc
similarity index 96%
rename from paddle/contrib/inference/paddle_inference_api.cc
rename to paddle/fluid/inference/api/api.cc
index 4fe198ad7d4a752882965e9e7fc460741de53d22..e74f23ff969f5a8f58a71da337c16dcbc14f10c0 100644
--- a/paddle/contrib/inference/paddle_inference_api.cc
+++ b/paddle/fluid/inference/api/api.cc
@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
 
-#include "paddle/contrib/inference/paddle_inference_api.h"
+#include "paddle/fluid/inference/api/paddle_inference_api.h"
 
 namespace paddle {
diff --git a/paddle/contrib/inference/paddle_inference_api.map b/paddle/fluid/inference/api/api.map
similarity index 100%
rename from paddle/contrib/inference/paddle_inference_api.map
rename to paddle/fluid/inference/api/api.map
diff --git a/paddle/contrib/inference/paddle_inference_api.sym b/paddle/fluid/inference/api/api.sym
similarity index 100%
rename from paddle/contrib/inference/paddle_inference_api.sym
rename to paddle/fluid/inference/api/api.sym
diff --git a/paddle/contrib/inference/paddle_inference_api_anakin_engine.cc b/paddle/fluid/inference/api/api_anakin_engine.cc
similarity index 90%
rename from paddle/contrib/inference/paddle_inference_api_anakin_engine.cc
rename to paddle/fluid/inference/api/api_anakin_engine.cc
index daf0b56825498d48ca48d1d5b0ea6a3d7314bfc4..f6f3cb335897b02905e24c229b92f3940a37dbf8 100644
--- a/paddle/contrib/inference/paddle_inference_api_anakin_engine.cc
+++ b/paddle/fluid/inference/api/api_anakin_engine.cc
@@ -12,8 +12,9 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
-#include "paddle/contrib/inference/paddle_inference_api_anakin_engine.h" +#include "paddle/fluid/inference/api/api_anakin_engine.h" #include +#include namespace paddle { @@ -47,8 +48,7 @@ bool PaddleInferenceAnakinPredictor::Run( } auto d_tensor_in_p = executor_.get_in(input.name); float *d_data_p = d_tensor_in_p->mutable_data(); - if (cudaMemcpy(d_data_p, - static_cast(input.data.data()), + if (cudaMemcpy(d_data_p, static_cast(input.data.data()), d_tensor_in_p->valid_size() * sizeof(float), cudaMemcpyHostToDevice) != 0) { LOG(ERROR) << "copy data from CPU to GPU error"; @@ -70,8 +70,7 @@ bool PaddleInferenceAnakinPredictor::Run( output.data.Resize(tensor->valid_size() * sizeof(float)); } // Copy data from GPU -> CPU - if (cudaMemcpy(output.data.data(), - tensor->mutable_data(), + if (cudaMemcpy(output.data.data(), tensor->mutable_data(), tensor->valid_size() * sizeof(float), cudaMemcpyDeviceToHost) != 0) { LOG(ERROR) << "copy data from GPU to CPU error"; @@ -106,13 +105,12 @@ std::unique_ptr PaddleInferenceAnakinPredictor::Clone() { // A factory to help create difference predictor. template <> -std::unique_ptr -CreatePaddlePredictor( - const AnakinConfig &config) { +std::unique_ptr CreatePaddlePredictor< + AnakinConfig, PaddleEngineKind::kAnakin>(const AnakinConfig &config) { VLOG(3) << "Anakin Predictor create."; std::unique_ptr x( new PaddleInferenceAnakinPredictor(config)); return x; -}; +} } // namespace paddle diff --git a/paddle/contrib/inference/paddle_inference_api_anakin_engine.h b/paddle/fluid/inference/api/api_anakin_engine.h similarity index 88% rename from paddle/contrib/inference/paddle_inference_api_anakin_engine.h rename to paddle/fluid/inference/api/api_anakin_engine.h index 212ba41cdf8ff2feccb6b6498f9679d76a2efe7c..85ca83cd00756cca04d7b92437e9955d8ab297e7 100644 --- a/paddle/contrib/inference/paddle_inference_api_anakin_engine.h +++ b/paddle/fluid/inference/api/api_anakin_engine.h @@ -19,7 +19,8 @@ limitations under the License. */ #pragma once -#include "paddle/contrib/inference/paddle_inference_api.h" +#include +#include "paddle/fluid/inference/api/paddle_inference_api.h" // from anakin #include "framework/core/net/net.h" @@ -31,7 +32,7 @@ class PaddleInferenceAnakinPredictor : public PaddlePredictor { public: PaddleInferenceAnakinPredictor() {} - PaddleInferenceAnakinPredictor(const AnakinConfig& config); + explicit PaddleInferenceAnakinPredictor(const AnakinConfig& config); // NOTE Unlike the native engine, the buffers of anakin engine's output_data // should be allocated first. @@ -48,8 +49,7 @@ class PaddleInferenceAnakinPredictor : public PaddlePredictor { private: bool Init(const AnakinConfig& config); - anakin::graph::Graph graph_; anakin::Net diff --git a/paddle/contrib/inference/paddle_inference_api_anakin_engine_tester.cc b/paddle/fluid/inference/api/api_anakin_engine_tester.cc similarity index 97% rename from paddle/contrib/inference/paddle_inference_api_anakin_engine_tester.cc rename to paddle/fluid/inference/api/api_anakin_engine_tester.cc index f92e9d4190412f5847e353ef1dc0324cad668c9a..d6d631bfbad4278fe99e4553a410a9d9162dcc7b 100644 --- a/paddle/contrib/inference/paddle_inference_api_anakin_engine_tester.cc +++ b/paddle/fluid/inference/api/api_anakin_engine_tester.cc @@ -16,7 +16,7 @@ limitations under the License. 
 
 #include <gflags/gflags.h>
 #include <gtest/gtest.h>
-#include "paddle/contrib/inference/paddle_inference_api.h"
+#include "paddle/fluid/inference/api/paddle_inference_api.h"
 
 DEFINE_string(model, "", "Directory of the inference model.");
diff --git a/paddle/contrib/inference/paddle_inference_api_impl.cc b/paddle/fluid/inference/api/api_impl.cc
similarity index 92%
rename from paddle/contrib/inference/paddle_inference_api_impl.cc
rename to paddle/fluid/inference/api/api_impl.cc
index b1e5b875981e0142f6970cf6864b7b598743654b..9d9e126e134deafc18f10f7299e81d92702e9ca0 100644
--- a/paddle/contrib/inference/paddle_inference_api_impl.cc
+++ b/paddle/fluid/inference/api/api_impl.cc
@@ -21,7 +21,7 @@ limitations under the License. */
 #include <utility>
 #include <vector>
 
-#include "paddle/contrib/inference/paddle_inference_api_impl.h"
+#include "paddle/fluid/inference/api/api_impl.h"
 
 namespace paddle {
 namespace {
@@ -77,8 +77,8 @@ bool NativePaddlePredictor::Init(
   if (!config_.model_dir.empty()) {
     // Parameters are saved in separate files sited in
     // the specified `dirname`.
-    inference_program_ = paddle::inference::Load(
-        executor_.get(), scope_.get(), config_.model_dir);
+    inference_program_ = paddle::inference::Load(executor_.get(), scope_.get(),
+                                                 config_.model_dir);
   } else if (!config_.prog_file.empty() && !config_.param_file.empty()) {
     // All parameters are saved in a single file.
     // The file names should be consistent with that used
@@ -91,8 +91,8 @@ bool NativePaddlePredictor::Init(
   }
 
   ctx_ = executor_->Prepare(*inference_program_, 0);
-  executor_->CreateVariables(
-      *inference_program_, sub_scope_ ? sub_scope_ : scope_.get(), 0);
+  executor_->CreateVariables(*inference_program_,
+                             sub_scope_ ? sub_scope_ : scope_.get(), 0);
 
   // Get the feed_target_names and fetch_target_names
   feed_target_names_ = inference_program_->GetFeedTargetNames();
@@ -105,7 +105,7 @@ NativePaddlePredictor::~NativePaddlePredictor() {
     PADDLE_ENFORCE_NOT_NULL(scope_, "Should have parent scope!");
     scope_->DeleteScope(sub_scope_);
   }
-};
+}
 
 bool NativePaddlePredictor::Run(const std::vector<PaddleTensor> &inputs,
                                 std::vector<PaddleTensor> *output_data) {
@@ -134,10 +134,8 @@ bool NativePaddlePredictor::Run(const std::vector<PaddleTensor> &inputs,
   // if share variables, we need not create variables
   VLOG(4) << "Run prepared context";
   executor_->RunPreparedContext(
-      ctx_.get(),
-      sub_scope_ != nullptr ? sub_scope_ : scope_.get(),
-      &feed_targets,
-      &fetch_targets,
+      ctx_.get(), sub_scope_ != nullptr ? sub_scope_ : scope_.get(),
+      &feed_targets, &fetch_targets,
       false /* don't create variable eatch time */);
   VLOG(4) << "Finish prepared context";
   if (!GetFetch(fetchs, output_data)) {
@@ -181,8 +179,7 @@ bool NativePaddlePredictor::SetFeed(const std::vector<PaddleTensor> &inputs,
     }
 
     // TODO(panyx0718): Init LoDTensor from existing memcpy to save a copy.
-    std::memcpy(static_cast<void *>(input_ptr),
-                inputs[i].data.data(),
+    std::memcpy(static_cast<void *>(input_ptr), inputs[i].data.data(),
                 inputs[i].data.length());
     feeds->push_back(input);
   }
@@ -232,8 +229,7 @@ bool NativePaddlePredictor::GetFetch(
         size_t start = lod[0][j - 1] * common_dim;
         size_t end = lod[0][j] * common_dim;
         if (end > start) {
-          std::copy(output_ptr + start,
-                    output_ptr + end,
+          std::copy(output_ptr + start, output_ptr + end,
                     data.begin() + (j - 1) * max_dim * common_dim);
         }
       }
@@ -257,15 +253,13 @@ bool NativePaddlePredictor::GetFetch(
 }
 
 template <>
-std::unique_ptr<PaddlePredictor>
-CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(
-    const NativeConfig &config) {
+std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<
+    NativeConfig, PaddleEngineKind::kNative>(const NativeConfig &config) {
   VLOG(3) << "create NativePaddlePredictor";
   if (config.use_gpu) {
     // 1. GPU memeroy
     PADDLE_ENFORCE_GT(
-        config.fraction_of_gpu_memory,
-        0.f,
+        config.fraction_of_gpu_memory, 0.f,
         "fraction_of_gpu_memory in the config should be set to range (0., 1.]");
     PADDLE_ENFORCE_GE(config.device, 0, "Invalid device id %d", config.device);
     std::vector<std::string> flags;
diff --git a/paddle/contrib/inference/paddle_inference_api_impl.h b/paddle/fluid/inference/api/api_impl.h
similarity index 97%
rename from paddle/contrib/inference/paddle_inference_api_impl.h
rename to paddle/fluid/inference/api/api_impl.h
index f9ec6f55449fc46b4a44b9563980cb5f8e80a951..92e693578ab657004f3c40c09b979897afea1e1f 100644
--- a/paddle/contrib/inference/paddle_inference_api_impl.h
+++ b/paddle/fluid/inference/api/api_impl.h
@@ -19,7 +19,7 @@
 #include <string>
 #include <vector>
 
-#include "paddle/contrib/inference/paddle_inference_api.h"
+#include "paddle/fluid/inference/api/paddle_inference_api.h"
 
 #include "paddle/fluid/framework/ddim.h"
 #include "paddle/fluid/framework/lod_tensor.h"
diff --git a/paddle/contrib/inference/paddle_inference_api_tensorrt_subgraph_engine.cc b/paddle/fluid/inference/api/api_tensorrt_subgraph_engine.cc
similarity index 93%
rename from paddle/contrib/inference/paddle_inference_api_tensorrt_subgraph_engine.cc
rename to paddle/fluid/inference/api/api_tensorrt_subgraph_engine.cc
index a11396cee91a758e86af2efd9e58b9da68442590..0cdc88fa1eaf3935ce0da143e1e91eb84cd70dcf 100644
--- a/paddle/contrib/inference/paddle_inference_api_tensorrt_subgraph_engine.cc
+++ b/paddle/fluid/inference/api/api_tensorrt_subgraph_engine.cc
@@ -12,9 +12,9 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-#include "paddle/contrib/inference/paddle_inference_api.h"
-#include "paddle/contrib/inference/paddle_inference_api_impl.h"
 #include "paddle/fluid/inference/analysis/analyzer.h"
+#include "paddle/fluid/inference/api/api_impl.h"
+#include "paddle/fluid/inference/api/paddle_inference_api.h"
 #include "paddle/fluid/inference/utils/singleton.h"
 
 namespace paddle {
@@ -77,8 +77,8 @@ class TensorRTSubgraphPredictor : public NativePaddlePredictor {
     ctx_ = executor_->Prepare(*inference_program_, 0);
 
     VLOG(5) << "to create variables";
-    executor_->CreateVariables(
-        *inference_program_, sub_scope_ ? sub_scope_ : scope_.get(), 0);
+    executor_->CreateVariables(*inference_program_,
+                               sub_scope_ ? sub_scope_ : scope_.get(), 0);
 
     // Get the feed_target_names and fetch_target_names
     feed_target_names_ = inference_program_->GetFeedTargetNames();
@@ -98,8 +98,7 @@ CreatePaddlePredictor<TensorRTConfig, PaddleEngineKind::kAutoMixedTensorRT>(
   if (config.use_gpu) {
     // 1. GPU memeroy
     PADDLE_ENFORCE_GT(
-        config.fraction_of_gpu_memory,
-        0.f,
+        config.fraction_of_gpu_memory, 0.f,
         "fraction_of_gpu_memory in the config should be set to range (0., 1.]");
     PADDLE_ENFORCE_GE(config.device, 0, "Invalid device id %d", config.device);
     std::vector<std::string> flags;
diff --git a/paddle/contrib/inference/check_symbol.sh b/paddle/fluid/inference/api/check_symbol.sh
similarity index 100%
rename from paddle/contrib/inference/check_symbol.sh
rename to paddle/fluid/inference/api/check_symbol.sh
diff --git a/paddle/contrib/inference/demo_ci/.gitignore b/paddle/fluid/inference/api/demo_ci/.gitignore
similarity index 100%
rename from paddle/contrib/inference/demo_ci/.gitignore
rename to paddle/fluid/inference/api/demo_ci/.gitignore
diff --git a/paddle/contrib/inference/demo_ci/CMakeLists.txt b/paddle/fluid/inference/api/demo_ci/CMakeLists.txt
similarity index 95%
rename from paddle/contrib/inference/demo_ci/CMakeLists.txt
rename to paddle/fluid/inference/api/demo_ci/CMakeLists.txt
index 33a0dfaf88b692f5d290920d97a650ad67f3dbca..7f9bb4b33e97b5ea37e9216b00ce0c82ca3ce230 100644
--- a/paddle/contrib/inference/demo_ci/CMakeLists.txt
+++ b/paddle/fluid/inference/api/demo_ci/CMakeLists.txt
@@ -55,11 +55,11 @@ endif()
 # Note: libpaddle_inference_api.so/a must put before libpaddle_fluid.so/a
 if(WITH_STATIC_LIB)
   set(DEPS
-      ${PADDLE_LIB}/contrib/inference/libpaddle_inference_api.a
+      ${PADDLE_LIB}/paddle/fluid/inference/libpaddle_inference_api.a
       ${PADDLE_LIB}/paddle/fluid/inference/libpaddle_fluid.a)
 else()
   set(DEPS
-      ${PADDLE_LIB}/contrib/inference/libpaddle_inference_api.so
+      ${PADDLE_LIB}/paddle/fluid/inference/libpaddle_inference_api.so
       ${PADDLE_LIB}/paddle/fluid/inference/libpaddle_fluid.so)
 endif()
 set(EXTERNAL_LIB "-lrt -ldl -lpthread")
diff --git a/paddle/contrib/inference/demo_ci/README.md b/paddle/fluid/inference/api/demo_ci/README.md
similarity index 100%
rename from paddle/contrib/inference/demo_ci/README.md
rename to paddle/fluid/inference/api/demo_ci/README.md
diff --git a/paddle/contrib/inference/demo_ci/run.sh b/paddle/fluid/inference/api/demo_ci/run.sh
similarity index 98%
rename from paddle/contrib/inference/demo_ci/run.sh
rename to paddle/fluid/inference/api/demo_ci/run.sh
index 87690ac9603e1e54a46a761149798b75ea8a7990..3e829dd726b132844a45427b7b0b39eedf197496 100755
--- a/paddle/contrib/inference/demo_ci/run.sh
+++ b/paddle/fluid/inference/api/demo_ci/run.sh
@@ -64,7 +64,7 @@ for WITH_STATIC_LIB in ON OFF; do
       -DWITH_GPU=$TEST_GPU_CPU \
       -DWITH_STATIC_LIB=$WITH_STATIC_LIB
     make -j
-    for use_gpu in false; do
+    for use_gpu in $use_gpu_list; do
      for vis_demo_name in $vis_demo_list; do
         ./vis_demo \
           --modeldir=../data/$vis_demo_name/model \
diff --git a/paddle/contrib/inference/demo_ci/simple_on_word2vec.cc b/paddle/fluid/inference/api/demo_ci/simple_on_word2vec.cc
similarity index 93%
rename from paddle/contrib/inference/demo_ci/simple_on_word2vec.cc
rename to paddle/fluid/inference/api/demo_ci/simple_on_word2vec.cc
index 9713837f86d40383da946af1681e1945c84336b0..5f96fecf93f7a6c42bc6b9fe4e0d985c626388d7 100644
--- a/paddle/contrib/inference/demo_ci/simple_on_word2vec.cc
+++ b/paddle/fluid/inference/api/demo_ci/simple_on_word2vec.cc
@@ -19,8 +19,8 @@ limitations under the License. */
 #include <gflags/gflags.h>
 #include <glog/logging.h>
 #include <memory>
-#include <thread>
-#include "contrib/inference/paddle_inference_api.h"
+#include <thread>  //NOLINT
+#include "paddle/fluid/inference/paddle_inference_api.h"
 #include "paddle/fluid/platform/enforce.h"
 
 DEFINE_string(dirname, "", "Directory of the inference model.");
@@ -63,8 +63,8 @@ void Main(bool use_gpu) {
     PADDLE_ENFORCE(outputs.size(), 1UL);
     // Check the output buffer size and result of each tid.
     PADDLE_ENFORCE(outputs.front().data.length(), 33168UL);
-    float result[5] = {
-        0.00129761, 0.00151112, 0.000423564, 0.00108815, 0.000932706};
+    float result[5] = {0.00129761, 0.00151112, 0.000423564, 0.00108815,
+                       0.000932706};
     const size_t num_elements = outputs.front().data.length() / sizeof(float);
     // The outputs' buffers are in CPU memory.
     for (size_t i = 0; i < std::min(5UL, num_elements); i++) {
@@ -107,8 +107,8 @@ void MainThreads(int num_threads, bool use_gpu) {
       PADDLE_ENFORCE(outputs.size(), 1UL);
       // Check the output buffer size and result of each tid.
       PADDLE_ENFORCE(outputs.front().data.length(), 33168UL);
-      float result[5] = {
-          0.00129761, 0.00151112, 0.000423564, 0.00108815, 0.000932706};
+      float result[5] = {0.00129761, 0.00151112, 0.000423564, 0.00108815,
+                         0.000932706};
       const size_t num_elements = outputs.front().data.length() / sizeof(float);
       // The outputs' buffers are in CPU memory.
diff --git a/paddle/contrib/inference/demo_ci/utils.h b/paddle/fluid/inference/api/demo_ci/utils.h
similarity index 93%
rename from paddle/contrib/inference/demo_ci/utils.h
rename to paddle/fluid/inference/api/demo_ci/utils.h
index 017b39edaf1dd8fb9d3b66f278abcf9ba7991ba9..cb8990671162dff47228736e69617229528cc093 100644
--- a/paddle/contrib/inference/demo_ci/utils.h
+++ b/paddle/fluid/inference/api/demo_ci/utils.h
@@ -13,16 +13,15 @@
 // limitations under the License.
 
 #pragma once
+#include <algorithm>
 #include <string>
 #include <vector>
-
-#include "contrib/inference/paddle_inference_api.h"
+#include "paddle/fluid/inference/paddle_inference_api.h"
 
 namespace paddle {
 namespace demo {
 
-static void split(const std::string& str,
-                  char sep,
+static void split(const std::string& str, char sep,
                   std::vector<std::string>* pieces) {
   pieces->clear();
   if (str.empty()) {
diff --git a/paddle/contrib/inference/demo_ci/vis_demo.cc b/paddle/fluid/inference/api/demo_ci/vis_demo.cc
similarity index 99%
rename from paddle/contrib/inference/demo_ci/vis_demo.cc
rename to paddle/fluid/inference/api/demo_ci/vis_demo.cc
index 1c570a9f658be5641ac2b3a288376bdf19b293a1..0a2a2b713ab21a3124d8a85ba469f64278623ec4 100644
--- a/paddle/contrib/inference/demo_ci/vis_demo.cc
+++ b/paddle/fluid/inference/api/demo_ci/vis_demo.cc
@@ -29,8 +29,7 @@
 DECLARE_double(fraction_of_gpu_memory_to_use);
 DEFINE_string(modeldir, "", "Directory of the inference model.");
 DEFINE_string(refer, "", "path to reference result for comparison.");
 DEFINE_string(
-    data,
-    "",
+    data, "",
     "path of data; each line is a record, format is "
     "'<space splitted floats as data>\t<space splitted ints as shape'");
diff --git a/paddle/contrib/inference/test_paddle_inference_api.cc b/paddle/fluid/inference/api/test_api.cc
rename from paddle/contrib/inference/test_paddle_inference_api.cc
rename to paddle/fluid/inference/api/test_api.cc
--- a/paddle/contrib/inference/test_paddle_inference_api.cc
+++ b/paddle/fluid/inference/api/test_api.cc
@@ -15,7 +15,7 @@ limitations under the License. */
-#include "paddle/contrib/inference/paddle_inference_api.h"
 
 #include <glog/logging.h>
 #include <gtest/gtest.h>
+#include "paddle/fluid/inference/api/paddle_inference_api.h"
 
 namespace paddle {
diff --git a/paddle/contrib/inference/test_paddle_inference_api_impl.cc b/paddle/fluid/inference/api/test_api_impl.cc
similarity index 98%
rename from paddle/contrib/inference/test_paddle_inference_api_impl.cc
rename to paddle/fluid/inference/api/test_api_impl.cc
index c3649dcb96c77f449d876bef34c4aea7afb31daa..fc1364b80ac1ee2d304eb2fe429eae5f56967516 100644
--- a/paddle/contrib/inference/test_paddle_inference_api_impl.cc
+++ b/paddle/fluid/inference/api/test_api_impl.cc
@@ -15,10 +15,10 @@ limitations under the License. */
 #include <glog/logging.h>
 #include <gtest/gtest.h>
-#include <thread>
+#include <thread>  // NOLINT
 
 #include "gflags/gflags.h"
-#include "paddle/contrib/inference/paddle_inference_api_impl.h"
+#include "paddle/fluid/inference/api/api_impl.h"
 #include "paddle/fluid/inference/tests/test_helper.h"
 
 DEFINE_string(dirname, "", "Directory of the inference model.");
@@ -121,8 +121,8 @@ void MainImageClassification(bool use_gpu) {
   // which should be in the range [0.0, 1.0].
   feed_target_shapes[0][0] = batch_size;
   framework::DDim input_dims = framework::make_ddim(feed_target_shapes[0]);
-  SetupTensor<float>(
-      &input, input_dims, static_cast<float>(0), static_cast<float>(1));
+  SetupTensor<float>(&input, input_dims, static_cast<float>(0),
+                     static_cast<float>(1));
   std::vector<framework::LoDTensor*> cpu_feeds;
   cpu_feeds.push_back(&input);
diff --git a/paddle/contrib/inference/test_paddle_inference_api_tensorrt_subgraph_engine.cc b/paddle/fluid/inference/api/test_api_tensorrt_subgraph_engine.cc
similarity index 96%
rename from paddle/contrib/inference/test_paddle_inference_api_tensorrt_subgraph_engine.cc
rename to paddle/fluid/inference/api/test_api_tensorrt_subgraph_engine.cc
index b100630dbe412ca811f1a8f2b8191356f5ebec2f..585f6d29376c3341c21ff76361d5335512c1b1b6 100644
--- a/paddle/contrib/inference/test_paddle_inference_api_tensorrt_subgraph_engine.cc
+++ b/paddle/fluid/inference/api/test_api_tensorrt_subgraph_engine.cc
@@ -15,7 +15,7 @@
 #include <gflags/gflags.h>
 #include <glog/logging.h>
 #include <gtest/gtest.h>
-#include "paddle/contrib/inference/paddle_inference_api.h"
+#include "paddle/fluid/inference/api/paddle_inference_api.h"
 
 namespace paddle {
@@ -61,4 +61,4 @@ void Main(bool use_gpu) {
 
 TEST(paddle_inference_api_tensorrt_subgraph_engine, main) { Main(true); }
 
-}  // namespace paddle
\ No newline at end of file
+}  // namespace paddle
diff --git a/paddle/scripts/paddle_build.sh b/paddle/scripts/paddle_build.sh
index de915d4e56671dca8a61a2083e0840358f1b66ea..5f2c366efd9b043b551a88a42b39f703bd052b6b 100755
--- a/paddle/scripts/paddle_build.sh
+++ b/paddle/scripts/paddle_build.sh
@@ -545,7 +545,7 @@ function test_fluid_inference_lib() {
     Testing fluid inference library ...
     ========================================
 EOF
-        cd ${PADDLE_ROOT}/paddle/contrib/inference/demo_ci
+        cd ${PADDLE_ROOT}/paddle/fluid/inference/api/demo_ci
         ./run.sh ${PADDLE_ROOT} ${WITH_MKL:-ON} ${WITH_GPU:-OFF}
     fi
 }