From 24331d919396ffdfa02b7b9899da2f803655c295 Mon Sep 17 00:00:00 2001
From: huzhiqiang <912790387@qq.com>
Date: Mon, 27 Apr 2020 14:38:40 +0800
Subject: [PATCH] [demo][x86] Add cxx_demo for x86 backend (#3482)

---
 lite/CMakeLists.txt                           | 18 +++--
 .../x86_mobilenetv1_full_demo/CMakeLists.txt  | 21 ++++++
 .../cxx/x86_mobilenetv1_full_demo/build.sh    |  6 ++
 .../mobilenet_full_api.cc                     | 66 +++++++++++++++++++
 .../x86_mobilenetv1_light_demo/CMakeLists.txt | 21 ++++++
 .../cxx/x86_mobilenetv1_light_demo/build.sh   |  6 ++
 .../mobilenet_light_api.cc                    | 64 ++++++++++++++++++
 7 files changed, 196 insertions(+), 6 deletions(-)
 create mode 100644 lite/demo/cxx/x86_mobilenetv1_full_demo/CMakeLists.txt
 create mode 100644 lite/demo/cxx/x86_mobilenetv1_full_demo/build.sh
 create mode 100644 lite/demo/cxx/x86_mobilenetv1_full_demo/mobilenet_full_api.cc
 create mode 100644 lite/demo/cxx/x86_mobilenetv1_light_demo/CMakeLists.txt
 create mode 100644 lite/demo/cxx/x86_mobilenetv1_light_demo/build.sh
 create mode 100644 lite/demo/cxx/x86_mobilenetv1_light_demo/mobilenet_light_api.cc

diff --git a/lite/CMakeLists.txt b/lite/CMakeLists.txt
index b89a4de37a..3a9e77e35d 100644
--- a/lite/CMakeLists.txt
+++ b/lite/CMakeLists.txt
@@ -188,15 +188,17 @@ if (LITE_WITH_CUDA OR LITE_WITH_X86)
         COMMAND cp "${CMAKE_BINARY_DIR}/libpaddle_api_light_bundled.a" "${INFER_LITE_PUBLISH_ROOT}/cxx/lib"
         COMMAND cp "${CMAKE_BINARY_DIR}/lite/api/*.so" "${INFER_LITE_PUBLISH_ROOT}/cxx/lib"
         )
 
-    add_custom_target(publish_inference_third_party ${TARGET}
-            COMMAND mkdir -p "${INFER_LITE_PUBLISH_ROOT}/third_party"
-            COMMAND cp -r "${CMAKE_BINARY_DIR}/third_party/install/*" "${INFER_LITE_PUBLISH_ROOT}/third_party")
+    if (LITE_WITH_CUDA)
+        add_custom_target(publish_inference_third_party ${TARGET}
+                COMMAND mkdir -p "${INFER_LITE_PUBLISH_ROOT}/third_party"
+                COMMAND cp -r "${CMAKE_BINARY_DIR}/third_party/install/*" "${INFER_LITE_PUBLISH_ROOT}/third_party")
+        add_dependencies(publish_inference publish_inference_third_party)
+    endif()
     add_dependencies(publish_inference_cxx_lib bundle_full_api)
     add_dependencies(publish_inference_cxx_lib bundle_light_api)
     add_dependencies(publish_inference_cxx_lib paddle_full_api_shared)
     add_dependencies(publish_inference_cxx_lib paddle_light_api_shared)
     add_dependencies(publish_inference publish_inference_cxx_lib)
-    add_dependencies(publish_inference publish_inference_third_party)
 endif()
 endif()
@@ -238,9 +240,13 @@ if (LITE_WITH_X86)
         add_dependencies(publish_inference_x86_cxx_lib test_model_bin)
 
         add_custom_target(publish_inference_x86_cxx_demos ${TARGET}
+            COMMAND rm -rf "${INFER_LITE_PUBLISH_ROOT}/demo/cxx"
+            COMMAND mkdir -p "${INFER_LITE_PUBLISH_ROOT}/demo/cxx"
+            COMMAND cp -r "${CMAKE_SOURCE_DIR}/lite/demo/cxx/x86_mobilenetv1_light_demo" "${INFER_LITE_PUBLISH_ROOT}/demo/cxx/mobilenetv1_light"
+            COMMAND cp -r "${CMAKE_SOURCE_DIR}/lite/demo/cxx/x86_mobilenetv1_full_demo" "${INFER_LITE_PUBLISH_ROOT}/demo/cxx/mobilenetv1_full"
             COMMAND mkdir -p "${INFER_LITE_PUBLISH_ROOT}/third_party"
-            COMMAND cp -r "${CMAKE_BINARY_DIR}/third_party/eigen3" "${INFER_LITE_PUBLISH_ROOT}/third_party"
-        )
+            COMMAND cp -r "${CMAKE_BINARY_DIR}/third_party/install/mklml" "${INFER_LITE_PUBLISH_ROOT}/third_party/"
+            )
         add_dependencies(publish_inference_x86_cxx_lib publish_inference_x86_cxx_demos)
         add_dependencies(publish_inference_x86_cxx_demos paddle_full_api_shared eigen3)
         add_dependencies(publish_inference publish_inference_x86_cxx_lib)
diff --git a/lite/demo/cxx/x86_mobilenetv1_full_demo/CMakeLists.txt b/lite/demo/cxx/x86_mobilenetv1_full_demo/CMakeLists.txt
new file mode 100644
index 0000000000..5039ef7727
--- /dev/null
+++ b/lite/demo/cxx/x86_mobilenetv1_full_demo/CMakeLists.txt
@@ -0,0 +1,21 @@
+cmake_minimum_required(VERSION 2.8)
+
+set(TARGET mobilenet_full_api)
+
+# 1. path to Paddle-Lite lib and mklml lib
+set(LITE_DIR "${PROJECT_SOURCE_DIR}/../../../cxx")
+set(MKLML_DIR "${PROJECT_SOURCE_DIR}/../../../third_party/mklml/")
+
+# 2. link mklml and Paddle-Lite directory
+link_directories(${LITE_DIR}/lib ${MKLML_DIR}/lib)
+include_directories(${LITE_DIR}/include/ ${MKLML_DIR}/include)
+
+# 3. compile options
+add_definitions(-std=c++11 -g -O3 -pthread)
+set(EXECUTABLE_OUTPUT_PATH ${PROJECT_SOURCE_DIR})
+
+# 4. add executable output
+add_executable(${TARGET} ${TARGET}.cc)
+target_link_libraries(${TARGET} -lpaddle_full_api_shared)
+target_link_libraries(${TARGET} -lmklml_intel)
+target_link_libraries(${TARGET} -ldl)
diff --git a/lite/demo/cxx/x86_mobilenetv1_full_demo/build.sh b/lite/demo/cxx/x86_mobilenetv1_full_demo/build.sh
new file mode 100644
index 0000000000..c9570e326e
--- /dev/null
+++ b/lite/demo/cxx/x86_mobilenetv1_full_demo/build.sh
@@ -0,0 +1,6 @@
+mkdir ./build
+cd ./build
+cmake ..
+make
+cd ..
+rm -rf ./build
diff --git a/lite/demo/cxx/x86_mobilenetv1_full_demo/mobilenet_full_api.cc b/lite/demo/cxx/x86_mobilenetv1_full_demo/mobilenet_full_api.cc
new file mode 100644
index 0000000000..c2837e0fdd
--- /dev/null
+++ b/lite/demo/cxx/x86_mobilenetv1_full_demo/mobilenet_full_api.cc
@@ -0,0 +1,66 @@
+// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <iostream>
+#include <vector>
+#include "paddle_api.h"  // NOLINT
+
+using namespace paddle::lite_api;  // NOLINT
+
+int64_t ShapeProduction(const shape_t& shape) {
+  int64_t res = 1;
+  for (auto i : shape) res *= i;
+  return res;
+}
+
+void RunModel(std::string model_dir) {
+  // 1. Create CxxConfig
+  CxxConfig config;
+  config.set_model_dir(model_dir);
+  config.set_valid_places({Place{TARGET(kX86), PRECISION(kFloat)},
+                           Place{TARGET(kHost), PRECISION(kFloat)}});
+  // 2. Create PaddlePredictor by CxxConfig
+  std::shared_ptr<PaddlePredictor> predictor =
+      CreatePaddlePredictor<CxxConfig>(config);
+
+  // 3. Prepare input data
+  std::unique_ptr<Tensor> input_tensor(std::move(predictor->GetInput(0)));
+  input_tensor->Resize({1, 3, 224, 224});
+  auto* data = input_tensor->mutable_data<float>();
+  for (int i = 0; i < ShapeProduction(input_tensor->shape()); ++i) {
+    data[i] = 1;
+  }
+
+  // 4. Run predictor
+  predictor->Run();
+
+  // 5. Get output
+  std::unique_ptr<const Tensor> output_tensor(
+      std::move(predictor->GetOutput(0)));
+  std::cout << "Output shape " << output_tensor->shape()[1] << std::endl;
+  for (int i = 0; i < ShapeProduction(output_tensor->shape()); i += 100) {
+    std::cout << "Output[" << i << "]: " << output_tensor->data<float>()[i]
+              << std::endl;
+  }
+}
+
+int main(int argc, char** argv) {
+  if (argc < 2) {
+    std::cerr << "[ERROR] usage: ./" << argv[0] << " naive_buffer_model_dir\n";
+    exit(1);
+  }
+  std::string model_dir = argv[1];
+  RunModel(model_dir);
+  return 0;
+}
diff --git a/lite/demo/cxx/x86_mobilenetv1_light_demo/CMakeLists.txt b/lite/demo/cxx/x86_mobilenetv1_light_demo/CMakeLists.txt
new file mode 100644
index 0000000000..6f917b5353
--- /dev/null
+++ b/lite/demo/cxx/x86_mobilenetv1_light_demo/CMakeLists.txt
@@ -0,0 +1,21 @@
+cmake_minimum_required(VERSION 2.8)
+
+set(TARGET mobilenet_light_api)
+
+# 1. path to Paddle-Lite lib and mklml lib
+set(LITE_DIR "${PROJECT_SOURCE_DIR}/../../../cxx")
+set(MKLML_DIR "${PROJECT_SOURCE_DIR}/../../../third_party/mklml/")
+
+# 2. link mklml and Paddle-Lite directory
+link_directories(${LITE_DIR}/lib ${MKLML_DIR}/lib)
+include_directories(${LITE_DIR}/include/ ${MKLML_DIR}/include)
+
+# 3. compile options
+add_definitions(-std=c++11 -g -O3 -pthread)
+set(EXECUTABLE_OUTPUT_PATH ${PROJECT_SOURCE_DIR})
+
+# 4. add executable output
+add_executable(${TARGET} ${TARGET}.cc)
+target_link_libraries(${TARGET} -lpaddle_light_api_shared)
+target_link_libraries(${TARGET} -lmklml_intel)
+target_link_libraries(${TARGET} -ldl)
diff --git a/lite/demo/cxx/x86_mobilenetv1_light_demo/build.sh b/lite/demo/cxx/x86_mobilenetv1_light_demo/build.sh
new file mode 100644
index 0000000000..c9570e326e
--- /dev/null
+++ b/lite/demo/cxx/x86_mobilenetv1_light_demo/build.sh
@@ -0,0 +1,6 @@
+mkdir ./build
+cd ./build
+cmake ..
+make
+cd ..
+rm -rf ./build
diff --git a/lite/demo/cxx/x86_mobilenetv1_light_demo/mobilenet_light_api.cc b/lite/demo/cxx/x86_mobilenetv1_light_demo/mobilenet_light_api.cc
new file mode 100644
index 0000000000..17997a228f
--- /dev/null
+++ b/lite/demo/cxx/x86_mobilenetv1_light_demo/mobilenet_light_api.cc
@@ -0,0 +1,64 @@
+// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <iostream>
+#include <vector>
+#include "paddle_api.h"  // NOLINT
+
+using namespace paddle::lite_api;  // NOLINT
+
+int64_t ShapeProduction(const shape_t& shape) {
+  int64_t res = 1;
+  for (auto i : shape) res *= i;
+  return res;
+}
+
+void RunModel(std::string model_dir) {
+  // 1. Create MobileConfig
+  MobileConfig config;
+  config.set_model_dir(model_dir);
+  // 2. Create PaddlePredictor by MobileConfig
+  std::shared_ptr<PaddlePredictor> predictor =
+      CreatePaddlePredictor<MobileConfig>(config);
+
+  // 3. Prepare input data
+  std::unique_ptr<Tensor> input_tensor(std::move(predictor->GetInput(0)));
+  input_tensor->Resize({1, 3, 224, 224});
+  auto* data = input_tensor->mutable_data<float>();
+  for (int i = 0; i < ShapeProduction(input_tensor->shape()); ++i) {
+    data[i] = 1;
+  }
+
+  // 4. Run predictor
+  predictor->Run();
+
+  // 5. Get output
+  std::unique_ptr<const Tensor> output_tensor(
+      std::move(predictor->GetOutput(0)));
+  std::cout << "Output shape " << output_tensor->shape()[1] << std::endl;
+  for (int i = 0; i < ShapeProduction(output_tensor->shape()); i += 100) {
+    std::cout << "Output[" << i << "]: " << output_tensor->data<float>()[i]
+              << std::endl;
+  }
+}
+
+int main(int argc, char** argv) {
+  if (argc < 2) {
+    std::cerr << "[ERROR] usage: ./" << argv[0] << " naive_buffer_model_dir\n";
+    exit(1);
+  }
+  std::string model_dir = argv[1];
+  RunModel(model_dir);
+  return 0;
+}
--
GitLab
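
For reference, a minimal sketch of how the published full-API demo might be built and run, following the demo's build.sh and the usage message in main(). The model directory name (mobilenet_v1) is a placeholder, and exporting LD_LIBRARY_PATH to the published cxx/lib and mklml/lib directories is an assumption about the runtime environment, since the demo links against paddle_full_api_shared and mklml_intel as shared libraries:

    # run from inside the published package (INFER_LITE_PUBLISH_ROOT)
    cd demo/cxx/mobilenetv1_full
    sh build.sh                       # cmake + make; the binary lands next to the sources (EXECUTABLE_OUTPUT_PATH)
    export LD_LIBRARY_PATH=../../../cxx/lib:../../../third_party/mklml/lib:$LD_LIBRARY_PATH
    ./mobilenet_full_api ./mobilenet_v1   # pass a MobileNetV1 model directory (placeholder path)

The light-API demo (mobilenetv1_light) builds the same way; it expects a naive-buffer model directory produced by the opt/full-API save step.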