diff --git a/lite/demo/cxx/bm_demo/CMakeLists.txt b/lite/demo/cxx/bm_demo/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..4119196d16e87f53e7b1d8078fde19d2a1ecb50f
--- /dev/null
+++ b/lite/demo/cxx/bm_demo/CMakeLists.txt
@@ -0,0 +1,14 @@
+cmake_minimum_required(VERSION 3.0)
+project(demo CXX C)
+
+set(TARGET mobilenet_full_api)
+# 1. lib & include
+link_directories(${PROJECT_SOURCE_DIR}/lib ${PROJECT_SOURCE_DIR}/lib/pcie ${PROJECT_SOURCE_DIR}/lib/bmcompiler)
+include_directories(${PROJECT_SOURCE_DIR}/include)
+# 2. compile options
+add_definitions(-std=c++11 -g -O3 -DNDEBUG -pthread)
+#add_definitions(-g )
+set(EXECUTABLE_OUTPUT_PATH ${PROJECT_SOURCE_DIR})
+# 3.add executable output
+add_executable(${TARGET} ${TARGET}.cc)
+target_link_libraries(${TARGET} -lpaddle_full_api_shared -lbmlib -lbmrt -lbmcompiler)
diff --git a/lite/demo/cxx/bm_demo/ci.sh b/lite/demo/cxx/bm_demo/ci.sh
new file mode 100755
index 0000000000000000000000000000000000000000..f18724fd662ac0bec3b239b8ee2951fff53c5318
--- /dev/null
+++ b/lite/demo/cxx/bm_demo/ci.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+# CI smoke test for the BM demo: fetch a model, build the demo, run one inference.
+# Exit on first failure so CI reports a broken build instead of silently continuing.
+set -e
+
+wget http://paddle-inference-dist.bj.bcebos.com/mobilenet_v1.tar.gz
+tar -xvf mobilenet_v1.tar.gz
+
+# -p keeps the script re-runnable when the directories already exist.
+mkdir -p ./lib
+cp -r /paddlelite/build.lite.bm/lite/api/libpaddle_full_api_shared.so \
+/paddlelite/third-party/bmlibs/bm_sc3_libs/lib/bmcompiler \
+/paddlelite/third-party/bmlibs/bm_sc3_libs/lib/bmnn/pcie ./lib
+
+mkdir -p ./include
+cp -r /paddlelite/lite/api/paddle_place.h /paddlelite/lite/api/paddle_api.h ./include
+
+mkdir -p ./build
+cd ./build
+cmake ..
+make
+cd ..
+rm -rf ./build
+rm -rf ./mobilenet_v1.tar.gz
+
+./mobilenet_full_api ./mobilenet_v1 224 224
diff --git a/lite/demo/cxx/bm_demo/mobilenet_full_api.cc b/lite/demo/cxx/bm_demo/mobilenet_full_api.cc
new file mode 100644
index 0000000000000000000000000000000000000000..ca557268f729bf71530a70a62311cb62cdf66359
--- /dev/null
+++ b/lite/demo/cxx/bm_demo/mobilenet_full_api.cc
@@ -0,0 +1,90 @@
+// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <stdio.h>
+#include <iostream>
+#include "paddle_api.h"  // NOLINT
+using namespace paddle::lite_api;  // NOLINT
+int g_batch_size = 1;
+// Product of all tensor dimensions, i.e. total element count.
+int64_t ShapeProduction(const shape_t& shape) {
+  int64_t res = 1;
+  for (auto i : shape) res *= i;
+  return res;
+}
+// Build a BM-target predictor, run 100 warm inferences on an all-ones input,
+// dump the output to result.txt, then repeat once on a Clone()d predictor.
+void RunModel(std::string model_dir, int im_height, int im_width) {
+  CxxConfig config;
+  config.set_model_dir(model_dir);
+  // config.set_model_file(model_dir+"/model");
+  // config.set_param_file(model_dir+"/params");
+  config.set_valid_places({Place{TARGET(kBM), PRECISION(kFloat)},
+                           Place{TARGET(kHost), PRECISION(kFloat)}});
+  std::shared_ptr<PaddlePredictor> predictor =
+      CreatePaddlePredictor<CxxConfig>(config);
+  auto cloned_predictor = predictor->Clone();
+  // Fill the NCHW input with ones (synthetic data; no real image needed in CI).
+  std::unique_ptr<Tensor> input_tensor(std::move(predictor->GetInput(0)));
+  input_tensor->Resize({1, 3, im_height, im_width});
+  auto* data = input_tensor->mutable_data<float>();
+  int item_size = ShapeProduction(input_tensor->shape());
+  for (int i = 0; i < item_size; i++) {
+    data[i] = 1;
+  }
+  for (int i = 0; i < 100; i++) {
+    predictor->Run();
+  }
+  // Persist every output value for offline comparison.
+  FILE* fp = fopen("result.txt", "wb");
+  std::unique_ptr<const Tensor> output_tensor(
+      std::move(predictor->GetOutput(0)));
+  std::cout << "Output shape " << output_tensor->shape()[1] << std::endl;
+  for (int i = 0; i < ShapeProduction(output_tensor->shape()); i++) {
+    fprintf(fp, "%f\n", output_tensor->data<float>()[i]);
+  }
+  fclose(fp);
+  // Exercise the cloned predictor with the same all-ones input.
+  std::unique_ptr<Tensor> cloned_input_tensor(
+      std::move(cloned_predictor->GetInput(0)));
+  cloned_input_tensor->Resize({1, 3, im_height, im_width});
+  auto* cloned_data = cloned_input_tensor->mutable_data<float>();
+  for (int i = 0; i < ShapeProduction(cloned_input_tensor->shape()); ++i) {
+    cloned_data[i] = 1;
+  }
+  for (int i = 0; i < 1; i++) {
+    cloned_predictor->Run();
+  }
+  std::unique_ptr<const Tensor> cloned_output_tensor(
+      std::move(cloned_predictor->GetOutput(0)));
+  std::cout << "cloned_Output shape " << cloned_output_tensor->shape()[1]
+            << std::endl;
+  // Sample every 100th element to keep the log short.
+  for (int i = 0; i < ShapeProduction(cloned_output_tensor->shape());
+       i += 100) {
+    std::cout << "cloned_Output[" << i
+              << "]: " << cloned_output_tensor->data<float>()[i] << std::endl;
+  }
+}
+
+int main(int argc, char** argv) {
+  // Needs model dir, height and width: argv[1..3] are all read below.
+  if (argc < 4) {
+    std::cerr << "[ERROR] usage: " << argv[0] << " model_dir im_height im_width\n";
+    exit(1);
+  }
+  std::string model_dir = argv[1];
+  int im_height = std::stoi(argv[2]);
+  int im_width = std::stoi(argv[3]);
+  RunModel(model_dir, im_height, im_width);
+  return 0;
+}
diff --git a/lite/tools/build_bm.sh b/lite/tools/build_bm.sh
index 53f62695bcba061637956a52c1cd12c2e8b3c1c5..4d4d12a98974dd38d9bfc48c1df82527babf353f 100755
--- a/lite/tools/build_bm.sh
+++ b/lite/tools/build_bm.sh
@@ -102,7 +102,10 @@ function main {
         case $i in
             --target_name=*)
                 TARGET_NAME="${i#*=}"
-                build_bm
+                shift
+                ;;
+            --test=*)
+                WITH_TESTING=${i#*=}
                 shift
                 ;;
             *)
@@ -112,5 +115,6 @@
                 ;;
         esac
     done
+    build_bm
 }
 main $@