diff --git a/paddle/fluid/lite/api/CMakeLists.txt b/paddle/fluid/lite/api/CMakeLists.txt index 4440acd61b8e64bfdeccf455f641cb57eb0cdcdf..84da4757b1018db0fe66e708c7216cc1dc8c6d1a 100644 --- a/paddle/fluid/lite/api/CMakeLists.txt +++ b/paddle/fluid/lite/api/CMakeLists.txt @@ -74,18 +74,16 @@ endif() # These tests needs CLI arguments, and is not supported in ARM CI. # TODO(Superjomn) support latter. -lite_cc_test(test_light_api SRCS light_api_test.cc +lite_cc_test(test_light_api_lite SRCS light_api_test.cc DEPS light_api_lite program_lite mir_passes ARGS --optimized_model=${LITE_MODEL_DIR}/lite_naive_model_opt SERIAL) -if(NOT LITE_ON_MOBILE) - lite_cc_test(test_apis_lite SRCS apis_test.cc - DEPS cxx_api_lite light_api_lite ${ops_lite} - X86_DEPS ${x86_kernels} operator - ARGS --model_dir=${LITE_MODEL_DIR}/lite_naive_model - --optimized_model=${LITE_MODEL_DIR}/lite_naive_model_opt SERIAL) -endif() +lite_cc_test(test_apis_lite SRCS apis_test.cc + DEPS cxx_api_lite light_api_lite ${ops_lite} + X86_DEPS ${x86_kernels} operator + ARGS --model_dir=${LITE_MODEL_DIR}/lite_naive_model + --optimized_model=${LITE_MODEL_DIR}/lite_naive_model_opt SERIAL) #lite_cc_binary(cxx_api_lite_bin SRCS cxx_api_bin.cc #X86_DEPS operator diff --git a/paddle/fluid/lite/api/apis_test.cc b/paddle/fluid/lite/api/apis_test.cc index 0b8e9550a104aeda94147ecdb9032424aa0baab1..a5cf93f0e29ce4b0ec5cc733da3ac609668ae539 100644 --- a/paddle/fluid/lite/api/apis_test.cc +++ b/paddle/fluid/lite/api/apis_test.cc @@ -46,16 +46,34 @@ bool CompareTensors(const std::string& name, const Predictor& cxx_api, return TensorCompareWith(*a, *b); } -#ifndef LITE_WITH_LIGHT_WEIGHT_FRAMEWORK +TEST(CXXApi_LightApi, optim_model) { + lite::Predictor cxx_api; + std::vector valid_places({ + Place{TARGET(kHost), PRECISION(kFloat)}, + Place{TARGET(kX86), PRECISION(kFloat)}, + Place{TARGET(kARM), PRECISION(kFloat)}, // Works on both X86 and ARM + }); + // On ARM devices, the preferred X86 target does not work, but it can still + // 
select ARM kernels. + cxx_api.Build(FLAGS_model_dir, Place{TARGET(kX86), PRECISION(kFloat)}, + valid_places); + cxx_api.SaveModel(FLAGS_optimized_model); +} + TEST(CXXApi_LightApi, save_and_load_model) { lite::Predictor cxx_api; lite::LightPredictor light_api(FLAGS_optimized_model); // CXXAPi { - std::vector valid_places({Place{TARGET(kHost), PRECISION(kFloat)}, - Place{TARGET(kX86), PRECISION(kFloat)}}); - cxx_api.Build(FLAGS_model_dir, Place{TARGET(kCUDA), PRECISION(kFloat)}, + std::vector valid_places({ + Place{TARGET(kHost), PRECISION(kFloat)}, + Place{TARGET(kX86), PRECISION(kFloat)}, + Place{TARGET(kARM), PRECISION(kFloat)}, // Works on both X86 and ARM + }); + // On ARM devices, the preferred X86 target does not work, but it can still + // select ARM kernels. + cxx_api.Build(FLAGS_model_dir, Place{TARGET(kX86), PRECISION(kFloat)}, valid_places); auto* x = cxx_api.GetInput(0); @@ -87,7 +105,6 @@ TEST(CXXApi_LightApi, save_and_load_model) { ASSERT_TRUE(CompareTensors(tensor_name, cxx_api, light_api)); } } -#endif // LITE_WITH_LIGHT_WEIGHT_FRAMEWORK } // namespace lite } // namespace paddle diff --git a/paddle/fluid/lite/api/cxx_api.cc b/paddle/fluid/lite/api/cxx_api.cc index 7c6ffccfa0bdab393c6870283834c76c5d1a2668..16a5cc891668f604b8f1bdc459473499e8a8a551 100644 --- a/paddle/fluid/lite/api/cxx_api.cc +++ b/paddle/fluid/lite/api/cxx_api.cc @@ -24,13 +24,11 @@ namespace lite { void Predictor::SaveModel(const std::string &dir) { #ifndef LITE_WITH_ARM - LOG(INFO) << "Save model to " << dir; MkDirRecur(dir); - program_->PersistModel(dir, program_desc_); #else - LOG(INFO) << "Save model to ./"; - program_->PersistModel("./", program_desc_); #endif + program_->PersistModel(dir, program_desc_); + LOG(INFO) << "Save model to " << dir; } lite::Tensor *Predictor::GetInput(size_t offset) { @@ -61,5 +59,24 @@ const framework::proto::ProgramDesc &Predictor::program_desc() const { return program_desc_; } +void Predictor::Build(const framework::proto::ProgramDesc &desc, + const 
Place &prefer_place, + const std::vector &valid_places) { + program_desc_ = desc; + Program program(desc, scope_, valid_places); + + optimizer_.KernelPickPreferPlace(prefer_place); + core::KernelPickFactor factor; + factor.ConsiderTarget(); + factor.ConsiderPrecision(); + optimizer_.Run(std::move(program), valid_places, factor); + program_ = optimizer_.GenRuntimeProgram(); +} + +const lite::Tensor *Predictor::GetTensor(const std::string &name) const { + auto *var = program_->exec_scope()->FindVar(name); + return &var->Get(); +} + } // namespace lite } // namespace paddle diff --git a/paddle/fluid/lite/api/cxx_api.h b/paddle/fluid/lite/api/cxx_api.h index e7b74a04da25ba3d228aba78d9a5ce9d0909d708..5434bc18eb634a7c2136a64f4afdb490db92119d 100644 --- a/paddle/fluid/lite/api/cxx_api.h +++ b/paddle/fluid/lite/api/cxx_api.h @@ -42,18 +42,7 @@ class Predictor { const std::vector& valid_places); void Build(const framework::proto::ProgramDesc& desc, - const Place& prefer_place, - const std::vector& valid_places) { - program_desc_ = desc; - Program program(desc, scope_, valid_places); - - optimizer_.KernelPickPreferPlace(prefer_place); - core::KernelPickFactor factor; - factor.ConsiderTarget(); - factor.ConsiderPrecision(); - optimizer_.Run(std::move(program), valid_places, factor); - program_ = optimizer_.GenRuntimeProgram(); - } + const Place& prefer_place, const std::vector& valid_places); // Run the predictor for a single batch of data. void Run() { program_->Run(); } @@ -66,10 +55,7 @@ class Predictor { // Return the program desc for debug. const framework::proto::ProgramDesc& program_desc() const; - const lite::Tensor* GetTensor(const std::string& name) const { - auto* var = program_->exec_scope()->FindVar(name); - return &var->Get(); - } + const lite::Tensor* GetTensor(const std::string& name) const; // This method is disabled in mobile, for unnecessary dependencies required. 
void SaveModel(const std::string& dir); diff --git a/paddle/fluid/lite/tools/build.sh b/paddle/fluid/lite/tools/build.sh index a4b75f7ef0d5c982e03b1715fee2f73c3a901fbf..fe956a0384554ea2d2d065c5bd231cbd6d646ecb 100755 --- a/paddle/fluid/lite/tools/build.sh +++ b/paddle/fluid/lite/tools/build.sh @@ -116,7 +116,7 @@ function test_arm_android { echo "test name: ${test_name}" adb_work_dir="/data/local/tmp" - skip_list=("test_model_parser_lite" "test_mobilenetv1_lite" "test_mobilenetv2_lite" "test_resnet50_lite" "test_inceptionv4_lite" "test_light_api") + skip_list=("test_model_parser_lite" "test_mobilenetv1_lite" "test_mobilenetv2_lite" "test_resnet50_lite" "test_inceptionv4_lite" "test_light_api_lite" "test_apis_lite") for skip_name in ${skip_list[@]} ; do [[ $skip_name =~ (^|[[:space:]])$test_name($|[[:space:]]) ]] && echo "skip $test_name" && return done @@ -368,6 +368,22 @@ function build_test_arm_subtask_model { echo "Done" } + +# This test loads a model, optimizes it, and checks the prediction result of both the cxx and light APIs. +function test_arm_predict_apis { + local port=$1 + local workspace=$2 + local naive_model_path=$3 + local api_test_path=$(find . -name "test_apis_lite") + # the model is pushed to ./lite_naive_model + adb -s emulator-${port} push ${naive_model_path} ${workspace} + adb -s emulator-${port} push $api_test_path ${workspace} + + # test cxx_api first to store the optimized model. + adb -s emulator-${port} shell ./test_apis_lite --model_dir ./lite_naive_model --optimized_model ./lite_naive_model_opt +} + + # Build the code and run lite arm tests. This is executed in the CI system. 
function build_test_arm { ######################################################################## diff --git a/paddle/fluid/lite/utils/io.h b/paddle/fluid/lite/utils/io.h index 4e64ee1d4e4b016fadf40167fb96557e96061fba..86161a4b1ab7139795d777cb6a8f266835bcd680 100644 --- a/paddle/fluid/lite/utils/io.h +++ b/paddle/fluid/lite/utils/io.h @@ -14,9 +14,7 @@ #pragma once -#ifndef LITE_WITH_ARM -#include -#endif +#include #include #include #include "paddle/fluid/lite/utils/cp_logging.h" @@ -35,12 +33,14 @@ static bool IsFileExists(const std::string& path) { } // ARM mobile not support mkdir in C++ -#ifndef LITE_WITH_ARM static void MkDirRecur(const std::string& path) { +#ifndef LITE_WITH_ARM CHECK_EQ(system(string_format("mkdir -p %s", path.c_str()).c_str()), 0) << "Cann't mkdir " << path; -} +#else // On ARM + CHECK_NE(mkdir(path.c_str(), S_IRWXU), -1) << "Cann't mkdir " << path; #endif +} } // namespace lite } // namespace paddle