Commit c8deaaa9 authored by Chunwei

Merge branch 'chunwei/refine-light-api' into 'incubate/lite'

Chunwei/refine light api

See merge request inference/paddlelite!46
@@ -74,18 +74,16 @@ endif()
 # These tests need CLI arguments and are not supported in ARM CI.
 # TODO(Superjomn) support later.
-lite_cc_test(test_light_api SRCS light_api_test.cc
+lite_cc_test(test_light_api_lite SRCS light_api_test.cc
     DEPS light_api_lite program_lite mir_passes
     ARGS --optimized_model=${LITE_MODEL_DIR}/lite_naive_model_opt
     SERIAL)
-if(NOT LITE_ON_MOBILE)
-  lite_cc_test(test_apis_lite SRCS apis_test.cc
-    DEPS cxx_api_lite light_api_lite ${ops_lite}
-    X86_DEPS ${x86_kernels} operator
-    ARGS --model_dir=${LITE_MODEL_DIR}/lite_naive_model
-    --optimized_model=${LITE_MODEL_DIR}/lite_naive_model_opt SERIAL)
-endif()
+lite_cc_test(test_apis_lite SRCS apis_test.cc
+  DEPS cxx_api_lite light_api_lite ${ops_lite}
+  X86_DEPS ${x86_kernels} operator
+  ARGS --model_dir=${LITE_MODEL_DIR}/lite_naive_model
+  --optimized_model=${LITE_MODEL_DIR}/lite_naive_model_opt SERIAL)
 #lite_cc_binary(cxx_api_lite_bin SRCS cxx_api_bin.cc
 #X86_DEPS operator
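Note: the ARGS entries above are ordinary gflags command-line flags consumed by the test binaries (the tests reference FLAGS_model_dir and FLAGS_optimized_model). A minimal sketch of how such flags are presumably defined in the test sources, using standard gflags; the help strings here are placeholders:

    #include <gflags/gflags.h>

    // Flag definitions matching the --model_dir / --optimized_model ARGS above.
    DEFINE_string(model_dir, "", "directory of the raw inference model");
    DEFINE_string(optimized_model, "", "path to store the optimized model");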
@@ -46,16 +46,34 @@ bool CompareTensors(const std::string& name, const Predictor& cxx_api,
   return TensorCompareWith(*a, *b);
 }
-#ifndef LITE_WITH_LIGHT_WEIGHT_FRAMEWORK
+TEST(CXXApi_LightApi, optim_model) {
+  lite::Predictor cxx_api;
+  std::vector<Place> valid_places({
+      Place{TARGET(kHost), PRECISION(kFloat)},
+      Place{TARGET(kX86), PRECISION(kFloat)},
+      Place{TARGET(kARM), PRECISION(kFloat)},  // works on both X86 and ARM
+  });
+  // On ARM devices the preferred X86 target does not work, but the predictor
+  // can still fall back to the ARM kernels.
+  cxx_api.Build(FLAGS_model_dir, Place{TARGET(kX86), PRECISION(kFloat)},
+                valid_places);
+  cxx_api.SaveModel(FLAGS_optimized_model);
+}
 TEST(CXXApi_LightApi, save_and_load_model) {
   lite::Predictor cxx_api;
   lite::LightPredictor light_api(FLAGS_optimized_model);
   // CXXApi
   {
-    std::vector<Place> valid_places({Place{TARGET(kHost), PRECISION(kFloat)},
-                                     Place{TARGET(kX86), PRECISION(kFloat)}});
-    cxx_api.Build(FLAGS_model_dir, Place{TARGET(kCUDA), PRECISION(kFloat)},
+    std::vector<Place> valid_places({
+        Place{TARGET(kHost), PRECISION(kFloat)},
+        Place{TARGET(kX86), PRECISION(kFloat)},
+        Place{TARGET(kARM), PRECISION(kFloat)},  // works on both X86 and ARM
+    });
+    // On ARM devices the preferred X86 target does not work, but the predictor
+    // can still fall back to the ARM kernels.
+    cxx_api.Build(FLAGS_model_dir, Place{TARGET(kX86), PRECISION(kFloat)},
                   valid_places);
     auto* x = cxx_api.GetInput(0);
@@ -87,7 +105,6 @@ TEST(CXXApi_LightApi, save_and_load_model) {
     ASSERT_TRUE(CompareTensors(tensor_name, cxx_api, light_api));
   }
 }
-#endif  // LITE_WITH_LIGHT_WEIGHT_FRAMEWORK
 }  // namespace lite
 }  // namespace paddle
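Taken together, the two tests exercise the round trip between the heavy and the light API. A condensed sketch of that flow, based only on the calls visible in this diff (input filling is elided, and the light predictor's run call is an assumption, since the diff only shows it being constructed and its tensors compared):

    // Heavy (CXX) API: load, optimize against the valid places, persist.
    std::vector<Place> valid_places({Place{TARGET(kHost), PRECISION(kFloat)},
                                     Place{TARGET(kX86), PRECISION(kFloat)},
                                     Place{TARGET(kARM), PRECISION(kFloat)}});
    lite::Predictor cxx_api;
    cxx_api.Build(FLAGS_model_dir, Place{TARGET(kX86), PRECISION(kFloat)},
                  valid_places);
    cxx_api.Run();
    cxx_api.SaveModel(FLAGS_optimized_model);

    // Light API: reload the already-optimized program and execute it directly,
    // with no optimizer or heavy framework dependencies on the device.
    lite::LightPredictor light_api(FLAGS_optimized_model);
    light_api.Run();  // assumed to mirror Predictor::Run()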
@@ -24,13 +24,11 @@ namespace lite {
 void Predictor::SaveModel(const std::string &dir) {
-#ifndef LITE_WITH_ARM
-  LOG(INFO) << "Save model to " << dir;
   MkDirRecur(dir);
-  program_->PersistModel(dir, program_desc_);
-#else
-  LOG(INFO) << "Save model to ./";
-  program_->PersistModel("./", program_desc_);
-#endif
+  program_->PersistModel(dir, program_desc_);
+  LOG(INFO) << "Save model to " << dir;
 }
 lite::Tensor *Predictor::GetInput(size_t offset) {
@@ -61,5 +59,24 @@ const framework::proto::ProgramDesc &Predictor::program_desc() const {
   return program_desc_;
 }
+void Predictor::Build(const framework::proto::ProgramDesc &desc,
+                      const Place &prefer_place,
+                      const std::vector<Place> &valid_places) {
+  program_desc_ = desc;
+  Program program(desc, scope_, valid_places);
+  optimizer_.KernelPickPreferPlace(prefer_place);
+  core::KernelPickFactor factor;
+  factor.ConsiderTarget();
+  factor.ConsiderPrecision();
+  optimizer_.Run(std::move(program), valid_places, factor);
+  program_ = optimizer_.GenRuntimeProgram();
+}
+const lite::Tensor *Predictor::GetTensor(const std::string &name) const {
+  auto *var = program_->exec_scope()->FindVar(name);
+  return &var->Get<lite::Tensor>();
+}
 }  // namespace lite
 }  // namespace paddle
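The Build() definition moved here from the header makes the kernel-selection policy explicit: the KernelPickFactor asks the optimizer to weigh both target and precision when ranking candidate kernels, and KernelPickPreferPlace biases that ranking toward prefer_place. From the caller's side, usage stays as simple as the sketch below (the tensor name is a hypothetical placeholder, and valid_places is as defined in the tests above):

    lite::Predictor predictor;
    predictor.Build(FLAGS_model_dir, Place{TARGET(kX86), PRECISION(kFloat)},
                    valid_places);
    predictor.Run();
    // Look up any variable of the optimized program by name after the run.
    const lite::Tensor* out = predictor.GetTensor("fc_0.tmp_2");  // hypothetical name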
@@ -42,18 +42,7 @@ class Predictor {
              const std::vector<Place>& valid_places);
   void Build(const framework::proto::ProgramDesc& desc,
-             const Place& prefer_place,
-             const std::vector<Place>& valid_places) {
-    program_desc_ = desc;
-    Program program(desc, scope_, valid_places);
-    optimizer_.KernelPickPreferPlace(prefer_place);
-    core::KernelPickFactor factor;
-    factor.ConsiderTarget();
-    factor.ConsiderPrecision();
-    optimizer_.Run(std::move(program), valid_places, factor);
-    program_ = optimizer_.GenRuntimeProgram();
-  }
+             const Place& prefer_place, const std::vector<Place>& valid_places);
   // Run the predictor for a single batch of data.
   void Run() { program_->Run(); }
@@ -66,10 +55,7 @@ class Predictor {
   // Return the program desc for debug.
   const framework::proto::ProgramDesc& program_desc() const;
-  const lite::Tensor* GetTensor(const std::string& name) const {
-    auto* var = program_->exec_scope()->FindVar(name);
-    return &var->Get<lite::Tensor>();
-  }
+  const lite::Tensor* GetTensor(const std::string& name) const;
   // This method is disabled on mobile, to avoid pulling in unnecessary
   // dependencies.
   void SaveModel(const std::string& dir);
@@ -116,7 +116,7 @@ function test_arm_android {
   echo "test name: ${test_name}"
   adb_work_dir="/data/local/tmp"
-  skip_list=("test_model_parser_lite" "test_mobilenetv1_lite" "test_mobilenetv2_lite" "test_resnet50_lite" "test_inceptionv4_lite" "test_light_api")
+  skip_list=("test_model_parser_lite" "test_mobilenetv1_lite" "test_mobilenetv2_lite" "test_resnet50_lite" "test_inceptionv4_lite" "test_light_api_lite" "test_apis_lite")
   for skip_name in ${skip_list[@]} ; do
     [[ $skip_name =~ (^|[[:space:]])$test_name($|[[:space:]]) ]] && echo "skip $test_name" && return
   done
@@ -368,6 +368,22 @@ function build_test_arm_subtask_model {
   echo "Done"
 }
+# This test loads a model, optimizes it, and checks the prediction results of
+# both the CXX and light APIs.
+function test_arm_predict_apis {
+  local port=$1
+  local workspace=$2
+  local naive_model_path=$3
+  local api_test_path=$(find . -name "test_apis_lite")
+  # the model is pushed to ./lite_naive_model
+  adb -s emulator-${port} push ${naive_model_path} ${workspace}
+  adb -s emulator-${port} push $api_test_path ${workspace}
+  # run the CXX API part first so it stores the optimized model for the light API.
+  adb -s emulator-${port} shell ./test_apis_lite --model_dir ./lite_naive_model --optimized_model ./lite_naive_model_opt
+}
 # Build the code and run lite arm tests. This is executed in the CI system.
 function build_test_arm {
 ########################################################################
@@ -14,9 +14,7 @@
 #pragma once
-#ifndef LITE_WITH_ARM
-#include <bits/stdc++.h>
-#endif
+#include <sys/stat.h>
 #include <fstream>
 #include <string>
 #include "paddle/fluid/lite/utils/cp_logging.h"
@@ -35,12 +33,14 @@ static bool IsFileExists(const std::string& path) {
 }
 // ARM mobile does not support mkdir in C++
-#ifndef LITE_WITH_ARM
 static void MkDirRecur(const std::string& path) {
+#ifndef LITE_WITH_ARM
   CHECK_EQ(system(string_format("mkdir -p %s", path.c_str()).c_str()), 0)
       << "Can't mkdir " << path;
-}
+#else  // On ARM
+  CHECK_NE(mkdir(path.c_str(), S_IRWXU), -1) << "Can't mkdir " << path;
 #endif
+}
 }  // namespace lite
 }  // namespace paddle
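One caveat on the new ARM branch: mkdir(2) creates a single directory level and returns -1 with errno set to EEXIST when the directory already exists, so the CHECK_NE aborts on re-runs and the function is not actually recursive on ARM. A tolerant, genuinely recursive variant could look like the sketch below; this is not what the patch implements:

    #include <sys/stat.h>
    #include <cerrno>
    #include <string>

    // Create every component of `path`, ignoring directories that already exist.
    static bool MkDirRecurSketch(const std::string& path) {
      for (size_t pos = 0; pos != std::string::npos;) {
        pos = path.find('/', pos + 1);
        const std::string sub = path.substr(0, pos);
        if (sub.empty()) continue;
        if (mkdir(sub.c_str(), S_IRWXU) == -1 && errno != EEXIST) return false;
      }
      return true;
    }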