Commit 321e2a28 authored by Chunwei

Merge branch 'add-gitlab-ci' into 'incubate/lite'

add gitlab-ci.yml

See merge request inference/paddlelite!1
before_script:
  - env

image: $SERVER_LITE_DOCKER_IMAGE

stages:
  - ci
  - build_server
  - build_mobile

check:prebuilt:
  stage: ci
  script:
    #- pip3 install pre-commit
    #- alias python=python3
    - rm -rf ~/.pip
    - pip install pre-commit
    - pre-commit install
    - ./paddle/fluid/lite/tools/build.sh check_style
    #- ./paddle/fluid/lite/tools/build.sh check_need_ci

build:server:
  image: $SERVER_LITE_DOCKER_IMAGE
  stage: build_server
  cache:
    key: server_thirdparty
    paths:
      - build/third_party
  script:
    - export http_proxy=http://172.19.57.45:3128
    - export https_proxy=http://172.19.57.45:3128
    - mkdir -p build
    - cd build
    - ../paddle/fluid/lite/tools/build.sh cmake_x86
    - make extern_eigen3
    - make extern_boost
    - make framework_proto
    - make extern_warpctc
    - cd ..
    - export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$PWD/build/third_party/install/mklml/lib
    - ./paddle/fluid/lite/tools/build.sh build_test_server
  dependencies:
    - check:prebuilt

build:mobile:
  stage: build_mobile
  image: $MOBILE_LITE_DOCKER_IMAGE
  cache:
    key: mobile_thirdparty
    paths:
      - $MOBILE_LITE_CACHE0
      - $MOBILE_LITE_CACHE1
  script:
    - export http_proxy=http://172.19.57.45:3128
    - export https_proxy=http://172.19.57.45:3128
    - ./paddle/fluid/lite/tools/build.sh build_test_arm
  dependencies:
    - build:server
@@ -14,7 +14,7 @@ if(LITE_WITH_CUDA)
     set(light_api_deps ${light_api_deps} target_wrapper_cuda)
 endif()
-cc_library(light_api_lite SRCS light_api.cc DEPS ${light_api_deps} ${ops_lite} ${host_kernels})
+#cc_library(light_api_lite SRCS light_api.cc DEPS ${light_api_deps} ${ops_lite} ${host_kernels})
 message(STATUS "get ops ${ops_lite}")
 message(STATUS "get Host kernels ${host_kernels}")
......
@@ -72,9 +72,8 @@ class LightPredictor {
     // Create the kernels of the target places, and filter out the specific
     // kernel with the target alias.
-    for (auto& op : program.ops_) {
-      lite::pb::OpDesc desc(op->op_info()->desc());
-      auto kernel_type = desc.GetAttr(kKernelTypeAttr).get<std::string>();
+    for (auto& op : program.ops()) {
+      auto kernel_type = op->op_info()->GetAttr<std::string>(kKernelTypeAttr);
       std::string op_type, alias;
       Place place;
       KernelBase::ParseKernelType(kernel_type, &op_type, &alias, &place);
@@ -89,8 +88,8 @@ class LightPredictor {
       insts.emplace_back(op, std::move(*it));
     }
     program_.reset(new RuntimeProgram(std::move(insts)));
-    CHECK(program.exec_scope_);
-    program_->set_exec_scope(program.exec_scope_);
+    CHECK(program.exec_scope());
+    program_->set_exec_scope(program.exec_scope());
   }

 private:
......
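Note: the LightPredictor hunks above replace direct member access (program.ops_, program.exec_scope_) with accessor calls (program.ops(), program.exec_scope()). The following is a minimal sketch of the accessor style the new code assumes; OpLite and Scope are hypothetical stand-ins, and the real Program class in the repository may look different.

#include <memory>
#include <vector>

// Hypothetical stand-ins for the real lite::OpLite and lite::Scope types.
struct OpLite {};
struct Scope {};

// Sketch: reads go through ops() / exec_scope() instead of touching the
// ops_ / exec_scope_ members directly, which keeps the members private.
class Program {
 public:
  const std::vector<std::shared_ptr<OpLite>>& ops() const { return ops_; }
  Scope* exec_scope() const { return exec_scope_; }

 private:
  std::vector<std::shared_ptr<OpLite>> ops_;
  Scope* exec_scope_{nullptr};
};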
@@ -24,13 +24,14 @@ cc_library(variable_lite SRCS variable.cc)
 cc_library(op_registry_lite SRCS op_registry.cc DEPS framework_proto_lite)
 cc_library(scope_lite SRCS scope.cc DEPS ${tensor_lite})
 cc_library(cpu_info_lite SRCS cpu_info.cc)
-cc_library(context_lite SRCS context.cc DEPS ${tensor_lite} any_lite cpu_info_lite)
+lite_cc_library(context_lite SRCS context.cc DEPS ${tensor_lite} any_lite cpu_info_lite eigen3)
 cc_library(op_lite SRCS op_lite.cc DEPS scope_lite op_registry_lite target_wrapper_lite
     cpp_op_desc_lite ${tensor_lite})
 cc_library(types_lite SRCS types.cc)
 cc_library(type_system SRCS type_system.cc DEPS ${tensor_lite} target_wrapper_lite)
-lite_cc_library(program_lite SRCS program.cc DEPS op_lite kernel_lite compatible_pb_lite model_parser_lite HVY_DEPS framework_proto)
+lite_cc_library(program_lite SRCS program.cc DEPS op_lite kernel_lite compatible_pb_lite model_parser_lite HVY_DEPS framework_proto
+    PROFILE_DEPS basic_profiler_lite)
 cc_library(optimizer_lite SRCS optimizer.cc DEPS mir_pass_manager model_parser_lite program_lite)

 add_subdirectory(mir)
......
 cc_library(mir_node SRCS node.cc DEPS framework_proto_lite)
-cc_library(mir_ssa_graph SRCS ssa_graph.cc DEPS mir_node)
+cc_library(mir_ssa_graph SRCS ssa_graph.cc DEPS mir_node program_lite)
 cc_library(mir_pass SRCS pass.cc DEPS mir_ssa_graph)
 cc_library(mir_pass_manager SRCS pass_manager.cc DEPS mir_pass mir_ssa_graph mir_passes)
 cc_library(mir_pass_registry SRCS pass_registry.cc DEPS mir_pass_manager)
@@ -20,14 +20,14 @@ if (LITE_WITH_LIGHT_WEIGHT_FRAMEWORK)
     return()
 endif()
 cc_test(test_mir_pass_manager SRCS pass_manager_test.cc DEPS mir_pass_manager mir_passes)
-cc_test(test_ssa_graph SRCS ssa_graph_test.cc DEPS
-        mir_ssa_graph scope_lite op_lite
-        fc_op_lite
-        ${host_kernels}
-        mir_passes
-        mir_pass_manager
-        program_fake_utils
-        )
+#cc_test(test_ssa_graph SRCS ssa_graph_test.cc DEPS
+        #mir_ssa_graph scope_lite op_lite
+        #fc_op_lite
+        #${host_kernels}
+        #mir_passes
+        #mir_pass_manager
+        #program_fake_utils
+        #)
 # lite_cc_test(test_variable_place_infrence_pass SRCS variable_place_inference_pass_test.cc
 #     DEPS
 #     mul_op_lite
......
@@ -29,9 +29,9 @@ class SGDCompute : public KernelLite<TARGET(kX86), PRECISION(kFloat)> {
   using param_t = operators::ActivationParam;

   void Run() override {
-    auto &context = context_->As<X86Context>();
+    auto &context = ctx_->As<X86Context>();
     auto &sgd_param = *param_.get_mutable<operators::SGDParam>();
-    CHECK(context.x86_device_context);
+    CHECK(context.x86_device_context());

     // param.Out->template mutable_data<T>();
@@ -45,12 +45,12 @@ class SGDCompute : public KernelLite<TARGET(kX86), PRECISION(kFloat)> {
     PADDLE_ENFORCE_EQ(grad->numel(), sz);

     paddle::operators::jit::sgd_attr_t attr(1, sz, 1, sz, 1);
-    const T *lr = learning_rate->data<T>();
-    const T *param_data = param->data<T>();
-    const T *grad_data = grad->data<T>();
+    const T *lr = learning_rate->template data<T>();
+    const T *param_data = param->template data<T>();
+    const T *grad_data = grad->template data<T>();
     int64_t rows_idx = 0;
-    T *out_data =
-        param_out->mutable_data<T>(context.x86_device_context->GetPlace());
+    T *out_data = param_out->template mutable_data<T>(
+        context.x86_device_context()->GetPlace());

     auto sgd =
         paddle::operators::jit::KernelFuncs<paddle::operators::jit::SgdTuple<T>,
......
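Note: the SGD kernel hunk adds the template keyword to dependent member-template calls (for example param->template data<T>()). The following self-contained sketch shows why that keyword is needed inside a template; FakeTensor and ReadData are hypothetical names used only for illustration, not part of the Paddle-Lite code.

#include <vector>

// Hypothetical tensor stand-in with a member function template data<T>().
struct FakeTensor {
  std::vector<float> buf;
  template <typename T>
  const T* data() const { return reinterpret_cast<const T*>(buf.data()); }
};

// Inside a template, t->data<T>() is ambiguous to the parser ('<' could be
// less-than), so the 'template' disambiguator is required for dependent calls.
template <typename T, typename TensorT>
const T* ReadData(const TensorT* t) {
  return t->template data<T>();
}

int main() {
  FakeTensor t;
  t.buf = {1.f, 2.f, 3.f};
  const float* p = ReadData<float>(&t);
  return p[0] > 0 ? 0 : 1;
}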
@@ -13,6 +13,11 @@ function prepare_for_codegen {
     mkdir -p ./paddle/fluid/lite/gen_code
     touch ./paddle/fluid/lite/gen_code/__generated_code__.cc
 }
+
+function check_need_ci {
+    git log -1 --oneline | grep "test=develop" || exit -1
+}
+
 function cmake_x86 {
     prepare_for_codegen
     cmake .. -DWITH_GPU=OFF -DWITH_MKLDNN=OFF -DLITE_WITH_X86=ON ${common_flags}
@@ -28,6 +33,17 @@ function cmake_gpu {
     cmake .. " -DWITH_GPU=ON {common_flags} -DLITE_WITH_GPU=ON"
 }
+
+function check_style {
+    export PATH=/usr/bin:$PATH
+    #pre-commit install
+    clang-format --version
+    if ! pre-commit run -a ; then
+        git diff
+        exit 1
+    fi
+}
+
 function cmake_arm {
     # $1: ARM_TARGET_OS in "android" , "armlinux"
     # $2: ARM_TARGET_ARCH_ABI in "arm64-v8a", "armeabi-v7a" ,"armeabi-v7a-hf"
@@ -46,7 +62,8 @@ function cmake_arm {
 function build {
     file=$1
     for _test in $(cat $file); do
-        make $_test -j$(expr $(nproc) - 2)
+        #make $_test -j$(expr $(nproc) - 2)
+        make $_test -j8
     done
 }
@@ -58,7 +75,8 @@ function test_lite {
     for _test in $(cat $file); do
         # We move the build phase here to make the 'gen_code' test compiles after the
         # corresponding test is executed and the C++ code generates.
-        make $_test -j$(expr $(nproc) - 2)
+        #make $_test -j$(expr $(nproc) - 2)
+        make $_test -j8
         ctest -R $_test -V
     done
 }
@@ -215,6 +233,14 @@ function main {
             build_test_arm
             shift
             ;;
+        check_style)
+            check_style
+            shift
+            ;;
+        check_need_ci)
+            check_need_ci
+            shift
+            ;;
         *)
             # unknown option
             print_usage
......