Commit 9d25993b authored by Liangliang He

Fix gradle NDK build

Parent 602300d6
stages:
- linting
- basic-build
- smoke-test
- build
- test
- extra
@@ -17,7 +15,7 @@ pylint:
- pycodestyle $(find -name "*.py")
build_docs:
stage: basic-build
stage: build
script:
- cd docs
- make html
@@ -35,38 +33,18 @@ build_docs:
- docs/_build
cmake_build_android-armeabi-v7a:
stage: basic-build
stage: build
script:
- sh tools/cmake-build-android-armeabi-v7a-full.sh
- LIBMACE32_FULL_SIZE=`stat -c%s cmake-build/android-armeabi-v7a-full/install/lib/libmace.so`
- if (( LIBMACE32_FULL_SIZE > 2200000 )) ; then echo "The libmace.so size too large"; exit 1; fi
cmake_build_android-arm64-v8:
stage: basic-build
stage: build
script:
- sh tools/cmake-build-android-arm64-v8a-full.sh
build_android_demo:
stage: basic-build
script:
- pushd examples/android/ && bash build.sh static && bash build.sh dynamic && popd
code_footprint_check:
stage: smoke-test
script:
- echo check shared library size
- LIBMACE64_FULL_SIZE=`stat -c%s build/android-arm64-v8a-full/install/lib/libmace.so`
- LIBMACE64_FULL_SIZE=`stat -c%s cmake-build/android-arm64-v8a-full/install/lib/libmace.so`
- if (( LIBMACE64_FULL_SIZE > 3100000 )) ; then echo "The libmace.so size too large"; exit 1; fi
- LIBMACE32_FULL_SIZE=`stat -c%s build/android-armeabi-v7a-full/install/lib/libmace.so`
- if (( LIBMACE32_FULL_SIZE > 2200000 )) ; then echo "The libmace.so size too large"; exit 1; fi
mace_cc_test_smoketest:
stage: smoke-test
script:
- echo tests
mace_cc_benchmark_smoketest:
stage: smoke-test
script:
- echo benchmark
bazel_build:
stage: build
@@ -82,7 +60,17 @@ cmake_build:
only:
- triggers
cc_test:
build_android_demo:
stage: build
script:
- ANDROID_NDK_HOME_SAVED=${ANDROID_NDK_HOME}
- export ANDROID_NDK_HOME=/opt/android-ndk-r17b
- pushd examples/android/ && bash build.sh static && bash build.sh dynamic && popd
- export ANDROID_NDK_HOME=${ANDROID_NDK_HOME_SAVED}
only:
- triggers
mace_cc_test:
stage: test
script:
- if [ -z "$TARGET_SOCS" ]; then TARGET_SOCS=random; fi
@@ -98,11 +86,6 @@ mace_cc_benchmark:
script:
- if [ -z "$TARGET_SOCS" ]; then TARGET_SOCS=random; fi
- python tools/bazel_adb_run.py --target="//test/ccbenchmark:mace_cc_benchmark" --run_target=True --stdout_processor=ops_benchmark_stdout_processor --target_abis=armeabi-v7a,arm64-v8a --target_socs=$TARGET_SOCS --args="--filter=.*SIGMOID.*"
ndk_compatibility_check:
stage: extra
script:
- echo ndk_compatibility_check
only:
- triggers
@@ -120,20 +103,18 @@ model_tests:
DEVICE_CONF_FILE=generic-mobile-devices/devices.yml
fi
- if [ -z "$TARGET_SOCS" ]; then TARGET_SOCS=random; fi
- >
python tools/converter.py convert --config=${CONF_FILE} --target_socs=$TARGET_SOCS --model_graph_format=file --model_data_format=file --cl_mem_type=buffer
python tools/converter.py run --config=${CONF_FILE} --target_socs=$TARGET_SOCS --device_yml=${DEVICE_CONF_FILE} --round=1 --target_abis=armeabi-v7a,arm64 --validate --model_graph_format=file --model_data_format=file
python tools/converter.py run --config=${CONF_FILE} --target_socs=$TARGET_SOCS --device_yml=${DEVICE_CONF_FILE} --example --target_abis=armeabi-v7a,arm64 --round=1 --validate --model_graph_format=file --model_data_format=file
- python tools/converter.py convert --config=${CONF_FILE} --target_socs=$TARGET_SOCS --model_graph_format=file --model_data_format=file --cl_mem_type=buffer
- python tools/converter.py run --config=${CONF_FILE} --target_socs=$TARGET_SOCS --device_yml=${DEVICE_CONF_FILE} --round=1 --target_abis=armeabi-v7a,arm64 --validate --model_graph_format=file --model_data_format=file
- python tools/converter.py run --config=${CONF_FILE} --target_socs=$TARGET_SOCS --device_yml=${DEVICE_CONF_FILE} --example --target_abis=armeabi-v7a,arm64 --round=1 --validate --model_graph_format=file --model_data_format=file
- CONF_FILE=mace-models/mobilenet-v2/mobilenet-v2-host.yml
- >
python tools/converter.py convert --config=${CONF_FILE} --target_socs=$TARGET_SOCS --model_graph_format=file --model_data_format=file
python tools/converter.py run --config=${CONF_FILE} --target_socs=$TARGET_SOCS --round=1 --validate --model_graph_format=file --model_data_format=file --address_sanitizer
python tools/converter.py run --config=${CONF_FILE} --target_socs=$TARGET_SOCS --example --round=1 --validate --model_graph_format=file --model_data_format=file
python tools/converter.py benchmark --config=${CONF_FILE} --target_socs=$TARGET_SOCS --round=5 --model_graph_format=file --model_data_format=file
python tools/converter.py convert --config=${CONF_FILE} --target_socs=$TARGET_SOCS --model_graph_format=code --model_data_format=file
python tools/converter.py run --config=${CONF_FILE} --target_socs=$TARGET_SOCS --round=1 --validate --model_graph_format=code --model_data_format=file
python tools/converter.py run --config=${CONF_FILE} --target_socs=$TARGET_SOCS --example --round=1 --validate --model_graph_format=code --model_data_format=file
python tools/converter.py benchmark --config=${CONF_FILE} --target_socs=$TARGET_SOCS --round=5 --model_graph_format=code --model_data_format=file
- python tools/converter.py convert --config=${CONF_FILE} --target_socs=$TARGET_SOCS --model_graph_format=file --model_data_format=file
- python tools/converter.py run --config=${CONF_FILE} --target_socs=$TARGET_SOCS --round=1 --validate --model_graph_format=file --model_data_format=file --address_sanitizer
- python tools/converter.py run --config=${CONF_FILE} --target_socs=$TARGET_SOCS --example --round=1 --validate --model_graph_format=file --model_data_format=file
- python tools/converter.py benchmark --config=${CONF_FILE} --target_socs=$TARGET_SOCS --round=5 --model_graph_format=file --model_data_format=file
- python tools/converter.py convert --config=${CONF_FILE} --target_socs=$TARGET_SOCS --model_graph_format=code --model_data_format=file
- python tools/converter.py run --config=${CONF_FILE} --target_socs=$TARGET_SOCS --round=1 --validate --model_graph_format=code --model_data_format=file
- python tools/converter.py run --config=${CONF_FILE} --target_socs=$TARGET_SOCS --example --round=1 --validate --model_graph_format=code --model_data_format=file
- python tools/converter.py benchmark --config=${CONF_FILE} --target_socs=$TARGET_SOCS --round=5 --model_graph_format=code --model_data_format=file
- rm -rf mace-models
quantization_tests:
@@ -151,11 +132,13 @@ quantization_tests:
- >
for CONF_FILE in mace-models/mobilenet-v1/mobilenet-v1-quantize-friendly.yml mace-models/mobilenet-v1/mobilenet-v1-quantize-retrain-for-check-only.yml mace-models/mobilenet-v1/mobilenet-v1-quantize-retrain-dsp.yml;
do
python tools/converter.py convert --config=${CONF_FILE} --target_socs=$TARGET_SOCS --model_graph_format=file --model_data_format=file
python tools/converter.py run --config=${CONF_FILE} --target_socs=$TARGET_SOCS --device_yml=${DEVICE_CONF_FILE} --round=1 --validate --model_graph_format=file --model_data_format=file
python tools/converter.py run --config=${CONF_FILE} --target_socs=$TARGET_SOCS --device_yml=${DEVICE_CONF_FILE} --example --round=1 --validate --layers=0 --model_graph_format=file --model_data_format=file
python tools/converter.py convert --config=${CONF_FILE} --target_socs=$TARGET_SOCS --model_graph_format=file --model_data_format=file || exit 1;
python tools/converter.py run --config=${CONF_FILE} --target_socs=$TARGET_SOCS --device_yml=${DEVICE_CONF_FILE} --round=1 --validate --model_graph_format=file --model_data_format=file || exit 1;
python tools/converter.py run --config=${CONF_FILE} --target_socs=$TARGET_SOCS --device_yml=${DEVICE_CONF_FILE} --example --round=1 --validate --layers=0 --model_graph_format=file --model_data_format=file || exit 1;
done
- rm -rf mace-models
only:
- triggers
dynamic_linking_test:
stage: extra
@@ -171,7 +154,8 @@ dynamic_linking_test:
DEVICE_CONF_FILE=generic-mobile-devices/devices.yml
fi
- if [ -z "$TARGET_SOCS" ]; then TARGET_SOCS=random; fi
- >
python tools/converter.py convert --config=${CONF_FILE} --target_socs=$TARGET_SOCS --model_graph_format=file --model_data_format=file --cl_mem_type=buffer
python tools/converter.py run --config=${CONF_FILE} --target_socs=$TARGET_SOCS --device_yml=${DEVICE_CONF_FILE} --example --mace_lib_type=dynamic --target_abis=armeabi-v7a,arm64 --round=1 --validate --model_graph_format=file --model_data_format=file
- python tools/converter.py convert --config=${CONF_FILE} --target_socs=$TARGET_SOCS --model_graph_format=file --model_data_format=file --cl_mem_type=buffer
- python tools/converter.py run --config=${CONF_FILE} --target_socs=$TARGET_SOCS --device_yml=${DEVICE_CONF_FILE} --example --mace_lib_type=dynamic --target_abis=armeabi-v7a,arm64 --round=1 --validate --model_graph_format=file --model_data_format=file
- rm -rf mace-models
only:
- triggers
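
The new build_android_demo job above appears to be the heart of this fix: it temporarily pins ANDROID_NDK_HOME to the r17b NDK that the Gradle demo build expects, runs the static and dynamic demo builds, then restores the caller's NDK. For running the same steps outside CI, a minimal local sketch of that pattern (assuming, as in the job, that the NDK is installed at /opt/android-ndk-r17b and the demo lives under examples/android/) could look like this:

```bash
#!/usr/bin/env bash
# Sketch only: mirrors the CI job's NDK pinning; adjust paths for your machine.
set -e

ANDROID_NDK_HOME_SAVED=${ANDROID_NDK_HOME}     # remember the current NDK, if any
export ANDROID_NDK_HOME=/opt/android-ndk-r17b  # pin the NDK the Gradle build supports

pushd examples/android/
bash build.sh static    # demo linked against the static libmace
bash build.sh dynamic   # demo linked against libmace.so
popd

export ANDROID_NDK_HOME=${ANDROID_NDK_HOME_SAVED}  # restore the original NDK
```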
@@ -18,7 +18,7 @@
[中文](README_zh.md)
**Mobile AI Compute Engine** (or **MACE** for short) is a deep learning inference framework optimized for
mobile heterogeneous computing on Android, iOS and Linux devices. The design focuses on the following
mobile heterogeneous computing on Android, iOS, Linux and Windows devices. The design focuses on the following
targets:
* Performance
* Runtime is optimized with NEON, OpenCL and Hexagon, and
......
@@ -15,7 +15,7 @@
[加入我们](JOBS.md) |
[English](README.md)
**Mobile AI Compute Engine (MACE)** 是一个专为移动端异构计算平台(支持Android, iOS, Linux)优化的神经网络计算框架。
**Mobile AI Compute Engine (MACE)** 是一个专为移动端异构计算平台(支持Android, iOS, Linux, Windows)优化的神经网络计算框架。
主要从以下的角度做了专门的优化:
* 性能
* 代码经过NEON指令,OpenCL以及Hexagon HVX专门优化,并且采用
......
@@ -35,8 +35,8 @@ cc_binary(
deps = [
"//external:gflags_nothreads",
"//mace/codegen:generated_mace_engine_factory",
"//mace/codegen:generated_libmace",
"//mace/utils:utils_hdrs",
"//mace/codegen:generated_models",
"//mace/libmace",
] + if_opencl_enabled([
"//mace/codegen:generated_opencl_binary",
"//mace/codegen:generated_opencl_parameter",
@@ -70,9 +70,9 @@ cc_binary(
linkstatic = 0,
deps = [
"//external:gflags_nothreads",
"//mace/codegen:generated_libmace",
"//mace/codegen:generated_mace_engine_factory",
"//mace/utils:utils_hdrs",
"//mace/codegen:generated_models",
"//mace/libmace:libmace_dynamic",
] + if_opencl_enabled([
"//mace/codegen:generated_opencl_binary",
"//mace/codegen:generated_opencl_parameter",
......
@@ -102,17 +102,20 @@ DEFINE_string(model_name,
"",
"model name in model deployment file");
DEFINE_string(input_node,
"input_node0,input_node1",
"input nodes, separated by comma");
"",
"input nodes, separated by comma,"
"example: input_node0,input_node1");
DEFINE_string(input_shape,
"1,224,224,3:1,1,1,10",
"input shapes, separated by colon and comma");
"",
"input shapes, separated by colon and comma, "
"example: 1,224,224,3:1,1,1,10");
DEFINE_string(output_node,
"output_node0,output_node1",
"output nodes, separated by comma");
DEFINE_string(output_shape,
"1,224,224,2:1,1,1,10",
"output shapes, separated by colon and comma");
"",
"output shapes, separated by colon and comma, "
"example: 1,224,224,2:1,1,1,10");
DEFINE_string(input_data_format,
"NHWC",
"input data formats, NONE|NHWC|NCHW");
......
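
The flag changes above replace hard-coded sample values with empty defaults and move the samples into the help text. Purely as an illustration of the comma/colon-separated formats those help strings describe, a hypothetical invocation might look like the sketch below; the binary name, model name and node names are placeholders, not part of this commit.

```bash
# Hypothetical invocation showing only the flag formats: node lists are
# comma-separated, shapes use a colon between tensors and commas between
# dimensions. Names and paths here are illustrative placeholders.
./mace_run_static \
  --model_name=mobilenet_v1 \
  --input_node=input_node0,input_node1 \
  --input_shape=1,224,224,3:1,1,1,10 \
  --output_node=output_node0,output_node1 \
  --output_shape=1,224,224,2:1,1,1,10 \
  --input_data_format=NHWC
```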
@@ -11,7 +11,11 @@ cc_library(
name = "generated_models",
srcs = glob(["models/*/*.cc"]),
hdrs = glob(["models/*/*.h"]),
copts = ["-Werror", "-Wextra", "-Wno-missing-field-initializers"],
copts = [
"-Werror",
"-Wextra",
"-Wno-missing-field-initializers",
],
deps = [
"//mace/core",
],
@@ -24,39 +28,52 @@ encrypt_opencl_kernel_genrule()
cc_library(
name = "generated_opencl",
srcs = ["opencl/encrypt_opencl_kernel.cc"],
copts = ["-Werror", "-Wextra", "-Wno-missing-field-initializers"],
copts = [
"-Werror",
"-Wextra",
"-Wno-missing-field-initializers",
],
)
cc_library(
name = "generated_opencl_binary",
srcs = ["opencl/opencl_binary.cc"],
copts = ["-Werror", "-Wextra", "-Wno-missing-field-initializers"],
copts = [
"-Werror",
"-Wextra",
"-Wno-missing-field-initializers",
],
)
cc_library(
name = "generated_opencl_parameter",
srcs = ["opencl/opencl_parameter.cc"],
copts = ["-Werror", "-Wextra", "-Wno-missing-field-initializers"],
copts = [
"-Werror",
"-Wextra",
"-Wno-missing-field-initializers",
],
)
cc_library(
name = "generated_version",
srcs = ["version/version.cc"],
copts = ["-Werror", "-Wextra", "-Wno-missing-field-initializers"],
copts = [
"-Werror",
"-Wextra",
"-Wno-missing-field-initializers",
],
)
cc_library(
name = "generated_mace_engine_factory",
hdrs = glob(["engine/*.h"]),
copts = ["-Werror", "-Wextra", "-Wno-missing-field-initializers"],
copts = [
"-Werror",
"-Wextra",
"-Wno-missing-field-initializers",
],
deps = [
"//include:public_headers",
],
)
cc_library(
name = "generated_libmace",
srcs = glob(["lib/*"]),
linkstatic = 1,
visibility = ["//visibility:public"],
)
@@ -21,9 +21,14 @@ fi
mkdir -p $(dirname $OUTPUT_FILENAME)
GIT_VERSION=$(git describe --tags --long)
MACE_SOURCE_DIR=$(dirname $(dirname $(dirname $(dirname $0))))
GIT_VERSION=$(git --git-dir=${MACE_SOURCE_DIR}/.git --work-tree=${MACE_SOURCE_DIR} describe --long --tags)
if [[ $? != 0 ]]; then
GIT_VERSION=unknown
GIT_VERSION=$(git describe --long --tags)
if [[ $? != 0 ]]; then
GIT_VERSION=unknown
fi
else
GIT_VERSION=${GIT_VERSION}
fi
......
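
Because the hunk above interleaves removed and added lines, the version script's behavior after this change can be hard to read. Applied, it reduces to roughly the following sketch (the no-op else branch is omitted): derive the MACE source root from the script's own path, ask that repository for a version, and fall back first to a plain `git describe`, then to "unknown".

```bash
# Reconstructed post-patch logic (sketch, not a verbatim copy of the file).
MACE_SOURCE_DIR=$(dirname $(dirname $(dirname $(dirname $0))))
GIT_VERSION=$(git --git-dir=${MACE_SOURCE_DIR}/.git \
                  --work-tree=${MACE_SOURCE_DIR} describe --long --tags)
if [[ $? != 0 ]]; then
  # The repo-relative lookup failed; try the current directory, then give up.
  GIT_VERSION=$(git describe --long --tags)
  if [[ $? != 0 ]]; then
    GIT_VERSION=unknown
  fi
fi
```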
@@ -86,7 +86,6 @@ message OperatorDef {
repeated int32 out_max_byte_size = 104; // only support 32-bit len
}
// for hexagon mace-nnlib
message InputOutputInfo {
optional string name = 1;
optional int32 node_id = 2;
@@ -103,7 +102,6 @@ message NetDef {
repeated Argument arg = 2;
repeated ConstTensor tensors = 3;
// for hexagon mace-nnlib
repeated InputOutputInfo input_info = 100;
repeated InputOutputInfo output_info = 101;
}
@@ -10,17 +10,6 @@ load(
licenses(["notice"]) # Apache 2.0
cc_library(
name = "statistics",
srcs = ["statistics.cc"],
hdrs = ["statistics.h"],
copts = ["-Werror", "-Wextra", "-Wno-missing-field-initializers"],
visibility = ["//visibility:public"],
deps = [
"//mace/utils",
],
)
cc_binary(
name = "benchmark_model_static",
srcs = [
@@ -34,12 +23,15 @@ cc_binary(
linkopts = if_openmp_enabled(["-fopenmp"]),
linkstatic = 1,
deps = [
":statistics",
"//external:gflags_nothreads",
"//mace/codegen:libmodels",
"//mace/codegen:generated_mace_engine_factory",
"//mace/libmace:libmace",
],
"//mace/codegen:generated_models",
"//mace/libmace",
"//mace/utils",
] + if_opencl_enabled([
"//mace/codegen:generated_opencl_binary",
"//mace/codegen:generated_opencl_parameter",
]),
)
cc_binary(
@@ -55,7 +47,7 @@ cc_binary(
linkopts = [
"-lm",
] + if_openmp_enabled([
"-fopenmp"
"-fopenmp",
]) + if_android([
"-ldl",
"-pie",
@@ -66,9 +58,12 @@ cc_binary(
":statistics",
"//external:gflags_nothreads",
"//mace/codegen:generated_mace_engine_factory",
"//mace/codegen:libmodels",
"//mace/codegen:generated_models",
"//mace/libmace:libmace_dynamic",
],
] + if_opencl_enabled([
"//mace/codegen:generated_opencl_binary",
"//mace/codegen:generated_opencl_parameter",
]),
)
cc_library(
@@ -82,9 +77,13 @@ cc_library(
cc_binary(
name = "model_throughput_test",
srcs = ["model_throughput_test.cc"],
copts = [
"-Werror",
"-Wextra",
"-Wno-missing-field-initializers",
],
linkopts = if_openmp_enabled(["-fopenmp"]),
linkstatic = 1,
copts = ["-Werror", "-Wextra", "-Wno-missing-field-initializers"],
deps = [
":libmace_merged",
"//external:gflags_nothreads",
......
@@ -26,7 +26,7 @@ cc_binary(
deps = [
"//external:gflags_nothreads",
"//mace/codegen:generated_mace_engine_factory",
"//mace/codegen:libmodels",
"//mace/codegen:generated_models",
"//mace/libmace",
],
)
@@ -50,7 +50,7 @@ cc_binary(
deps = [
"//external:gflags_nothreads",
"//mace/codegen:generated_mace_engine_factory",
"//mace/codegen:libmodels",
"//mace/codegen:generated_models",
"//mace/libmace:libmace_dynamic",
],
)
@@ -3,6 +3,7 @@
# By default, we don't distinguish target and host platforms.
build --distinct_host_configuration=false
build -s
build --verbose_failures
build --copt=-std=c++11
build --copt=-fPIC
......
@@ -445,8 +445,8 @@ BUILD_TMP_GENERAL_OUTPUT_DIR_NAME = 'general'
MODEL_OUTPUT_DIR_NAME = 'model'
EXAMPLE_STATIC_NAME = "example_static"
EXAMPLE_DYNAMIC_NAME = "example_dynamic"
EXAMPLE_STATIC_TARGET = "//mace/examples/cli:" + EXAMPLE_STATIC_NAME
EXAMPLE_DYNAMIC_TARGET = "//mace/examples/cli:" + EXAMPLE_DYNAMIC_NAME
EXAMPLE_STATIC_TARGET = "//examples/cli:" + EXAMPLE_STATIC_NAME
EXAMPLE_DYNAMIC_TARGET = "//examples/cli:" + EXAMPLE_DYNAMIC_NAME
MACE_RUN_STATIC_NAME = "mace_run_static"
MACE_RUN_DYNAMIC_NAME = "mace_run_dynamic"
MACE_RUN_STATIC_TARGET = "//mace/tools/validation:" + MACE_RUN_STATIC_NAME
@@ -473,8 +473,8 @@ MODEL_LIB_PATH = "bazel-bin/mace/codegen/libgenerated_models.a"
QUANTIZE_STAT_TARGET = "//mace/tools/quantization:quantize_stat"
BM_MODEL_STATIC_NAME = "benchmark_model_static"
BM_MODEL_DYNAMIC_NAME = "benchmark_model_dynamic"
BM_MODEL_STATIC_TARGET = "//mace/benchmark:" + BM_MODEL_STATIC_NAME
BM_MODEL_DYNAMIC_TARGET = "//mace/benchmark:" + BM_MODEL_DYNAMIC_NAME
BM_MODEL_STATIC_TARGET = "//mace/tools/benchmark:" + BM_MODEL_STATIC_NAME
BM_MODEL_DYNAMIC_TARGET = "//mace/tools/benchmark:" + BM_MODEL_DYNAMIC_NAME
################################
......
@@ -949,7 +949,7 @@ def build_example(configs, target_abi, toolchain, enable_openmp, mace_lib_type,
model_lib_path = get_model_lib_output_path(library_name,
target_abi)
sh.cp("-f", model_lib_path, LIB_CODEGEN_DIR)
build_arg = "--per_file_copt=mace/examples/cli/example.cc@-DMODEL_GRAPH_FORMAT_CODE" # noqa
build_arg = "--per_file_copt=examples/cli/example.cc@-DMODEL_GRAPH_FORMAT_CODE" # noqa
if mace_lib_type == MACELibType.dynamic:
example_target = EXAMPLE_DYNAMIC_TARGET
@@ -1064,7 +1064,7 @@ def build_benchmark_model(configs,
mace_check(os.path.exists(ENGINE_CODEGEN_DIR),
ModuleName.BENCHMARK,
"You should convert model first.")
build_arg = "--per_file_copt=mace/benchmark/benchmark_model.cc@-DMODEL_GRAPH_FORMAT_CODE" # noqa
build_arg = "--per_file_copt=mace/tools/benchmark/benchmark_model.cc@-DMODEL_GRAPH_FORMAT_CODE" # noqa
sh_commands.bazel_build(benchmark_target,
abi=target_abi,
......