Commit 679aabac authored by Chunwei

Merge branch 'chunwei/support-use-kernel-seperate' into 'incubate/lite'

support deployment

See merge request inference/paddlelite!56
......@@ -109,15 +109,15 @@ build:mobile_armlinux:
- build:server
build:mobile_model_mobilenetv1:
build:mobile_model_resnet50:
tags:
- lite
stage: build_mobile
image: $MOBILE_LITE_DOCKER_IMAGE
script:
- export CCACHE_DIR=$CI_PROJECT_DIR/build_mobile_model_mobilenetv1
- ./paddle/fluid/lite/tools/build.sh build_test_arm_model_mobilenetv1
- export CCACHE_DIR=$CI_PROJECT_DIR/build_mobile_model_resnet50
- ./paddle/fluid/lite/tools/build.sh build_test_arm_model_resnet50
dependencies:
- build:server
......@@ -127,17 +127,18 @@ build:mobile_model_mobilenetv1:
paths:
- build.lite.android.armv8.gcc
- ~/.ccache
- $CI_PROJECT_DIR/build_mobile_model_mobilenetv1
- $CI_PROJECT_DIR/build_mobile_model_resnet50
build:mobile_model_mobilenetv2:
build:mobile_model_mobilenetv1:
tags:
- lite
stage: build_mobile
image: $MOBILE_LITE_DOCKER_IMAGE
script:
- export CCACHE_DIR=$CI_PROJECT_DIR/build_mobile_model_mobilenetv2
- ./paddle/fluid/lite/tools/build.sh build_test_arm_model_mobilenetv2
- export CCACHE_DIR=$CI_PROJECT_DIR/build_mobile_model_mobilenetv1
- ./paddle/fluid/lite/tools/build.sh build_test_arm_model_mobilenetv1
dependencies:
- build:server
......@@ -147,17 +148,17 @@ build:mobile_model_mobilenetv2:
paths:
- build.lite.android.armv8.gcc
- ~/.ccache
- $CI_PROJECT_DIR/build_mobile_model_mobilenetv2
- $CI_PROJECT_DIR/build_mobile_model_mobilenetv1
build:mobile_model_resnet50:
build:mobile_model_mobilenetv2:
tags:
- lite
stage: build_mobile
image: $MOBILE_LITE_DOCKER_IMAGE
script:
- export CCACHE_DIR=$CI_PROJECT_DIR/build_mobile_model_resnet50
- ./paddle/fluid/lite/tools/build.sh build_test_arm_model_resnet50
- export CCACHE_DIR=$CI_PROJECT_DIR/build_mobile_model_mobilenetv2
- ./paddle/fluid/lite/tools/build.sh build_test_arm_model_mobilenetv2
dependencies:
- build:server
......@@ -167,7 +168,7 @@ build:mobile_model_resnet50:
paths:
- build.lite.android.armv8.gcc
- ~/.ccache
- $CI_PROJECT_DIR/build_mobile_model_resnet50
- $CI_PROJECT_DIR/build_mobile_model_mobilenetv2
#build:mobile_model_inceptionv4:
# tags:
......
......@@ -5,7 +5,7 @@ cc_library(ir_params_sync_among_devices_pass SRCS ir_params_sync_among_devices_p
cc_library(ir_graph_to_program_pass SRCS ir_graph_to_program_pass.cc DEPS analysis_pass graph_to_program_pass)
cc_library(adjust_cudnn_workspace_size_pass SRCS adjust_cudnn_workspace_size_pass.cc DEPS analysis_pass graph_to_program_pass)
cc_library(analysis_passes SRCS use_passes.cc DEPS
cc_library(analysis_passes SRCS paddle_use_passes.cc DEPS
ir_graph_build_pass
ir_analysis_pass
ir_params_sync_among_devices_pass
......
......@@ -208,3 +208,33 @@ if (WITH_TESTING)
lite_download_and_uncompress(${LITE_MODEL_DIR} ${LITE_URL} "GoogleNet_inference.tar.gz")
endif()
endif()
# for publish
set(INFER_LITE_PUBLISH_ROOT "${CMAKE_BINARY_DIR}/inference_lite_lib" CACHE STRING "inference publish path")
message(STATUS "publish inference lib to ${INFER_LITE_PUBLISH_ROOT}")

# The final target for publishing the lite lib
add_custom_target(publish_inference_lite)

#cc_library(inference_cxx_lib DEPS cxx_api_lite)
add_custom_target(publish_inference_cxx_lib ${TARGET}
    COMMAND mkdir -p "${INFER_LITE_PUBLISH_ROOT}/cxx/lib"
    COMMAND mkdir -p "${INFER_LITE_PUBLISH_ROOT}/cxx/include"
    COMMAND cp "${CMAKE_SOURCE_DIR}/paddle/fluid/lite/api/paddle_*.h" "${INFER_LITE_PUBLISH_ROOT}/cxx/include"
    COMMAND cp "${CMAKE_BINARY_DIR}/paddle/fluid/lite/api/libpaddle_api_full.a" "${INFER_LITE_PUBLISH_ROOT}/cxx/lib"
)
add_dependencies(publish_inference_cxx_lib paddle_api_full)
add_dependencies(publish_inference_lite publish_inference_cxx_lib)

if (LITE_WITH_LIGHT_WEIGHT_FRAMEWORK)
    #cc_library(inference_mobile_lib DEPS light_api_lite)
    add_custom_target(publish_inference_mobile_lib ${TARGET}
        COMMAND mkdir -p "${INFER_LITE_PUBLISH_ROOT}/mobile/lib"
        COMMAND mkdir -p "${INFER_LITE_PUBLISH_ROOT}/mobile/include"
        COMMAND cp "${CMAKE_SOURCE_DIR}/paddle/fluid/lite/api/paddle_*.h" "${INFER_LITE_PUBLISH_ROOT}/cxx/include"
        COMMAND cp "${CMAKE_BINARY_DIR}/paddle/fluid/lite/api/libpaddle_api_light.a" "${INFER_LITE_PUBLISH_ROOT}/cxx/lib"
    )
    add_dependencies(publish_inference_mobile_lib paddle_api_light)
    add_dependencies(publish_inference_lite publish_inference_mobile_lib)
endif()
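The published cxx/ tree is meant to be consumed on its own: a deployment project includes the paddle_*.h headers copied into cxx/include and links libpaddle_api_full.a from cxx/lib (or the light API library for the mobile build). The sketch below is illustrative only and not part of this merge request; the CxxConfig / CreatePaddlePredictor calls, the model path, and the compiler flags are assumptions based on paddle_api.h rather than confirmed by the diff.

// demo.cc -- hypothetical consumer of the published inference_lite_lib/cxx tree.
// Assumed build line:
//   g++ -std=c++11 demo.cc -Iinference_lite_lib/cxx/include \
//       -Linference_lite_lib/cxx/lib -lpaddle_api_full -o demo
#include "paddle_api.h"          // public API header copied into cxx/include
#include "paddle_use_kernels.h"  // keep the linked-in kernels registered
#include "paddle_use_ops.h"      // keep the linked-in ops registered
#include "paddle_use_passes.h"   // keep the MIR passes registered

int main() {
  paddle::lite_api::CxxConfig config;          // assumed API from paddle_api.h
  config.set_model_dir("./lite_naive_model");  // hypothetical model directory
  auto predictor = paddle::lite_api::CreatePaddlePredictor(config);
  // Valid-place selection and input-tensor setup are omitted in this sketch.
  predictor->Run();
  return 0;
}

The mobile/ tree produced under LITE_WITH_LIGHT_WEIGHT_FRAMEWORK would be consumed the same way, just against libpaddle_api_light.a and the light API.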
......@@ -6,7 +6,7 @@ if(LITE_WITH_CUDA)
nv_test(test_cxx_api_lite_cuda SRCS cxx_api_test.cc DEPS cxx_api_lite_cuda)
endif()
lite_cc_library(place_lite SRCS place.cc DEPS glog)
lite_cc_library(place_lite SRCS paddle_place.cc DEPS glog)
lite_cc_library(lite_api_test_helper SRCS lite_api_test_helper.cc
DEPS scope_lite optimizer_lite target_wrapper_host model_parser_lite program_lite
......@@ -103,8 +103,11 @@ lite_cc_test(test_apis_lite SRCS apis_test.cc
lite_cc_library(cxx_api_impl_lite SRCS cxx_api_impl.cc DEPS cxx_api_lite)
lite_cc_library(light_api_impl_lite SRCS light_api_impl.cc DEPS light_api_lite)
lite_cc_library(paddle_api_lite SRCS paddle_api.cc DEPS cxx_api_impl_lite light_api_impl_lite)
lite_cc_test(test_paddle_api_lite SRCS paddle_api_test.cc DEPS cxx_api_lite light_api_lite paddle_api_lite
lite_cc_library(paddle_api_full SRCS paddle_api.cc DEPS cxx_api_impl_lite light_api_impl_lite)
lite_cc_library(paddle_api_light SRCS paddle_api.cc DEPS light_api_impl_lite)
lite_cc_test(test_paddle_api_lite SRCS paddle_api_test.cc DEPS cxx_api_lite light_api_lite paddle_api_full
ARGS --model_dir=${LITE_MODEL_DIR}/lite_naive_model SERIAL)
if (WITH_TESTING)
add_dependencies(test_paddle_api_lite test_apis_lite)
......
......@@ -20,10 +20,10 @@
#include <vector>
#include "paddle/fluid/lite/api/cxx_api.h"
#include "paddle/fluid/lite/api/light_api.h"
#include "paddle/fluid/lite/api/paddle_use_kernels.h"
#include "paddle/fluid/lite/api/paddle_use_ops.h"
#include "paddle/fluid/lite/api/paddle_use_passes.h"
#include "paddle/fluid/lite/core/mir/pass_registry.h"
#include "paddle/fluid/lite/core/mir/use_passes.h"
#include "paddle/fluid/lite/kernels/use_kernels.h"
#include "paddle/fluid/lite/operators/use_ops.h"
DEFINE_string(model_dir, "", "");
DEFINE_string(optimized_model, "", "");
......
......@@ -14,7 +14,7 @@
#include "paddle/fluid/lite/api/cxx_api.h"
#include <chrono> // NOLINT
#include "paddle/fluid/lite/core/mir/use_passes.h"
#include "paddle/fluid/lite/api/paddle_use_passes.h"
#include "paddle/fluid/lite/core/op_registry.h"
namespace paddle {
......
......@@ -17,11 +17,11 @@
#include <gtest/gtest.h>
#include <vector>
#include "paddle/fluid/lite/api/lite_api_test_helper.h"
#include "paddle/fluid/lite/api/paddle_use_kernels.h"
#include "paddle/fluid/lite/api/paddle_use_ops.h"
#include "paddle/fluid/lite/api/paddle_use_passes.h"
#include "paddle/fluid/lite/core/compatible_tensor.h"
#include "paddle/fluid/lite/core/mir/use_passes.h"
#include "paddle/fluid/lite/core/op_registry.h"
#include "paddle/fluid/lite/kernels/use_kernels.h"
#include "paddle/fluid/lite/operators/use_ops.h"
// For training.
DEFINE_string(startup_program_path, "", "");
......
......@@ -16,11 +16,11 @@
#include <gtest/gtest.h>
#include <vector>
#include "paddle/fluid/lite/api/cxx_api.h"
#include "paddle/fluid/lite/api/paddle_use_kernels.h"
#include "paddle/fluid/lite/api/paddle_use_ops.h"
#include "paddle/fluid/lite/api/paddle_use_passes.h"
#include "paddle/fluid/lite/api/test_helper.h"
#include "paddle/fluid/lite/core/mir/use_passes.h"
#include "paddle/fluid/lite/core/op_registry.h"
#include "paddle/fluid/lite/kernels/use_kernels.h"
#include "paddle/fluid/lite/operators/use_ops.h"
namespace paddle {
namespace lite {
......
......@@ -15,9 +15,9 @@
#include "paddle/fluid/lite/api/light_api.h"
#include <gflags/gflags.h>
#include <gtest/gtest.h>
#include "paddle/fluid/lite/core/mir/use_passes.h"
#include "paddle/fluid/lite/kernels/use_kernels.h"
#include "paddle/fluid/lite/operators/use_ops.h"
#include "paddle/fluid/lite/api/paddle_use_kernels.h"
#include "paddle/fluid/lite/api/paddle_use_ops.h"
#include "paddle/fluid/lite/api/paddle_use_passes.h"
DEFINE_string(optimized_model, "", "");
......
......@@ -16,11 +16,11 @@
#include <gtest/gtest.h>
#include <vector>
#include "paddle/fluid/lite/api/cxx_api.h"
#include "paddle/fluid/lite/api/paddle_use_kernels.h"
#include "paddle/fluid/lite/api/paddle_use_ops.h"
#include "paddle/fluid/lite/api/paddle_use_passes.h"
#include "paddle/fluid/lite/api/test_helper.h"
#include "paddle/fluid/lite/core/mir/use_passes.h"
#include "paddle/fluid/lite/core/op_registry.h"
#include "paddle/fluid/lite/kernels/use_kernels.h"
#include "paddle/fluid/lite/operators/use_ops.h"
namespace paddle {
namespace lite {
......
......@@ -16,11 +16,11 @@
#include <gtest/gtest.h>
#include <vector>
#include "paddle/fluid/lite/api/cxx_api.h"
#include "paddle/fluid/lite/api/paddle_use_kernels.h"
#include "paddle/fluid/lite/api/paddle_use_ops.h"
#include "paddle/fluid/lite/api/paddle_use_passes.h"
#include "paddle/fluid/lite/api/test_helper.h"
#include "paddle/fluid/lite/core/mir/use_passes.h"
#include "paddle/fluid/lite/core/op_registry.h"
#include "paddle/fluid/lite/kernels/use_kernels.h"
#include "paddle/fluid/lite/operators/use_ops.h"
namespace paddle {
namespace lite {
......
......@@ -22,7 +22,7 @@
#include <memory>
#include <string>
#include <vector>
#include "place.h" // NOLINT
#include "paddle_place.h" // NOLINT
namespace paddle {
namespace lite_api {
......
......@@ -14,10 +14,11 @@
#include "paddle/fluid/lite/api/paddle_api.h"
#include <gflags/gflags.h>
#include <glog/logging.h>
#include <gtest/gtest.h>
#include "paddle/fluid/lite/core/mir/use_passes.h"
#include "paddle/fluid/lite/kernels/use_kernels.h"
#include "paddle/fluid/lite/operators/use_ops.h"
#include "paddle/fluid/lite/api/paddle_use_kernels.h"
#include "paddle/fluid/lite/api/paddle_use_ops.h"
#include "paddle/fluid/lite/api/paddle_use_passes.h"
DEFINE_string(model_dir, "", "");
......
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
 * This file defines the macros that explicitly declare which ops, kernels, and
 * MIR passes are used in the inference lib.
 */
#pragma once
#define USE_LITE_OP(op_type__)                                   \
  extern int touch_op_##op_type__();                             \
  int LITE_OP_REGISTER_FAKE(op_type__) __attribute__((unused)) = \
      touch_op_##op_type__();

#define USE_LITE_KERNEL(op_type__, target__, precision__, layout__, alias__) \
  extern int touch_##op_type__##target__##precision__##layout__##alias__();  \
  int op_type__##target__##precision__##layout__##alias__                    \
      __attribute__((unused)) =                                              \
          touch_##op_type__##target__##precision__##layout__##alias__();

#define USE_MIR_PASS(name__)                                   \
  extern bool mir_pass_registry##name__##_fake();              \
  static bool mir_pass_usage##name__ __attribute__((unused)) = \
      mir_pass_registry##name__##_fake();

#define LITE_OP_REGISTER_FAKE(op_type__) op_type__##__registry__
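For reference, the snippet below (illustrative only, not part of the diff) shows roughly what these macros expand to at a use site, using the mul op and the static_kernel_pick_pass pass that already appear in paddle_use_ops.h and paddle_use_passes.h.

// USE_LITE_OP(mul): LITE_OP_REGISTER_FAKE(mul) pastes to mul__registry__, so
// the consumer's object file gains an unused global whose initializer calls
// touch_op_mul(), forcing the library object that registers "mul" to be linked.
extern int touch_op_mul();
int mul__registry__ __attribute__((unused)) = touch_op_mul();

// USE_MIR_PASS(static_kernel_pick_pass): the same linker trick for MIR passes.
extern bool mir_pass_registrystatic_kernel_pick_pass_fake();
static bool mir_pass_usagestatic_kernel_pick_pass __attribute__((unused)) =
    mir_pass_registrystatic_kernel_pick_pass_fake();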
......@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/lite/api/place.h"
#include "paddle/fluid/lite/api/paddle_place.h"
#include <glog/logging.h>
#include "paddle/fluid/lite/utils/hash.h"
......
......@@ -17,7 +17,7 @@
*/
#pragma once
#include "paddle/fluid/lite/core/op_registry.h"
#include "paddle_lite_factory_helper.h" // NOLINT
USE_LITE_KERNEL(feed, kHost, kAny, kAny, def);
USE_LITE_KERNEL(fetch, kHost, kAny, kAny, def);
......@@ -61,6 +61,7 @@ USE_LITE_KERNEL(mul, kCUDA, kFloat, kNCHW, def);
USE_LITE_KERNEL(io_copy, kCUDA, kAny, kAny, host_to_device);
USE_LITE_KERNEL(io_copy, kCUDA, kAny, kAny, device_to_host);
#endif
#ifdef LITE_WITH_OPENCL
USE_LITE_KERNEL(elementwise_add, kOpenCL, kFloat, kNCHW, def);
#endif
......@@ -16,7 +16,7 @@
// ATTENTION This header can only be included in a .cc file.
#include "paddle/fluid/lite/core/op_registry.h"
#include "paddle_lite_factory_helper.h" // NOLINT
USE_LITE_OP(mul);
USE_LITE_OP(fc);
......
......@@ -13,7 +13,7 @@
// limitations under the License.
#pragma once
#include "paddle/fluid/lite/core/mir/pass_registry.h"
#include "paddle_lite_factory_helper.h" // NOLINT
USE_MIR_PASS(demo);
USE_MIR_PASS(static_kernel_pick_pass);
......
......@@ -16,11 +16,11 @@
#include <gtest/gtest.h>
#include <vector>
#include "paddle/fluid/lite/api/cxx_api.h"
#include "paddle/fluid/lite/api/paddle_use_kernels.h"
#include "paddle/fluid/lite/api/paddle_use_ops.h"
#include "paddle/fluid/lite/api/paddle_use_passes.h"
#include "paddle/fluid/lite/api/test_helper.h"
#include "paddle/fluid/lite/core/mir/use_passes.h"
#include "paddle/fluid/lite/core/op_registry.h"
#include "paddle/fluid/lite/kernels/use_kernels.h"
#include "paddle/fluid/lite/operators/use_ops.h"
namespace paddle {
namespace lite {
......
......@@ -31,11 +31,11 @@
#include <vector>
#include "paddle/fluid/lite/api/cxx_api.h"
#include "paddle/fluid/lite/api/lite_api_test_helper.h"
#include "paddle/fluid/lite/api/paddle_use_kernels.h"
#include "paddle/fluid/lite/api/paddle_use_ops.h"
#include "paddle/fluid/lite/api/paddle_use_passes.h"
#include "paddle/fluid/lite/core/compatible_tensor.h"
#include "paddle/fluid/lite/core/mir/use_passes.h"
#include "paddle/fluid/lite/core/op_registry.h"
#include "paddle/fluid/lite/kernels/use_kernels.h"
#include "paddle/fluid/lite/operators/use_ops.h"
// for googlenet
DEFINE_string(model_dir, "", "");
......
......@@ -18,9 +18,9 @@
#include <vector>
#include "paddle/fluid/framework/program_desc.h"
#include "paddle/fluid/lite/api/cxx_api.h"
#include "paddle/fluid/lite/api/paddle_use_passes.h"
#include "paddle/fluid/lite/core/compatible_tensor.h"
#include "paddle/fluid/lite/core/mir/graph_visualize_pass.h"
#include "paddle/fluid/lite/core/mir/use_passes.h"
#include "paddle/fluid/lite/core/op_registry.h"
#include "paddle/fluid/lite/core/program.h"
......
......@@ -18,9 +18,9 @@
#include <vector>
#include "paddle/fluid/framework/program_desc.h"
#include "paddle/fluid/lite/api/cxx_api.h"
#include "paddle/fluid/lite/api/paddle_use_passes.h"
#include "paddle/fluid/lite/core/compatible_tensor.h"
#include "paddle/fluid/lite/core/mir/graph_visualize_pass.h"
#include "paddle/fluid/lite/core/mir/use_passes.h"
#include "paddle/fluid/lite/core/op_registry.h"
#include "paddle/fluid/lite/core/program.h"
......
......@@ -17,7 +17,7 @@
#include <gtest/gtest.h>
#include <vector>
#include "paddle/fluid/lite/api/cxx_api.h"
#include "paddle/fluid/lite/core/mir/use_passes.h"
#include "paddle/fluid/lite/api/paddle_use_passes.h"
#include "paddle/fluid/lite/core/op_registry.h"
DEFINE_string(model_dir, "", "");
......
......@@ -15,6 +15,7 @@
#pragma once
#include <string>
#include "paddle/fluid/lite/api/paddle_lite_factory_helper.h"
#include "paddle/fluid/lite/core/mir/pass_manager.h"
namespace paddle {
......@@ -41,8 +42,3 @@ class PassRegistry {
bool mir_pass_registry##name__##_fake() { \
return mir_pass_registry##name__.Touch(); \
}
#define USE_MIR_PASS(name__) \
extern bool mir_pass_registry##name__##_fake(); \
static bool mir_pass_usage##name__ __attribute__((unused)) = \
mir_pass_registry##name__##_fake();
......@@ -16,8 +16,8 @@
#include <gtest/gtest.h>
#include <memory>
#include "paddle/fluid/framework/program_desc.h"
#include "paddle/fluid/lite/api/paddle_use_passes.h"
#include "paddle/fluid/lite/core/mir/graph_visualize_pass.h"
#include "paddle/fluid/lite/core/mir/use_passes.h"
#include "paddle/fluid/lite/core/op_registry.h"
#include "paddle/fluid/lite/core/program_fake_utils.h"
......
......@@ -13,7 +13,7 @@
// limitations under the License.
#include <gtest/gtest.h>
#include "paddle/fluid/lite/core/mir/use_passes.h"
#include "paddle/fluid/lite/api/paddle_use_passes.h"
#include "paddle/fluid/lite/core/optimizer.h"
#include "paddle/fluid/lite/core/program_fake_utils.h"
#include "paddle/fluid/lite/kernels/cuda/use_kernels.h"
......
......@@ -20,6 +20,7 @@
#include <unordered_map>
#include <utility>
#include <vector>
#include "paddle/fluid/lite/api/paddle_lite_factory_helper.h"
#include "paddle/fluid/lite/core/kernel.h"
#include "paddle/fluid/lite/core/op_lite.h"
#include "paddle/fluid/lite/core/target_wrapper.h"
......@@ -32,7 +33,6 @@ namespace lite {
using KernelFunc = std::function<void()>;
using KernelFuncCreator = std::function<std::unique_ptr<KernelFunc>()>;
class LiteOpRegistry final : public Factory<OpLite, std::shared_ptr<OpLite>> {
public:
static LiteOpRegistry &Global() {
......@@ -177,7 +177,6 @@ class KernelRegistor : public lite::Registor<KernelType> {
// Operator registry
#define LITE_OP_REGISTER_INSTANCE(op_type__) op_type__##__registry__instance__
#define LITE_OP_REGISTER_FAKE(op_type__) op_type__##__registry__
#define REGISTER_LITE_OP(op_type__, OpClass) \
static paddle::lite::OpLiteRegistor<OpClass> LITE_OP_REGISTER_INSTANCE( \
op_type__)(#op_type__); \
......@@ -185,11 +184,6 @@ class KernelRegistor : public lite::Registor<KernelType> {
return LITE_OP_REGISTER_INSTANCE(op_type__).Touch(); \
}
#define USE_LITE_OP(op_type__) \
extern int touch_op_##op_type__(); \
int LITE_OP_REGISTER_FAKE(op_type__) __attribute__((unused)) = \
touch_op_##op_type__();
// Kernel registry
#define LITE_KERNEL_REGISTER(op_type__, target__, precision__) \
op_type__##__##target__##__##precision__##__registor__
......@@ -219,12 +213,6 @@ class KernelRegistor : public lite::Registor<KernelType> {
TARGET(target__), PRECISION(precision__), DATALAYOUT(layout__)>( \
#op_type__ "/" #alias__)
#define USE_LITE_KERNEL(op_type__, target__, precision__, layout__, alias__) \
extern int touch_##op_type__##target__##precision__##layout__##alias__(); \
int op_type__##target__##precision__##layout__##alias__ \
__attribute__((unused)) = \
touch_##op_type__##target__##precision__##layout__##alias__();
#define LITE_KERNEL_INSTANCE(op_type__, target__, precision__, layout__, \
alias__) \
op_type__##target__##precision__##layout__##alias__
......
......@@ -16,10 +16,10 @@
#include <gtest/gtest.h>
#include <memory>
#include <utility>
#include "paddle/fluid/lite/api/paddle_use_passes.h"
#include "paddle/fluid/lite/core/mir/generate_program_pass.h"
#include "paddle/fluid/lite/core/mir/pass_manager.h"
#include "paddle/fluid/lite/core/mir/static_kernel_pick_pass.h"
#include "paddle/fluid/lite/core/mir/use_passes.h"
#include "paddle/fluid/lite/core/program_fake_utils.h"
namespace paddle {
......
......@@ -16,7 +16,7 @@
#include <iostream>
#include <sstream>
#include <string>
#include "paddle/fluid/lite/api/place.h"
#include "paddle/fluid/lite/api/paddle_place.h"
#include "paddle/fluid/lite/utils/cp_logging.h"
#ifdef LITE_WITH_CUDA
......
......@@ -85,6 +85,9 @@ function build_single {
function build {
make lite_compile_deps -j$NUM_CORES_FOR_COMPILE
# test publish inference lib
make publish_inference_lite
}
# It will eagerly test all lite related unittests.
......@@ -104,6 +107,7 @@ function build_test_server {
export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/paddle/build/third_party/install/mklml/lib"
cmake_x86_for_CI
build
test_lite $TESTS_FILE
}
......@@ -223,6 +227,9 @@ function build_arm {
cmake_arm ${os} ${abi} ${lang}
build $TESTS_FILE
# test publish inference lib
make publish_inference_lite
}
# $1: ARM_TARGET_OS in "android" , "armlinux"
......
......@@ -16,10 +16,10 @@
#include <string>
#include <vector>
#include "paddle/fluid/lite/api/cxx_api.h"
#include "paddle/fluid/lite/core/mir/use_passes.h"
#include "paddle/fluid/lite/api/paddle_use_kernels.h"
#include "paddle/fluid/lite/api/paddle_use_ops.h"
#include "paddle/fluid/lite/api/paddle_use_passes.h"
#include "paddle/fluid/lite/core/op_registry.h"
#include "paddle/fluid/lite/kernels/use_kernels.h"
#include "paddle/fluid/lite/operators/use_ops.h"
#include "paddle/fluid/lite/tools/debug/debug_utils.h"
namespace paddle {
......