diff --git a/CMakeLists.txt b/CMakeLists.txt index bd25403836ea4055e546236258ca6d331d30c1e0..ae84f8eb31a353be636d507031325f743cdc2ec2 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -42,7 +42,17 @@ if(LITE_WITH_LIGHT_WEIGHT_FRAMEWORK) message(FATAL_ERROR "ARM_TARGET_ARCH_ABI must be in one of ${ARM_TARGET_ARCH_ABI_LIST}") endif() - message(STATUS "Lite ARM Compile ${ARM_TARGET_OS} with ${ARM_TARGET_ARCH_ABI}") + # check target language + if(NOT DEFINED ARM_TARGET_LANG) + set(ARM_TARGET_LANG "gcc" CACHE STRING "Choose ARM Target Language") + endif() + set(ARM_TARGET_LANG_LIST "gcc" "clang" "") + set_property(CACHE ARM_TARGET_LANG PROPERTY STRINGS ${ARM_TARGET_LANG_LIST}) + if (NOT ARM_TARGET_LANG IN_LIST ARM_TARGET_LANG_LIST) + message(FATAL_ERROR "ARM_TARGET_LANG must be one of ${ARM_TARGET_LANG_LIST}") + endif() + + message(STATUS "Lite ARM Compile ${ARM_TARGET_OS} with ${ARM_TARGET_ARCH_ABI} ${ARM_TARGET_LANG}") include(cross_compiling/host) include(cross_compiling/armlinux) include(cross_compiling/android) @@ -158,6 +168,9 @@ include_directories("${PADDLE_SOURCE_DIR}") # for mobile if (WITH_LITE AND LITE_WITH_LIGHT_WEIGHT_FRAMEWORK) message(STATUS "Building the mobile framework") + if (ANDROID) + include(cross_compiling/findar) + endif() # include the necessary thirdparty dependencies include(external/gflags) # download, build, install gflags include(external/glog) # download, build, install glog
diff --git a/cmake/cross_compiling/android.cmake b/cmake/cross_compiling/android.cmake index 24f2bf676d358ea96d14fb4233ca7fd8b27709f9..c3bdbe202f731596d59e2f464f4d1d0aae4bede2 100644 --- a/cmake/cross_compiling/android.cmake +++ b/cmake/cross_compiling/android.cmake @@ -31,7 +31,7 @@ if(NOT DEFINED ANDROID_API_LEVEL) endif() if(NOT DEFINED ANDROID_STL_TYPE) - set(ANDROID_STL_TYPE "c++_static" CACHE STRING "stl type") + set(ANDROID_STL_TYPE "c++_static" CACHE STRING "stl type") # can also use c++_shared endif() if(ARM_TARGET_ARCH_ABI STREQUAL "armv7hf") @@ -71,8 +71,31 @@ if (NOT ANDROID_STL_TYPE IN_LIST ANDROID_STL_TYPE_LITS) message(FATAL_ERROR "ANDROID_STL_TYPE must be in one of ${ANDROID_STL_TYPE_LITS}") endif() +if(ARM_TARGET_LANG STREQUAL "gcc") + # gcc does not need a toolchain-version suffix + set(ARM_TARGET_LANG "") +endif() + set(CMAKE_SYSTEM_NAME Android) set(CMAKE_SYSTEM_VERSION ${ANDROID_API_LEVEL}) set(CMAKE_ANDROID_ARCH_ABI ${ANDROID_ARCH_ABI}) set(CMAKE_ANDROID_NDK ${ANDROID_NDK}) +set(CMAKE_ANDROID_NDK_TOOLCHAIN_VERSION ${ARM_TARGET_LANG}) set(CMAKE_ANDROID_STL_TYPE ${ANDROID_STL_TYPE}) + +if (ARM_TARGET_LANG STREQUAL "clang") + if(ARM_TARGET_ARCH_ABI STREQUAL "armv8") + set(triple aarch64-v8a-linux-android) + elseif(ARM_TARGET_ARCH_ABI STREQUAL "armv7") + set(triple arm-v7a-linux-android) + else() + message(FATAL_ERROR "Clang does not support ${ARM_TARGET_ARCH_ABI}; use armv8 or armv7") + endif() + + set(CMAKE_C_COMPILER clang) + set(CMAKE_C_COMPILER_TARGET ${triple}) + set(CMAKE_CXX_COMPILER clang++) + set(CMAKE_CXX_COMPILER_TARGET ${triple}) + + message(STATUS "CMAKE_CXX_COMPILER_TARGET: ${CMAKE_CXX_COMPILER_TARGET}") +endif()
diff --git a/cmake/cross_compiling/findar.cmake b/cmake/cross_compiling/findar.cmake new file mode 100644 index 0000000000000000000000000000000000000000..bcb0dc70fd811a5041244dedb4a4bcf5b540dc3a --- /dev/null +++ b/cmake/cross_compiling/findar.cmake @@ -0,0 +1,33 @@ +# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +if(NOT ARM_TARGET_LANG STREQUAL "clang") + # only clang needs to find the ar tool + return() +endif() + +if(NOT EXISTS "${CMAKE_CXX_COMPILER}") + message(FATAL_ERROR "Cannot find CMAKE_CXX_COMPILER ${CMAKE_CXX_COMPILER}") +endif() + +get_filename_component(AR_PATH ${CMAKE_CXX_COMPILER} PATH) + +find_file(AR_TOOL NAMES llvm-ar PATHS ${AR_PATH}) + +if(NOT AR_TOOL) + message(FATAL_ERROR "Failed to find llvm-ar in ${AR_PATH}") +else() + set(CMAKE_AR ${AR_TOOL}) + message(STATUS "Found CMAKE_AR : " ${CMAKE_AR}) +endif()
diff --git a/cmake/external/gflags.cmake b/cmake/external/gflags.cmake index 42ce7c644f3e8ee51bb5fbce4391b9423ee22cf8..256e1bbebf0bd4fe0ce6f685a7901888c18aab1d 100644 --- a/cmake/external/gflags.cmake +++ b/cmake/external/gflags.cmake @@ -40,7 +40,8 @@ if(ANDROID) "-DCMAKE_SYSTEM_VERSION=${CMAKE_SYSTEM_VERSION}" "-DCMAKE_ANDROID_ARCH_ABI=${CMAKE_ANDROID_ARCH_ABI}" "-DCMAKE_ANDROID_NDK=${CMAKE_ANDROID_NDK}" - "-DCMAKE_ANDROID_STL_TYPE=${CMAKE_ANDROID_STL_TYPE}") + "-DCMAKE_ANDROID_STL_TYPE=${CMAKE_ANDROID_STL_TYPE}" + "-DCMAKE_ANDROID_NDK_TOOLCHAIN_VERSION=${CMAKE_ANDROID_NDK_TOOLCHAIN_VERSION}") endif() ExternalProject_Add(
diff --git a/cmake/external/glog.cmake b/cmake/external/glog.cmake index 9ac9b8326431addb503acc10d3188a5f8f4e48a5..80abc2350caddb07aa6a326ac89affc58cb17399 100644 --- a/cmake/external/glog.cmake +++ b/cmake/external/glog.cmake @@ -46,7 +46,8 @@ if(ANDROID) "-DCMAKE_SYSTEM_VERSION=${CMAKE_SYSTEM_VERSION}" "-DCMAKE_ANDROID_ARCH_ABI=${CMAKE_ANDROID_ARCH_ABI}" "-DCMAKE_ANDROID_NDK=${CMAKE_ANDROID_NDK}" - "-DCMAKE_ANDROID_STL_TYPE=${CMAKE_ANDROID_STL_TYPE}") + "-DCMAKE_ANDROID_STL_TYPE=${CMAKE_ANDROID_STL_TYPE}" + "-DCMAKE_ANDROID_NDK_TOOLCHAIN_VERSION=${CMAKE_ANDROID_NDK_TOOLCHAIN_VERSION}") endif() ExternalProject_Add(
diff --git a/cmake/external/gtest.cmake b/cmake/external/gtest.cmake index de44719803fc4f130d536c2354fa492a57e3e69a..57fd6812879970c07a26f3657983998fb3f9760a 100644 --- a/cmake/external/gtest.cmake +++ b/cmake/external/gtest.cmake @@ -58,7 +58,9 @@ IF(WITH_TESTING OR (WITH_DISTRIBUTE AND NOT WITH_GRPC)) "-DCMAKE_SYSTEM_VERSION=${CMAKE_SYSTEM_VERSION}" "-DCMAKE_ANDROID_ARCH_ABI=${CMAKE_ANDROID_ARCH_ABI}" "-DCMAKE_ANDROID_NDK=${CMAKE_ANDROID_NDK}" - "-DCMAKE_ANDROID_STL_TYPE=${CMAKE_ANDROID_STL_TYPE}") + "-DCMAKE_ANDROID_STL_TYPE=${CMAKE_ANDROID_STL_TYPE}" + "-DCMAKE_ANDROID_NDK_TOOLCHAIN_VERSION=${CMAKE_ANDROID_NDK_TOOLCHAIN_VERSION}" + ) endif() ExternalProject_Add(
diff --git a/cmake/external/protobuf.cmake b/cmake/external/protobuf.cmake index 41cd1ebaf33a6ec7c61ee8c965eaa0bccbb618b8..6d2136223d39fed1bdacacea9ba363859b6b1c77 100644 --- a/cmake/external/protobuf.cmake +++ b/cmake/external/protobuf.cmake @@ -199,6 +199,7 @@ FUNCTION(build_protobuf TARGET_NAME BUILD_FOR_HOST) "-DCMAKE_ANDROID_ARCH_ABI=${CMAKE_ANDROID_ARCH_ABI}" "-DCMAKE_ANDROID_NDK=${CMAKE_ANDROID_NDK}" "-DCMAKE_ANDROID_STL_TYPE=${CMAKE_ANDROID_STL_TYPE}" + "-DCMAKE_ANDROID_NDK_TOOLCHAIN_VERSION=${CMAKE_ANDROID_NDK_TOOLCHAIN_VERSION}" "-DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}" "-DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}" "-DCMAKE_C_FLAGS=${CMAKE_C_FLAGS}" diff --git
a/paddle/fluid/inference/analysis/passes/CMakeLists.txt b/paddle/fluid/inference/analysis/passes/CMakeLists.txt index a8d0c69a54ab39781613d26474098450398d4c1b..7b1bbbb585ad67e378cfbf0a88c7c10fef41621e 100644 --- a/paddle/fluid/inference/analysis/passes/CMakeLists.txt +++ b/paddle/fluid/inference/analysis/passes/CMakeLists.txt @@ -5,7 +5,7 @@ cc_library(ir_params_sync_among_devices_pass SRCS ir_params_sync_among_devices_p cc_library(ir_graph_to_program_pass SRCS ir_graph_to_program_pass.cc DEPS analysis_pass graph_to_program_pass) cc_library(adjust_cudnn_workspace_size_pass SRCS adjust_cudnn_workspace_size_pass.cc DEPS analysis_pass graph_to_program_pass) -cc_library(analysis_passes SRCS passes.cc DEPS +cc_library(analysis_passes SRCS use_passes.cc DEPS ir_graph_build_pass ir_analysis_pass ir_params_sync_among_devices_pass diff --git a/paddle/fluid/inference/analysis/passes/passes.cc b/paddle/fluid/inference/analysis/passes/use_passes.cc similarity index 100% rename from paddle/fluid/inference/analysis/passes/passes.cc rename to paddle/fluid/inference/analysis/passes/use_passes.cc index a55904ed536bad31c82888ede2db3178f3fd5e47..76043a53b75768bd85298ecb8dd911c68671673c 100644 --- a/paddle/fluid/inference/analysis/passes/passes.cc +++ b/paddle/fluid/inference/analysis/passes/use_passes.cc @@ -12,13 +12,13 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/fluid/inference/analysis/passes/passes.h" #include "paddle/fluid/inference/analysis/passes/adjust_cudnn_workspace_size_pass.h" #include "paddle/fluid/inference/analysis/passes/ir_analysis_pass.h" #include "paddle/fluid/inference/analysis/passes/ir_graph_build_pass.h" #include "paddle/fluid/inference/analysis/passes/ir_graph_to_program_pass.h" #include "paddle/fluid/inference/analysis/passes/ir_params_sync_among_devices_pass.h" #include "paddle/fluid/inference/analysis/passes/memory_optimize_pass.h" +#include "paddle/fluid/inference/analysis/passes/passes.h" namespace paddle { namespace inference { diff --git a/paddle/fluid/lite/CMakeLists.txt b/paddle/fluid/lite/CMakeLists.txt index fefc73c75478839c19e3040a4a95378934ad53d8..7b6dd0703d410ad228a11e60dda7ceea9f5a7983 100644 --- a/paddle/fluid/lite/CMakeLists.txt +++ b/paddle/fluid/lite/CMakeLists.txt @@ -10,6 +10,9 @@ message(STATUS "LITE_WITH_ARM:\t${LITE_WITH_ARM}") message(STATUS "LITE_WITH_PROFILE:\t${LITE_WITH_PROFILE}") set(LITE_MODEL_DIR "${THIRD_PARTY_PATH}/install") + +set(LITE_ON_MOBILE ${LITE_WITH_LIGHT_WEIGHT_FRAMEWORK}) + set(LITE_URL "http://paddle-inference-dist.bj.bcebos.com" CACHE STRING "inference download url") function(lite_download_and_uncompress INSTALL_DIR URL FILENAME) @@ -182,3 +185,11 @@ add_subdirectory(model_parser) add_subdirectory(utils) add_subdirectory(api) add_subdirectory(gen_code) + + +if (WITH_TESTING) + lite_download_and_uncompress(${LITE_MODEL_DIR} ${LITE_URL} "lite_naive_model.tar.gz") + if(LITE_WITH_LIGHT_WEIGHT_FRAMEWORK) + lite_download_and_uncompress(${LITE_MODEL_DIR} ${LITE_URL} "mobilenet_v2_relu.tar.gz") + endif() +endif() diff --git a/paddle/fluid/lite/api/CMakeLists.txt b/paddle/fluid/lite/api/CMakeLists.txt index a81d1c9db8d83540d227705d8cd46b2dd5405705..52961d0cc49187fa79e55942a1abaceed9dc2d19 100644 --- a/paddle/fluid/lite/api/CMakeLists.txt +++ b/paddle/fluid/lite/api/CMakeLists.txt @@ -1,20 +1,29 @@ -set(cxx_api_lite_deps scope_lite optimizer_lite target_wrapper_host model_parser_lite) +set(cxx_api_lite_deps + scope_lite optimizer_lite target_wrapper_host 
model_parser_lite program_lite) if(LITE_WITH_CUDA) set(cxx_api_lite_deps ${cxx_api_lite_deps} kernels_cuda) cc_library(cxx_api_lite_cuda SRCS cxx_api.cc DEPS ${cxx_api_lite_deps} target_wrapper_cuda) nv_test(test_cxx_api_lite_cuda SRCS cxx_api_test.cc DEPS cxx_api_lite_cuda) endif() -cc_library(cxx_api_lite SRCS cxx_api.cc DEPS ${cxx_api_lite_deps} ${ops_lite} program_lite) +lite_cc_library(lite_api_test_helper SRCS lite_api_test_helper.cc + DEPS scope_lite optimizer_lite target_wrapper_host model_parser_lite program_lite + ${ops_lite} ${host_kernels} + CUDA_DEPS kernels_cuda + X86_DEPS ${x86_kernels} + ) +lite_cc_library(cxx_api_lite SRCS cxx_api.cc DEPS lite_api_test_helper) set(light_api_deps - scope_lite target_wrapper_host model_parser_lite) + scope_lite target_wrapper_host model_parser_lite program_lite) if(LITE_WITH_CUDA) set(light_api_deps ${light_api_deps} target_wrapper_cuda) endif() -#cc_library(light_api_lite SRCS light_api.cc DEPS ${light_api_deps} ${ops_lite} ${host_kernels}) +lite_cc_library(light_api_lite SRCS light_api.cc + DEPS ${light_api_deps} ${ops_lite} ${host_kernels} + ) message(STATUS "get ops ${ops_lite}") message(STATUS "get Host kernels ${host_kernels}") @@ -24,24 +33,41 @@ include(ExternalProject) set(LITE_DEMO_INSTALL_DIR "${THIRD_PARTY_PATH}/inference_demo" CACHE STRING "A path setting inference demo download directories.") -if((NOT LITE_WITH_LIGHT_WEIGHT_FRAMEWORK) AND WITH_TESTING) +if(WITH_TESTING) + set(eval_model_dir "") + set(test_cxx_api_deps cxx_api_lite mir_passes ${ops_lite} ${host_kernels} ${x86_kernels}) + + if(LITE_WITH_LIGHT_WEIGHT_FRAMEWORK) + set(eval_model_dir ${LITE_MODEL_DIR}/mobilenet_v2_relu) + set(test_cxx_api_deps ${test_cxx_api_deps} ${arm_kernels}) + endif() lite_cc_test(test_cxx_api_lite SRCS cxx_api_test.cc - DEPS cxx_api_lite mir_passes - ${ops_lite} ${host_kernels} ${x86_kernels} + DEPS ${test_cxx_api_deps} ARGS --model_dir=${LITE_MODEL_DIR}/lite_naive_model - --optimized_model=${LITE_MODEL_DIR}/lite_naive_model_opt SERIAL) + --optimized_model=${LITE_MODEL_DIR}/lite_naive_model_opt + --eval_model_dir=${eval_model_dir} SERIAL) - lite_download_and_uncompress(${LITE_MODEL_DIR} ${LITE_URL} "lite_naive_model.tar.gz") add_dependencies(test_cxx_api_lite extern_lite_download_lite_naive_model_tar_gz) + if(LITE_WITH_LIGHT_WEIGHT_FRAMEWORK) + add_dependencies(test_cxx_api_lite extern_lite_download_mobilenet_v2_relu_tar_gz) + endif() endif() -if(NOT LITE_WITH_LIGHT_WEIGHT_FRAMEWORK AND WITH_TESTING) - add_dependencies(test_cxx_api_lite extern_lite_download_lite_naive_model_tar_gz) -endif() +# These tests need CLI arguments and are not supported in the ARM CI. +# TODO(Superjomn) support them later.
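The tests guarded below receive their CLI arguments through gflags: the ARGS clause of lite_cc_test (for example --eval_model_dir=${eval_model_dir} above) is passed to the test executable at run time. A minimal standalone sketch of that flow, assuming only the gflags dependency (the flag name mirrors this patch; the rest is illustrative):

#include <gflags/gflags.h>
#include <iostream>

// Mirrors the flag defined in cxx_api_test.cc below; the value is supplied
// by the ARGS clause that lite_cc_test forwards to the test binary.
DEFINE_string(eval_model_dir, "", "directory of the eval model");

int main(int argc, char** argv) {
  gflags::ParseCommandLineFlags(&argc, &argv, /*remove_flags=*/true);
  std::cout << "eval_model_dir = " << FLAGS_eval_model_dir << "\n";
  return 0;
}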
+if(NOT LITE_ON_MOBILE) + lite_cc_test(test_light_api SRCS light_api_test.cc + DEPS light_api_lite mir_passes + X86_DEPS ${x86_kernels} + ARGS --optimized_model=${LITE_MODEL_DIR}/lite_naive_model_opt + SERIAL) -# if(NOT LITE_WITH_LIGHT_WEIGHT_FRAMEWORK) -# lite_cc_test(test_light_api SRCS light_api_test.cc DEPS light_api_lite ARGS --optimized_model=${LITE_MODEL_DIR}/lite_naive_model_opt SERIAL) -# endif() + lite_cc_test(test_apis_lite SRCS apis_test.cc + DEPS cxx_api_lite light_api_lite ${ops_lite} mir_passes + X86_DEPS ${x86_kernels} + ARGS --model_dir=${LITE_MODEL_DIR}/lite_naive_model + --optimized_model=${LITE_MODEL_DIR}/lite_naive_model_opt SERIAL) +endif() lite_cc_binary(cxx_api_lite_bin SRCS cxx_api_bin.cc DEPS @@ -51,4 +77,3 @@ lite_cc_binary(cxx_api_lite_bin SRCS cxx_api_bin.cc mir_passes ${ops_lite} ${host_kernels} ARM_DEPS ${arm_kernels}) - diff --git a/paddle/fluid/lite/api/apis_test.cc b/paddle/fluid/lite/api/apis_test.cc new file mode 100644 index 0000000000000000000000000000000000000000..7dd6a1193754437a32957f081b3be3fd5c1fc403 --- /dev/null +++ b/paddle/fluid/lite/api/apis_test.cc @@ -0,0 +1,95 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* + * We test multiple apis here. 
+ */ +#include <gflags/gflags.h> +#include <gtest/gtest.h> +#include <vector> +#include "paddle/fluid/lite/api/cxx_api.h" +#include "paddle/fluid/lite/api/light_api.h" +#include "paddle/fluid/lite/core/mir/pass_registry.h" +#include "paddle/fluid/lite/core/mir/use_passes.h" +#include "paddle/fluid/lite/kernels/use_kernels.h" +#include "paddle/fluid/lite/operators/use_ops.h" + +DEFINE_string(model_dir, "", ""); +DEFINE_string(optimized_model, "", ""); + +namespace paddle { +namespace lite { + +void SetConstInput(lite::Tensor* x) { + x->Resize(DDim(std::vector<int64_t>({100, 100}))); + auto* data = x->mutable_data<float>(); + for (int i = 0; i < 100 * 100; i++) { + data[i] = i; + } +} + +bool CompareTensors(const std::string& name, const ExecutorLite& cxx_api, + const LightPredictor& light_api) { + const auto* a = cxx_api.GetTensor(name); + const auto* b = light_api.GetTensor(name); + return TensorCompareWith(*a, *b); +} + +#ifndef LITE_WITH_LIGHT_WEIGHT_FRAMEWORK +TEST(CXXApi_LightApi, save_and_load_model) { + lite::ExecutorLite cxx_api; + lite::LightPredictor light_api; + + // CXXApi + { + std::vector<Place> valid_places({Place{TARGET(kHost), PRECISION(kFloat)}, + Place{TARGET(kX86), PRECISION(kFloat)}}); + cxx_api.Build(FLAGS_model_dir, Place{TARGET(kCUDA), PRECISION(kFloat)}, + valid_places); + + auto* x = cxx_api.GetInput(0); + SetConstInput(x); + + cxx_api.Run(); + + LOG(INFO) << "Save optimized model to " << FLAGS_optimized_model; + cxx_api.SaveModel(FLAGS_optimized_model); + } + + // LightApi + { + light_api.Build(FLAGS_optimized_model); + + auto* x = light_api.GetInput(0); + SetConstInput(x); + + light_api.Run(); + } + + const auto* cxx_out = cxx_api.GetOutput(0); + const auto* light_out = light_api.GetOutput(0); + ASSERT_TRUE(TensorCompareWith(*cxx_out, *light_out)); + + std::vector<std::string> tensors_with_order({ + "a", "fc_0.w_0", "scale_0.tmp_0", + }); + + for (const auto& tensor_name : tensors_with_order) { + ASSERT_TRUE(CompareTensors(tensor_name, cxx_api, light_api)); + } +} +#endif // LITE_WITH_LIGHT_WEIGHT_FRAMEWORK + +} // namespace lite +} // namespace paddle
diff --git a/paddle/fluid/lite/api/cxx_api.h b/paddle/fluid/lite/api/cxx_api.h index 13679413958713dc2fb5e499c50e8dc94c0dbbde..ba2d784b942c04c169a19d4747352d9048fd6ff2 100644 --- a/paddle/fluid/lite/api/cxx_api.h +++ b/paddle/fluid/lite/api/cxx_api.h @@ -78,6 +78,11 @@ class ExecutorLite { return &fetch_list.at(offset); } + const lite::Tensor* GetTensor(const std::string& name) const { + auto* var = program_->exec_scope()->FindVar(name); + return &var->Get<lite::Tensor>(); + } + void Run() { program_->Run(); } const framework::proto::ProgramDesc& program_desc() const {
diff --git a/paddle/fluid/lite/api/cxx_api_bin.cc b/paddle/fluid/lite/api/cxx_api_bin.cc index f8229f2cea9a7e2b34c99235505a67b40d8d3f3d..77fce7abc1158f84f423f1530b965ef9686c0613 100644 --- a/paddle/fluid/lite/api/cxx_api_bin.cc +++ b/paddle/fluid/lite/api/cxx_api_bin.cc @@ -14,8 +14,9 @@ #include "paddle/fluid/lite/api/cxx_api.h" #include <chrono> // NOLINT -#include "paddle/fluid/lite/core/mir/passes.h" +#include "paddle/fluid/lite/core/mir/use_passes.h" #include "paddle/fluid/lite/core/op_registry.h" + namespace paddle { namespace lite {
diff --git a/paddle/fluid/lite/api/cxx_api_test.cc b/paddle/fluid/lite/api/cxx_api_test.cc index 430bd9b58f80e593e1c85bb6d6113df6962a58e5..1b337c06a981447fd8b8f87905ce5d3d10c56d8c 100644 --- a/paddle/fluid/lite/api/cxx_api_test.cc +++ b/paddle/fluid/lite/api/cxx_api_test.cc @@ -16,59 +16,34 @@ #include <gflags/gflags.h> #include <gtest/gtest.h> #include <vector> -#include "paddle/fluid/lite/core/mir/passes.h" +#include "paddle/fluid/lite/api/lite_api_test_helper.h" +#include "paddle/fluid/lite/core/compatible_tensor.h" +#include "paddle/fluid/lite/core/mir/use_passes.h" #include "paddle/fluid/lite/core/op_registry.h" - -DEFINE_string(model_dir, "", ""); -DEFINE_string(optimized_model, "", ""); +#include "paddle/fluid/lite/kernels/use_kernels.h" +#include "paddle/fluid/lite/operators/use_ops.h" // For training. DEFINE_string(startup_program_path, "", ""); DEFINE_string(main_program_path, "", ""); +// for eval +DEFINE_string(eval_model_dir, "", ""); + namespace paddle { namespace lite { +#ifndef LITE_WITH_LIGHT_WEIGHT_FRAMEWORK TEST(CXXApi, test) { - lite::ExecutorLite predictor; -#ifndef LITE_WITH_CUDA - std::vector<Place> valid_places({Place{TARGET(kHost), PRECISION(kFloat)}, - Place{TARGET(kX86), PRECISION(kFloat)}}); -#else - std::vector<Place> valid_places({ - Place{TARGET(kHost), PRECISION(kFloat), DATALAYOUT(kNCHW)}, - Place{TARGET(kCUDA), PRECISION(kFloat), DATALAYOUT(kNCHW)}, - Place{TARGET(kCUDA), PRECISION(kAny), DATALAYOUT(kNCHW)}, - Place{TARGET(kHost), PRECISION(kAny), DATALAYOUT(kNCHW)}, - Place{TARGET(kCUDA), PRECISION(kAny), DATALAYOUT(kAny)}, - Place{TARGET(kHost), PRECISION(kAny), DATALAYOUT(kAny)}, - }); -#endif - - predictor.Build(FLAGS_model_dir, - Place{TARGET(kX86), PRECISION(kFloat)}, // origin cuda - valid_places); - - auto* input_tensor = predictor.GetInput(0); - input_tensor->Resize(DDim(std::vector<int64_t>({100, 100}))); - auto* data = input_tensor->mutable_data<float>(); - for (int i = 0; i < 100 * 100; i++) { - data[i] = i; - } - - // LOG(INFO) << "input " << *input_tensor; - - predictor.Run(); - - auto* out = predictor.GetOutput(0); + const lite::Tensor* out = RunHvyModel(); LOG(INFO) << out << " memory size " << out->data_size(); - LOG(INFO) << "out " << out->data<float>()[0]; - LOG(INFO) << "out " << out->data<float>()[1]; + for (int i = 0; i < 10; i++) { + LOG(INFO) << "out " << out->data<float>()[i]; + } LOG(INFO) << "dims " << out->dims(); // LOG(INFO) << "out " << *out; } -#ifndef LITE_WITH_LIGHT_WEIGHT_FRAMEWORK TEST(CXXApi, save_model) { lite::ExecutorLite predictor; std::vector<Place> valid_places({Place{TARGET(kHost), PRECISION(kFloat)}, @@ -79,9 +54,7 @@ TEST(CXXApi, save_model) { LOG(INFO) << "Save optimized model to " << FLAGS_optimized_model; predictor.SaveModel(FLAGS_optimized_model); } -#endif // LITE_WITH_LIGHT_WEIGHT_FRAMEWORK -#ifndef LITE_WITH_LIGHT_WEIGHT_FRAMEWORK /*TEST(CXXTrainer, train) { Place prefer_place({TARGET(kHost), PRECISION(kFloat), DATALAYOUT(kNCHW)}); std::vector<Place> valid_places({prefer_place}); @@ -115,46 +88,37 @@ TEST(CXXApi, save_model) { }*/ #endif // LITE_WITH_LIGHT_WEIGHT_FRAMEWORK -} // namespace lite -} // namespace paddle +#ifdef LITE_WITH_ARM +TEST(CXXApi, eval) { + DeviceInfo::Init(); + lite::ExecutorLite predictor; + std::vector<Place> valid_places({Place{TARGET(kHost), PRECISION(kFloat)}, + Place{TARGET(kARM), PRECISION(kFloat)}}); -USE_LITE_OP(mul); -USE_LITE_OP(fc); -USE_LITE_OP(relu); -USE_LITE_OP(scale); -USE_LITE_OP(feed); -USE_LITE_OP(fetch); -USE_LITE_OP(io_copy); -USE_LITE_OP(elementwise_add) -USE_LITE_OP(elementwise_sub) -USE_LITE_OP(square) -USE_LITE_OP(softmax) -USE_LITE_OP(dropout) -USE_LITE_OP(concat) -USE_LITE_OP(conv2d) -USE_LITE_OP(depthwise_conv2d) -USE_LITE_OP(pool2d) -USE_LITE_KERNEL(feed, kHost, kAny, kAny, def); -USE_LITE_KERNEL(fetch, kHost, kAny, kAny, def); - -#ifdef LITE_WITH_X86 -USE_LITE_KERNEL(relu, kX86, kFloat, kNCHW, def); -USE_LITE_KERNEL(mul, kX86, kFloat, kNCHW, def); -USE_LITE_KERNEL(fc, kX86, kFloat, kNCHW, def); -USE_LITE_KERNEL(scale, kX86, kFloat, kNCHW, def); -USE_LITE_KERNEL(square, kX86, kFloat, kNCHW, def); -USE_LITE_KERNEL(elementwise_sub, kX86, kFloat, kNCHW, def); -USE_LITE_KERNEL(elementwise_add, kX86, kFloat, kNCHW, def); -USE_LITE_KERNEL(softmax, kX86, kFloat, kNCHW, def); -USE_LITE_KERNEL(dropout, kX86, kFloat, kNCHW, def); -USE_LITE_KERNEL(concat, kX86, kFloat, kNCHW, def); -USE_LITE_KERNEL(conv2d, kX86, kFloat, kNCHW, def); -USE_LITE_KERNEL(depthwise_conv2d, kX86, kFloat, kNCHW, def); -USE_LITE_KERNEL(pool2d, kX86, kFloat, kNCHW, def); -#endif + predictor.Build(FLAGS_eval_model_dir, Place{TARGET(kARM), PRECISION(kFloat)}, + valid_places); + + auto* input_tensor = predictor.GetInput(0); + input_tensor->Resize(DDim(std::vector<int64_t>({1, 3, 224, 224}))); + auto* data = input_tensor->mutable_data<float>(); + for (int i = 0; i < input_tensor->dims().production(); i++) { + data[i] = 1; + } -#ifdef LITE_WITH_CUDA -USE_LITE_KERNEL(mul, kCUDA, kFloat, kNCHW, def); -USE_LITE_KERNEL(io_copy, kCUDA, kAny, kAny, host_to_device); -USE_LITE_KERNEL(io_copy, kCUDA, kAny, kAny, device_to_host); + predictor.Run(); + + auto* out = predictor.GetOutput(0); + std::vector<float> results({0.00097802, 0.00099822, 0.00103093, 0.00100121, + 0.00098268, 0.00104065, 0.00099962, 0.00095181, + 0.00099694, 0.00099406}); + for (int i = 0; i < results.size(); ++i) { + EXPECT_NEAR(out->data<float>()[i], results[i], 1e-5); + } + ASSERT_EQ(out->dims().size(), 2); + ASSERT_EQ(out->dims()[0], 1); + ASSERT_EQ(out->dims()[1], 1000); +} #endif + +} // namespace lite +} // namespace paddle
diff --git a/paddle/fluid/lite/api/light_api.h b/paddle/fluid/lite/api/light_api.h index 474e5da78bd2cd201b17f9a223bd1a177861a532..5085909385c94e2e81b2cfa14167e8ce886060a3 100644 --- a/paddle/fluid/lite/api/light_api.h +++ b/paddle/fluid/lite/api/light_api.h @@ -22,6 +22,7 @@ #include <memory> #include <string> #include <vector> +#include "paddle/fluid/lite/core/compatible_tensor.h" #include "paddle/fluid/lite/core/context.h" #include "paddle/fluid/lite/core/program.h" #include "paddle/fluid/lite/core/types.h" @@ -62,6 +63,11 @@ class LightPredictor { return &fetch_list.at(offset); } + const lite::Tensor* GetTensor(const std::string& name) const { + auto* var = program_->exec_scope()->FindVar(name); + return &var->Get<lite::Tensor>(); + } + private: void BuildRuntimeProgram(const framework::proto::ProgramDesc& prog) { std::vector<Instruction> insts; @@ -72,9 +78,8 @@ class LightPredictor { // Create the kernels of the target places, and filter out the specific // kernel with the target alias. - for (auto& op : program.ops_) { - lite::pb::OpDesc desc(op->op_info()->desc()); - auto kernel_type = desc.GetAttr(kKernelTypeAttr).get<std::string>(); + for (auto& op : program.ops()) { + auto kernel_type = op->op_info()->GetAttr<std::string>(kKernelTypeAttr); std::string op_type, alias; Place place; KernelBase::ParseKernelType(kernel_type, &op_type, &alias, &place); @@ -89,8 +94,8 @@ class LightPredictor { insts.emplace_back(op, std::move(*it)); } program_.reset(new RuntimeProgram(std::move(insts))); - CHECK(program.exec_scope_); - program_->set_exec_scope(program.exec_scope_); + CHECK(program.exec_scope()); + program_->set_exec_scope(program.exec_scope()); } private:
diff --git a/paddle/fluid/lite/api/light_api_test.cc b/paddle/fluid/lite/api/light_api_test.cc index b1e6741e09ebd075ef646730f9b5354baefca84f..faf53b8177a4d11fb33017599ecdb9dc650fbc43 100644 --- a/paddle/fluid/lite/api/light_api_test.cc +++ b/paddle/fluid/lite/api/light_api_test.cc @@ -15,6 +15,9 @@ #include "paddle/fluid/lite/api/light_api.h" #include <gflags/gflags.h> #include <gtest/gtest.h> +#include "paddle/fluid/lite/core/mir/use_passes.h" +#include "paddle/fluid/lite/kernels/use_kernels.h" +#include "paddle/fluid/lite/operators/use_ops.h" DEFINE_string(optimized_model, "", ""); @@ -33,29 +36,14 @@ TEST(LightAPI, load) { } predictor.Run(); + + const auto* output = predictor.GetOutput(0); + const float* raw_output = output->data<float>(); + + for (int i = 0; i < 10; i++) { + LOG(INFO) << "out " << raw_output[i]; + } } } // namespace lite } // namespace paddle - -USE_LITE_OP(mul); -USE_LITE_OP(fc); -USE_LITE_OP(scale); -USE_LITE_OP(feed); -USE_LITE_OP(fetch); -USE_LITE_OP(io_copy); - -USE_LITE_KERNEL(feed, kHost, kAny, kAny, def); -USE_LITE_KERNEL(fetch, kHost, kAny, kAny, def); - -#ifdef LITE_WITH_X86 -USE_LITE_KERNEL(relu, kX86, kFloat, kNCHW, def); -USE_LITE_KERNEL(mul, kX86, kFloat, kNCHW, def); -USE_LITE_KERNEL(fc, kX86, kFloat, kNCHW, def); -USE_LITE_KERNEL(scale, kX86, kFloat, kNCHW, def); -USE_LITE_KERNEL(square, kX86, kFloat, kNCHW, def); -USE_LITE_KERNEL(elementwise_sub, kX86, kFloat, kNCHW, def); -USE_LITE_KERNEL(elementwise_add, kX86, kFloat, kNCHW, def); -USE_LITE_KERNEL(softmax, kX86, kFloat, kNCHW, def); -USE_LITE_KERNEL(dropout, kX86, kFloat, kNCHW, def); -#endif
diff --git a/paddle/fluid/lite/api/lite_api_test_helper.cc b/paddle/fluid/lite/api/lite_api_test_helper.cc new file mode 100644 index 0000000000000000000000000000000000000000..b82541723308f4748e28c64affa6899bf2d9b727 --- /dev/null +++ b/paddle/fluid/lite/api/lite_api_test_helper.cc @@ -0,0 +1,60 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+ +#include "paddle/fluid/lite/api/lite_api_test_helper.h" +#include + +DEFINE_string(model_dir, "", ""); +DEFINE_string(optimized_model, "", ""); + +namespace paddle { +namespace lite { + +const lite::Tensor* RunHvyModel() { + lite::ExecutorLite predictor; +#ifndef LITE_WITH_CUDA + std::vector valid_places({Place{TARGET(kHost), PRECISION(kFloat)}, + Place{TARGET(kX86), PRECISION(kFloat)}}); +#else + std::vector valid_places({ + Place{TARGET(kHost), PRECISION(kFloat), DATALAYOUT(kNCHW)}, + Place{TARGET(kCUDA), PRECISION(kFloat), DATALAYOUT(kNCHW)}, + Place{TARGET(kCUDA), PRECISION(kAny), DATALAYOUT(kNCHW)}, + Place{TARGET(kHost), PRECISION(kAny), DATALAYOUT(kNCHW)}, + Place{TARGET(kCUDA), PRECISION(kAny), DATALAYOUT(kAny)}, + Place{TARGET(kHost), PRECISION(kAny), DATALAYOUT(kAny)}, + }); +#endif + + predictor.Build(FLAGS_model_dir, + Place{TARGET(kX86), PRECISION(kFloat)}, // origin cuda + valid_places); + + auto* input_tensor = predictor.GetInput(0); + input_tensor->Resize(DDim(std::vector({100, 100}))); + auto* data = input_tensor->mutable_data(); + for (int i = 0; i < 100 * 100; i++) { + data[i] = i; + } + + // LOG(INFO) << "input " << *input_tensor; + + predictor.Run(); + + const auto* out = predictor.GetOutput(0); + return out; +} + +} // namespace lite +} // namespace paddle diff --git a/paddle/fluid/lite/api/lite_api_test_helper.h b/paddle/fluid/lite/api/lite_api_test_helper.h new file mode 100644 index 0000000000000000000000000000000000000000..840de932f0146b7241ba030b02742e34e2c1b9b8 --- /dev/null +++ b/paddle/fluid/lite/api/lite_api_test_helper.h @@ -0,0 +1,31 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#pragma once + +#include +#include "paddle/fluid/lite/api/cxx_api.h" +#include "paddle/fluid/lite/core/compatible_tensor.h" +#include "paddle/fluid/lite/core/op_registry.h" + +DECLARE_string(model_dir); +DECLARE_string(optimized_model); + +namespace paddle { +namespace lite { + +const lite::Tensor* RunHvyModel(); + +} // namespace lite +} // namespace paddle diff --git a/paddle/fluid/lite/arm/math/elementwise.cc b/paddle/fluid/lite/arm/math/elementwise.cc index 2a74e7ee4ec4be51b420b1fa2d2a1be7c3f148fb..7c1ea8d3a70451dd790a9eea516b74f58ec91d5e 100644 --- a/paddle/fluid/lite/arm/math/elementwise.cc +++ b/paddle/fluid/lite/arm/math/elementwise.cc @@ -65,9 +65,61 @@ void elementwise_add(const float* dinx, const float* diny, float* dout, } template <> -void elementwise_add_axis(const float* dinx, const float* diny, - float* dout, int batch, int channels, - int num) { +void elementwise_add_relu(const float* dinx, const float* diny, + float* dout, int num) { + int cnt = num >> 4; + int remain = num % 16; + float32x4_t vzero = vdupq_n_f32(0.f); +#pragma omp parallel for + for (int i = 0; i < cnt; i++) { + const float* dinx_ptr = dinx + (i << 4); + const float* diny_ptr = diny + (i << 4); + float* dout_ptr = dout + (i << 4); + + float32x4_t dinx0 = vld1q_f32(dinx_ptr); + float32x4_t dinx1 = vld1q_f32(dinx_ptr + 4); + float32x4_t dinx2 = vld1q_f32(dinx_ptr + 8); + float32x4_t dinx3 = vld1q_f32(dinx_ptr + 12); + + float32x4_t diny0 = vld1q_f32(diny_ptr); + float32x4_t diny1 = vld1q_f32(diny_ptr + 4); + float32x4_t diny2 = vld1q_f32(diny_ptr + 8); + float32x4_t diny3 = vld1q_f32(diny_ptr + 12); + + dinx0 = vaddq_f32(dinx0, diny0); + dinx1 = vaddq_f32(dinx1, diny1); + dinx2 = vaddq_f32(dinx2, diny2); + dinx3 = vaddq_f32(dinx3, diny3); + + // relu + dinx0 = vmaxq_f32(dinx0, vzero); + dinx1 = vmaxq_f32(dinx1, vzero); + dinx2 = vmaxq_f32(dinx2, vzero); + dinx3 = vmaxq_f32(dinx3, vzero); + + vst1q_f32(dout_ptr, dinx0); + vst1q_f32(dout_ptr + 4, dinx1); + vst1q_f32(dout_ptr + 8, dinx2); + vst1q_f32(dout_ptr + 12, dinx3); + } + if (remain > 0) { + const float* dinx_ptr = dinx + (cnt << 4); + const float* diny_ptr = diny + (cnt << 4); + float* dout_ptr = dout + (cnt << 4); + for (int i = 0; i < remain; i++) { + float tmp = *dinx_ptr + *diny_ptr; + *dout_ptr = tmp > 0.f ? 
tmp : 0.f; + dout_ptr++; + dinx_ptr++; + diny_ptr++; + } + } +} + +template <> +void elementwise_add_broadcast(const float* dinx, const float* diny, + float* dout, int batch, int channels, + int num) { #pragma omp parallel for collapse(2) for (int i = 0; i < batch; ++i) { for (int j = 0; j < channels; ++j) { @@ -127,6 +179,82 @@ void elementwise_add_axis(const float* dinx, const float* diny, } } +template <> +void elementwise_add_relu_broadcast(const float* dinx, const float* diny, + float* dout, int batch, int channels, + int num) { + float32x4_t vzero = vdupq_n_f32(0.f); +#pragma omp parallel for collapse(2) + for (int i = 0; i < batch; ++i) { + for (int j = 0; j < channels; ++j) { + int offset = (i * channels + j) * num; + const float* din_ptr = dinx + offset; + const float diny_data = diny[j]; + float* dout_ptr = dout + offset; + + int cnt = num >> 4; + int remain = num % 16; + float32x4_t rb = vdupq_n_f32(diny_data); + for (int k = 0; k < cnt; ++k) { + float32x4_t din0 = vld1q_f32(din_ptr); + float32x4_t din1 = vld1q_f32(din_ptr + 4); + float32x4_t din2 = vld1q_f32(din_ptr + 8); + float32x4_t din3 = vld1q_f32(din_ptr + 12); + + din0 = vaddq_f32(din0, rb); + din1 = vaddq_f32(din1, rb); + din2 = vaddq_f32(din2, rb); + din3 = vaddq_f32(din3, rb); + + // relu + din0 = vmaxq_f32(din0, vzero); + din1 = vmaxq_f32(din1, vzero); + din2 = vmaxq_f32(din2, vzero); + din3 = vmaxq_f32(din3, vzero); + + vst1q_f32(dout_ptr, din0); + vst1q_f32(dout_ptr + 4, din1); + vst1q_f32(dout_ptr + 8, din2); + vst1q_f32(dout_ptr + 12, din3); + din_ptr += 16; + dout_ptr += 16; + } + if (remain >= 8) { + float32x4_t din0 = vld1q_f32(din_ptr); + float32x4_t din1 = vld1q_f32(din_ptr + 4); + din0 = vaddq_f32(din0, rb); + din1 = vaddq_f32(din1, rb); + // relu + din0 = vmaxq_f32(din0, vzero); + din1 = vmaxq_f32(din1, vzero); + vst1q_f32(dout_ptr, din0); + vst1q_f32(dout_ptr + 4, din1); + din_ptr += 8; + dout_ptr += 8; + remain -= 8; + } + if (remain >= 4) { + float32x4_t din0 = vld1q_f32(din_ptr); + din0 = vaddq_f32(din0, rb); + // relu + din0 = vmaxq_f32(din0, vzero); + vst1q_f32(dout_ptr, din0); + din_ptr += 4; + dout_ptr += 4; + remain -= 4; + } + if (remain > 0) { + for (int p = 0; p < remain; p++) { + float tmp = *din_ptr + diny_data; + *dout_ptr = tmp > 0.f ? 
tmp : 0.f; + dout_ptr++; + din_ptr++; + } + } + } + } +} + } // namespace math } // namespace arm } // namespace lite
diff --git a/paddle/fluid/lite/arm/math/elementwise.h b/paddle/fluid/lite/arm/math/elementwise.h index ca8f87895fcea80f9a1a178a0bf43b34c44182bb..9300d73753d695819af6ec7066fd95020457bd29 100644 --- a/paddle/fluid/lite/arm/math/elementwise.h +++ b/paddle/fluid/lite/arm/math/elementwise.h @@ -23,8 +23,15 @@ template <typename T> void elementwise_add(const T* dinx, const T* diny, T* dout, int num); template <typename T> -void elementwise_add_axis(const T* dinx, const T* diny, T* dout, int batch, - int channels, int num); +void elementwise_add_relu(const T* dinx, const T* diny, T* dout, int num); + +template <typename T> +void elementwise_add_broadcast(const T* dinx, const T* diny, T* dout, int batch, + int channels, int num); + +template <typename T> +void elementwise_add_relu_broadcast(const T* dinx, const T* diny, T* dout, + int batch, int channels, int num); } // namespace math } // namespace arm
diff --git a/paddle/fluid/lite/core/CMakeLists.txt b/paddle/fluid/lite/core/CMakeLists.txt index 89101aa03272d98ac08d7830830de6acb9adf271..1e95668cddc722e32ea784fe2331380ea3a3940e 100644 --- a/paddle/fluid/lite/core/CMakeLists.txt +++ b/paddle/fluid/lite/core/CMakeLists.txt @@ -1,5 +1,5 @@ if (WITH_TESTING) - cc_library(lite_gtest_main SRCS lite_gtest_main.cc DEPS gtest) + cc_library(lite_gtest_main SRCS lite_gtest_main.cc DEPS gtest gflags) endif() lite_cc_library(target_wrapper_lite SRCS target_wrapper.cc DEPS target_wrapper_host @@ -59,4 +59,3 @@ lite_cc_test(test_type_system SRCS type_system_test.cc DEPS type_system utils_li lite_cc_test(test_types_lite SRCS types_test.cc DEPS types_lite) lite_cc_test(test_memory_lite SRCS memory_test.cc DEPS memory_lite) lite_cc_test(test_context_lite SRCS context_test.cc DEPS context_lite X86_DEPS operator) -
diff --git a/paddle/fluid/lite/core/hvy_tensor.h b/paddle/fluid/lite/core/hvy_tensor.h index 748e80c2559718d278a08e3c568532e177c835eb..6dbef9bc86a5e207ea2be1baea2dc96bbc6c0309 100644 --- a/paddle/fluid/lite/core/hvy_tensor.h +++ b/paddle/fluid/lite/core/hvy_tensor.h @@ -86,6 +86,7 @@ class TensorHvy : public TensorBase<TensorHvy> { template <typename T> T* mutable_data() { + memory_size_ = framework::product(data_.dims()) * sizeof(T); return data_.mutable_data<T>(data_.dims(), platform::CPUPlace()); } template <typename T> @@ -128,8 +129,11 @@ const framework::LoDTensor& raw_tensor() const { return data_; } framework::LoDTensor& raw_tensor() { return data_; } + size_t memory_size() const { return memory_size_; } + private: framework::LoDTensor data_; + size_t memory_size_{}; }; } // namespace lite
diff --git a/paddle/fluid/lite/core/lite_tensor.h b/paddle/fluid/lite/core/lite_tensor.h index 6cccdc0dd03527434ac1ac49f3e3fb8a78b26c34..9860265bbb342e91cfd8031eef6eb1062c98920f 100644 --- a/paddle/fluid/lite/core/lite_tensor.h +++ b/paddle/fluid/lite/core/lite_tensor.h @@ -90,6 +90,8 @@ class TensorLite : public TensorBase<TensorLite> { void *mutable_data(size_t memory_size); void *mutable_data(TargetType target, size_t memory_size); + const void *raw_data() const { return buffer_->data(); } + size_t memory_size() const { return memory_size_; } bool IsInitialized() const { return buffer_->data(); }
diff --git a/paddle/fluid/lite/core/mir/CMakeLists.txt b/paddle/fluid/lite/core/mir/CMakeLists.txt index 0d7fcf8b3b2843c0d36be24288743a86b8c7ea24..e67ade8cbef5c574ce911bee403a152a23aa045e 100644 --- a/paddle/fluid/lite/core/mir/CMakeLists.txt +++ b/paddle/fluid/lite/core/mir/CMakeLists.txt @@ -5,21 +5,25 @@
cc_library(mir_pass_manager SRCS pass_manager.cc DEPS mir_pass mir_ssa_graph mir cc_library(mir_pass_registry SRCS pass_registry.cc DEPS mir_pass_manager) add_subdirectory(fusion) +add_subdirectory(elimination) + cc_library(mir_passes - SRCS fc_fuse_pass.cc - conv_elementwise_add_activation_fuse_pass.cc - elementwise_add_activation_fuse_pass.cc - conv_bn_fuse_pass.cc - static_kernel_pick_pass.cc - variable_place_inference_pass.cc - type_target_transform_pass.cc - io_copy_kernel_pick_pass.cc - graph_visualize_pass.cc - generate_program_pass.cc - argument_type_display_pass.cc - demo_pass.cc - runtime_context_assign_pass.cc - DEPS mir_pass types_lite context_lite ${mir_fusers}) + SRCS + fusion/fc_fuse_pass.cc + fusion/conv_elementwise_add_activation_fuse_pass.cc + fusion/conv_bn_fuse_pass.cc + fusion/elementwise_add_activation_fuse_pass.cc + elimination/identity_scale_eliminate_pass.cc + static_kernel_pick_pass.cc + variable_place_inference_pass.cc + type_target_transform_pass.cc + io_copy_kernel_pick_pass.cc + graph_visualize_pass.cc + generate_program_pass.cc + argument_type_display_pass.cc + demo_pass.cc + runtime_context_assign_pass.cc + DEPS mir_pass types_lite context_lite ${mir_fusers}) #cc_test(test_ssa_graph SRCS ssa_graph_test.cc DEPS #mir_ssa_graph scope_lite op_lite @@ -73,7 +77,7 @@ message(STATUS "----> Ops lite: ${ops_lite}") message(STATUS "----> Host kernels: ${host_kernels}") message(STATUS "----> X86 kernels: ${x86_kernels}") -lite_cc_test(test_lite_fc_fuse SRCS fc_fuse_pass_test.cc +lite_cc_test(test_lite_fc_fuse SRCS fusion/fc_fuse_pass_test.cc DEPS cxx_api_lite mir_passes ${ops_lite} ${host_kernels} ${x86_kernels} ${arm_kernels} ARGS --model_dir=${LITE_MODEL_DIR}/lite_fc_model @@ -83,11 +87,11 @@ lite_download_and_uncompress(${LITE_MODEL_DIR} ${LITE_URL} "lite_fc_model.tar.gz add_dependencies(test_lite_fc_fuse extern_lite_download_lite_fc_model_tar_gz) -lite_cc_test(test_lite_conv_elementwise_add_activation_fuse - SRCS conv_elementwise_add_activation_fuse_pass_test.cc +lite_cc_test(test_lite_conv_elementwise_add_activation_fuse + SRCS fusion/conv_elementwise_add_activation_fuse_pass_test.cc DEPS cxx_api_lite mir_passes ${ops_lite} ${host_kernels} ${x86_kernels}) -lite_cc_test(test_lite_elementwise_add_activation_fuse - SRCS elementwise_add_activation_fuse_pass_test.cc +lite_cc_test(test_lite_elementwise_add_activation_fuse + SRCS fusion/elementwise_add_activation_fuse_pass_test.cc DEPS cxx_api_lite mir_passes ${ops_lite} ${host_kernels} ${x86_kernels}) diff --git a/paddle/fluid/lite/core/mir/elimination/CMakeLists.txt b/paddle/fluid/lite/core/mir/elimination/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..9fda8ec29a4da3a3a9b443448f10e27b93ce61e8 --- /dev/null +++ b/paddle/fluid/lite/core/mir/elimination/CMakeLists.txt @@ -0,0 +1,7 @@ +if (NOT LITE_WITH_LIGHT_WEIGHT_FRAMEWORK) + lite_cc_test(test_identity_scale_eliminate_pass_lite + SRCS identity_scale_eliminate_pass_test.cc + DEPS mir_passes program_lite proto_desc cpp_op_desc_lite + ${ops_lite} + ) +endif() diff --git a/paddle/fluid/lite/core/mir/elimination/identity_scale_eliminate_pass.cc b/paddle/fluid/lite/core/mir/elimination/identity_scale_eliminate_pass.cc new file mode 100644 index 0000000000000000000000000000000000000000..6f8aeb65c0592a184ee436f8cefeb9d241a6943f --- /dev/null +++ b/paddle/fluid/lite/core/mir/elimination/identity_scale_eliminate_pass.cc @@ -0,0 +1,72 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/lite/core/mir/pass.h" +#include "paddle/fluid/lite/core/mir/pass_registry.h" +#include "paddle/fluid/lite/core/mir/pattern_matcher_high_api.h" + +namespace paddle { +namespace lite { +namespace mir { + +namespace { + +class Eliminator : public FuseBase { + public: + void BuildPattern() override { + auto* pre_op = OpNode("preop"); // the previous op, whose output needs updating + // TODO(Superjomn) check that it has only one output + auto* x = VarNode("x")->assert_is_op_input("scale", "X"); + auto* scale_op = OpNode("scale", "scale") + ->assert_op_attr<float>("scale", 1.) + ->assert_op_attr<float>("bias", 0.); + auto* out = VarNode("out")->assert_is_op_output("scale", "Out"); + + *pre_op >> *x >> *scale_op >> *out; + + // The scale op will be eliminated, and the previous op's output will be + // rewired to out. + x->AsIntermediate(); // x is pre_op's output and needs to be updated + } + + private: + void InsertNewNode(SSAGraph* graph, const key2nodes_t& matched) override { + auto& pre_op = matched.at("preop")->AsStmt(); + auto op_info = *pre_op.op_info(); + + op_info.UpdateAllOutputs(matched.at("x")->AsArg().name, + matched.at("out")->AsArg().name); + pre_op.ResetOp(op_info, graph->valid_places()); + + GraphSafeRemoveNodes(graph, {matched.at("scale")}); + + IR_NODE_LINK_TO(matched.at("preop"), matched.at("out")); + } +}; + +} // namespace + +class IdentityScaleEliminatePass : public ProgramPass { + public: + void Apply(const std::unique_ptr<SSAGraph>& graph) override { + Eliminator eliminator; + eliminator(graph.get()); + } +}; + +} // namespace mir +} // namespace lite +} // namespace paddle + +REGISTER_MIR_PASS(identity_scale_eliminate_pass, + paddle::lite::mir::IdentityScaleEliminatePass);
diff --git a/paddle/fluid/lite/core/mir/elimination/identity_scale_eliminate_pass_test.cc b/paddle/fluid/lite/core/mir/elimination/identity_scale_eliminate_pass_test.cc new file mode 100644 index 0000000000000000000000000000000000000000..89db35fe0e8b943b7691f51ed4febacee83ebd41 --- /dev/null +++ b/paddle/fluid/lite/core/mir/elimination/identity_scale_eliminate_pass_test.cc @@ -0,0 +1,93 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include <gtest/gtest.h> +#include "paddle/fluid/framework/program_desc.h" +#include "paddle/fluid/lite/core/mir/graph_visualize_pass.h" +#include "paddle/fluid/lite/core/mir/pass_registry.h" +#include "paddle/fluid/lite/core/mir/ssa_graph.h" + +namespace paddle { +namespace lite { +namespace mir { + +std::unique_ptr<SSAGraph> BuildGraph(framework::ProgramDesc* program_desc, + const std::shared_ptr<Scope>& scope, + const std::vector<Place>& valid_places) { + // Op list: + // (x)->feed -> (feed) -> scale -> (scale_out) -> fetch->(fetch) + // After pass + // (x)->feed->(scale_out)->fetch->(fetch) + auto* main_block = program_desc->MutableBlock(0); + auto* feed_op = main_block->AppendOp(); + auto* scale_op = main_block->AppendOp(); + auto* fetch_op = main_block->AppendOp(); + main_block->Var("x"); + main_block->Var("feed"); + main_block->Var("scale_out"); + main_block->Var("fetch_out"); + + scope->Var("x")->GetMutable<lite::Tensor>(); + scope->Var("feed")->GetMutable<lite::Tensor>(); + scope->Var("scale_out")->GetMutable<lite::Tensor>(); + scope->Var("fetch_out")->GetMutable<lite::Tensor>(); + + feed_op->SetType("feed"); + feed_op->SetInput("X", {"x"}); + feed_op->SetAttr("col", 1); + feed_op->SetOutput("Out", {"feed"}); + + scale_op->SetType("scale"); + scale_op->SetInput("X", {"feed"}); + scale_op->SetOutput("Out", {"scale_out"}); + scale_op->SetAttr("scale", 1.f); + scale_op->SetAttr("bias", 0.f); + scale_op->SetAttr("bias_after_scale", true); + + fetch_op->SetType("fetch"); + fetch_op->SetInput("X", {"scale_out"}); + fetch_op->SetOutput("Out", {"fetch"}); + fetch_op->SetAttr("col", 1); + + program_desc->Flush(); + + lite::Program program(*program_desc->Proto(), scope, valid_places); + auto graph = std::unique_ptr<SSAGraph>(new SSAGraph()); + graph->Build(program, valid_places); + + LOG(INFO) << Visualize(graph.get()); + + return graph; +} + +TEST(identity_test, test) { + framework::ProgramDesc program_desc; + std::vector<Place> places{{TARGET(kHost), PRECISION(kFloat)}}; + auto scope = std::make_shared<Scope>(); + auto graph = BuildGraph(&program_desc, scope, places); + const int num_nodes = graph->nodes().size(); + auto pass = PassManager::Global().LookUp("identity_scale_eliminate_pass"); + ASSERT_TRUE(pass); + pass->Apply(graph); + ASSERT_EQ(graph->nodes().size(), num_nodes - 2UL); +} + +} // namespace mir +} // namespace lite +} // namespace paddle + +USE_LITE_OP(feed) +USE_LITE_OP(fetch) +USE_LITE_OP(scale) +USE_MIR_PASS(identity_scale_eliminate_pass)
diff --git a/paddle/fluid/lite/core/mir/fusion/CMakeLists.txt b/paddle/fluid/lite/core/mir/fusion/CMakeLists.txt index 9139293c8aa59d5664e29afba97c02226f9338bf..db092e17679fb2f7ed33cda7d4e92b99b5039776 100644 --- a/paddle/fluid/lite/core/mir/fusion/CMakeLists.txt +++ b/paddle/fluid/lite/core/mir/fusion/CMakeLists.txt @@ -11,7 +11,7 @@ cc_library(fuse_elementwise_add_activation SRCS elementwise_add_activation_fuser.cc DEPS pattern_matcher_high_api) -set(mir_fusers +set(mir_fusers fuse_fc fuse_conv_elementwise_add_activation fuse_conv_bn
diff --git a/paddle/fluid/lite/core/mir/conv_bn_fuse_pass.cc b/paddle/fluid/lite/core/mir/fusion/conv_bn_fuse_pass.cc similarity index 94% rename from paddle/fluid/lite/core/mir/conv_bn_fuse_pass.cc rename to paddle/fluid/lite/core/mir/fusion/conv_bn_fuse_pass.cc index 562ec7f45073a13f37c7f44ebcae0fb13fbb8b42..1e7d7bc5774c7902f2aea80678b05886f9482415 100644 --- a/paddle/fluid/lite/core/mir/conv_bn_fuse_pass.cc +++ b/paddle/fluid/lite/core/mir/fusion/conv_bn_fuse_pass.cc @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License.
-#include "paddle/fluid/lite/core/mir/conv_bn_fuse_pass.h" +#include "paddle/fluid/lite/core/mir/fusion/conv_bn_fuse_pass.h" #include #include #include "paddle/fluid/lite/core/mir/fusion/conv_bn_fuser.h" diff --git a/paddle/fluid/lite/core/mir/conv_bn_fuse_pass.h b/paddle/fluid/lite/core/mir/fusion/conv_bn_fuse_pass.h similarity index 100% rename from paddle/fluid/lite/core/mir/conv_bn_fuse_pass.h rename to paddle/fluid/lite/core/mir/fusion/conv_bn_fuse_pass.h diff --git a/paddle/fluid/lite/core/mir/fusion/conv_bn_fuse_pass_test.cc b/paddle/fluid/lite/core/mir/fusion/conv_bn_fuse_pass_test.cc index 79436a9fa3d71111a5e805a804a77b9bda137134..3a8573b4f8c13684c2077164805966b3887a7f4f 100644 --- a/paddle/fluid/lite/core/mir/fusion/conv_bn_fuse_pass_test.cc +++ b/paddle/fluid/lite/core/mir/fusion/conv_bn_fuse_pass_test.cc @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/fluid/lite/core/mir/conv_bn_fuse_pass.h" +#include "paddle/fluid/lite/core/mir/fusion/conv_bn_fuse_pass.h" #include #include #include diff --git a/paddle/fluid/lite/core/mir/fusion/conv_bn_fuser.cc b/paddle/fluid/lite/core/mir/fusion/conv_bn_fuser.cc index b9d858a990d59c9006e0cfbab9b0afda95350528..0a73d1e39d99ba0d3b4e4790bb689ed403e63f5e 100644 --- a/paddle/fluid/lite/core/mir/fusion/conv_bn_fuser.cc +++ b/paddle/fluid/lite/core/mir/fusion/conv_bn_fuser.cc @@ -70,7 +70,7 @@ void ConvBNFuser::BuildPattern() { void ConvBNFuser::InsertNewNode(SSAGraph* graph, const key2nodes_t& matched) { auto op_desc = GenOpDesc(matched); auto eltwise_op = LiteOpRegistry::Global().Create("elementwise_add"); - auto conv = matched.at("conv2d")->stmt()->op; + auto conv = matched.at("conv2d")->stmt()->op(); auto* scope = conv->scope(); auto& valid_places = conv->valid_places(); @@ -84,7 +84,7 @@ void ConvBNFuser::InsertNewNode(SSAGraph* graph, const key2nodes_t& matched) { ->GetMutable(); size_t bias_size = bn_scale_t->data_size(); auto bn_scale_d = bn_scale_t->mutable_data(); - CHECK(bias_size == conv_weight_dims[0]) + CHECK_EQ(bias_size, static_cast(conv_weight_dims[0])) << "The BN bias's size should be equal to the size of the first " << "dim size of the conv weights"; diff --git a/paddle/fluid/lite/core/mir/conv_elementwise_add_activation_fuse_pass.cc b/paddle/fluid/lite/core/mir/fusion/conv_elementwise_add_activation_fuse_pass.cc similarity index 94% rename from paddle/fluid/lite/core/mir/conv_elementwise_add_activation_fuse_pass.cc rename to paddle/fluid/lite/core/mir/fusion/conv_elementwise_add_activation_fuse_pass.cc index 27f6413c47b514d3203c5879d7ee7b9697d8cf5a..f4eb5a00ad24900fc97abbc6ce4e890d288e0872 100644 --- a/paddle/fluid/lite/core/mir/conv_elementwise_add_activation_fuse_pass.cc +++ b/paddle/fluid/lite/core/mir/fusion/conv_elementwise_add_activation_fuse_pass.cc @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "paddle/fluid/lite/core/mir/conv_elementwise_add_activation_fuse_pass.h" +#include "paddle/fluid/lite/core/mir/fusion/conv_elementwise_add_activation_fuse_pass.h" #include #include #include "paddle/fluid/lite/core/mir/fusion/conv_elementwise_add_activation_fuser.h" diff --git a/paddle/fluid/lite/core/mir/conv_elementwise_add_activation_fuse_pass.h b/paddle/fluid/lite/core/mir/fusion/conv_elementwise_add_activation_fuse_pass.h similarity index 100% rename from paddle/fluid/lite/core/mir/conv_elementwise_add_activation_fuse_pass.h rename to paddle/fluid/lite/core/mir/fusion/conv_elementwise_add_activation_fuse_pass.h diff --git a/paddle/fluid/lite/core/mir/conv_elementwise_add_activation_fuse_pass_test.cc b/paddle/fluid/lite/core/mir/fusion/conv_elementwise_add_activation_fuse_pass_test.cc similarity index 97% rename from paddle/fluid/lite/core/mir/conv_elementwise_add_activation_fuse_pass_test.cc rename to paddle/fluid/lite/core/mir/fusion/conv_elementwise_add_activation_fuse_pass_test.cc index 5a5fdc134b810b67df33b2d385d982a306a0dddc..e7751d801eaa1239a70e3fb0d128165029a31669 100644 --- a/paddle/fluid/lite/core/mir/conv_elementwise_add_activation_fuse_pass_test.cc +++ b/paddle/fluid/lite/core/mir/fusion/conv_elementwise_add_activation_fuse_pass_test.cc @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/fluid/lite/core/mir/conv_elementwise_add_activation_fuse_pass.h" +#include "paddle/fluid/lite/core/mir/fusion/conv_elementwise_add_activation_fuse_pass.h" #include #include #include @@ -20,7 +20,7 @@ #include "paddle/fluid/lite/api/cxx_api.h" #include "paddle/fluid/lite/core/compatible_tensor.h" #include "paddle/fluid/lite/core/mir/graph_visualize_pass.h" -#include "paddle/fluid/lite/core/mir/passes.h" +#include "paddle/fluid/lite/core/mir/use_passes.h" #include "paddle/fluid/lite/core/op_registry.h" #include "paddle/fluid/lite/core/program.h" diff --git a/paddle/fluid/lite/core/mir/fusion/conv_elementwise_add_activation_fuser.cc b/paddle/fluid/lite/core/mir/fusion/conv_elementwise_add_activation_fuser.cc index b26b758fb2318b7c9a645503687f994b73009310..a085b139c86725360b4939c979cec685bf11879b 100644 --- a/paddle/fluid/lite/core/mir/fusion/conv_elementwise_add_activation_fuser.cc +++ b/paddle/fluid/lite/core/mir/fusion/conv_elementwise_add_activation_fuser.cc @@ -65,7 +65,7 @@ void ConvElementwiseAddActivationFuser::InsertNewNode( SSAGraph* graph, const key2nodes_t& matched) { auto op_desc = GenOpDesc(matched); auto conv_op = LiteOpRegistry::Global().Create(conv_type_); - auto conv_old = matched.at("conv2d")->stmt()->op; + auto conv_old = matched.at("conv2d")->stmt()->op(); auto* scope = conv_old->scope(); auto& valid_places = conv_old->valid_places(); conv_op->Attach(op_desc, scope); diff --git a/paddle/fluid/lite/core/mir/fusion/conv_elementwise_add_relu_fuse_pass.cc b/paddle/fluid/lite/core/mir/fusion/conv_elementwise_add_relu_fuse_pass.cc new file mode 100644 index 0000000000000000000000000000000000000000..4ace19f304bf1f935c82d138e3980e85e417d6f8 --- /dev/null +++ b/paddle/fluid/lite/core/mir/fusion/conv_elementwise_add_relu_fuse_pass.cc @@ -0,0 +1,39 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "conv_elementwise_add_relu_fuse_pass.h" +#include <memory> +#include <vector> +#include "paddle/fluid/lite/core/mir/fusion/conv_elementwise_add_relu_fuser.h" +#include "paddle/fluid/lite/core/mir/pass_registry.h" + +namespace paddle { +namespace lite { +namespace mir { + +void ConvElementwiseAddReLUFusePass::Apply( + const std::unique_ptr<SSAGraph>& graph) { + fusion::ConvElementwiseAddReLUFuser fuser("conv2d"); + fuser(graph.get()); + + fusion::ConvElementwiseAddReLUFuser depthwise_fuser("depthwise_conv2d"); + depthwise_fuser(graph.get()); +} + +} // namespace mir +} // namespace lite +} // namespace paddle + +REGISTER_MIR_PASS(lite_conv_elementwise_add_act_fuse_pass, + paddle::lite::mir::ConvElementwiseAddReLUFusePass);
diff --git a/paddle/fluid/lite/core/mir/fusion/conv_elementwise_add_relu_fuse_pass.h b/paddle/fluid/lite/core/mir/fusion/conv_elementwise_add_relu_fuse_pass.h new file mode 100644 index 0000000000000000000000000000000000000000..4276f1ffc8c258b0b4266abd950fa1ccf541c4a7 --- /dev/null +++ b/paddle/fluid/lite/core/mir/fusion/conv_elementwise_add_relu_fuse_pass.h @@ -0,0 +1,32 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include <memory> +#include <string> +#include "paddle/fluid/lite/core/mir/pass.h" + +namespace paddle { +namespace lite { +namespace mir { + +class ConvElementwiseAddReLUFusePass : public ProgramPass { + public: + void Apply(const std::unique_ptr<SSAGraph>& graph) override; +}; + +} // namespace mir +} // namespace lite +} // namespace paddle
diff --git a/paddle/fluid/lite/core/mir/fusion/conv_elementwise_add_relu_fuse_pass_test.cc b/paddle/fluid/lite/core/mir/fusion/conv_elementwise_add_relu_fuse_pass_test.cc new file mode 100644 index 0000000000000000000000000000000000000000..00c9eaf8c07ce4f853ee51c39c752c51bf0c6ccd --- /dev/null +++ b/paddle/fluid/lite/core/mir/fusion/conv_elementwise_add_relu_fuse_pass_test.cc @@ -0,0 +1,153 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+ +#include "conv_elementwise_add_relu_fuse_pass.h" +#include +#include +#include +#include "paddle/fluid/framework/program_desc.h" +#include "paddle/fluid/lite/api/cxx_api.h" +#include "paddle/fluid/lite/core/compatible_tensor.h" +#include "paddle/fluid/lite/core/mir/graph_visualize_pass.h" +#include "paddle/fluid/lite/core/mir/passes.h" +#include "paddle/fluid/lite/core/op_registry.h" +#include "paddle/fluid/lite/core/program.h" + +DEFINE_string(model_dir, "", ""); +DEFINE_string(optimized_model, "", ""); + +namespace paddle { +namespace lite { +namespace mir { +namespace fusion { + +std::unique_ptr BuildGraph(framework::ProgramDesc* program_desc, + const std::shared_ptr& scope, + const std::vector& valid_places) { + auto* main_block = program_desc->MutableBlock(0); + + auto* conv2d_1 = main_block->AppendOp(); + auto* conv2d_2 = main_block->AppendOp(); + auto* add_1 = main_block->AppendOp(); + auto* relu_1 = main_block->AppendOp(); + auto* add_2 = main_block->AppendOp(); + auto* relu_2 = main_block->AppendOp(); + + main_block->Var("input_1"); + main_block->Var("input_2"); + main_block->Var("filter_1"); + main_block->Var("filter_2"); + main_block->Var("conv2d_1_out"); + main_block->Var("conv2d_2_out"); + main_block->Var("bias_1"); + main_block->Var("add_1_out"); + main_block->Var("add_2_out"); + main_block->Var("relu_1_out"); + main_block->Var("out"); + + scope->Var("input_1")->GetMutable(); + scope->Var("input_2")->GetMutable(); + scope->Var("filter_1")->GetMutable(); + scope->Var("filter_2")->GetMutable(); + scope->Var("conv2d_1_out")->GetMutable(); + scope->Var("conv2d_2_out")->GetMutable(); + scope->Var("bias_1")->GetMutable(); + scope->Var("add_1_out")->GetMutable(); + scope->Var("add_2_out")->GetMutable(); + scope->Var("relu_1_out")->GetMutable(); + scope->Var("out")->GetMutable(); + + conv2d_1->SetType("conv2d"); + conv2d_1->SetInput("Input", {"input_1"}); + conv2d_1->SetInput("Filter", {"filter_1"}); + conv2d_1->SetOutput("Output", {"conv2d_1_out"}); + conv2d_1->SetAttr("strides", std::vector({1, 1})); + conv2d_1->SetAttr("paddings", std::vector({0, 0})); + conv2d_1->SetAttr("groups", 1); + conv2d_1->SetAttr("dilations", std::vector({1, 1})); + conv2d_1->SetAttr("fuse_relu", false); + + add_1->SetType("elementwise_add"); + add_1->SetInput("X", {"conv2d_1_out"}); + add_1->SetInput("Y", {"bias_1"}); + add_1->SetOutput("Out", {"add_1_out"}); + add_1->SetAttr("axis", 1); + + relu_1->SetType("relu"); + relu_1->SetInput("X", {"add_1_out"}); + relu_1->SetOutput("Out", {"relu_1_out"}); + + conv2d_2->SetType("conv2d"); + conv2d_2->SetInput("Input", {"input_2"}); + conv2d_2->SetInput("Filter", {"filter_2"}); + conv2d_2->SetOutput("Output", {"conv2d_2_out"}); + conv2d_2->SetAttr("strides", std::vector({1, 1})); + conv2d_2->SetAttr("paddings", std::vector({0, 0})); + conv2d_2->SetAttr("groups", 1); + conv2d_2->SetAttr("dilations", std::vector({1, 1})); + conv2d_2->SetAttr("fuse_relu", false); + + add_2->SetType("elementwise_add"); + add_2->SetInput("X", {"conv2d_2_out"}); + add_2->SetInput("Y", {"relu_1_out"}); + add_2->SetOutput("Out", {"add_2_out"}); + add_2->SetAttr("axis", 1); + + relu_2->SetType("relu"); + relu_2->SetInput("X", {"add_2_out"}); + relu_2->SetOutput("Out", {"out"}); + + program_desc->Flush(); + + lite::Program program(*program_desc->Proto(), scope, valid_places); + auto graph = std::unique_ptr(new SSAGraph()); + graph->Build(program, valid_places); + + return graph; +} + +TEST(conv_elementwise_add_relu_fuse_pass, graph_test) { + framework::ProgramDesc program_desc; + 
std::vector<Place> places{{TARGET(kHost), PRECISION(kFloat)}};
+  auto scope = std::make_shared<Scope>();
+  auto graph = BuildGraph(&program_desc, scope, places);
+
+  Visualize(graph.get());
+  ASSERT_EQ(graph->nodes().size(), 11UL /*vars*/ + 6UL /*ops*/);
+  Visualize(graph.get());
+}
+
+TEST(conv_elementwise_add_relu_fuse_pass, fuse_test_op) {
+  framework::ProgramDesc program_desc;
+  std::vector<Place> places{{TARGET(kHost), PRECISION(kFloat)}};
+  auto scope = std::make_shared<Scope>();
+  auto graph = BuildGraph(&program_desc, scope, places);
+  Visualize(graph.get());
+  const int num_nodes = graph->nodes().size();
+  auto* fuser = new ConvElementwiseAddReLUFusePass;
+  fuser->Apply(graph);
+  Visualize(graph.get());
+  ASSERT_EQ(graph->nodes().size(), num_nodes - 5UL * 2 /*nodes removed*/ +
+                                       1UL * 2 /*fused conv nodes*/);
+}
+
+}  // namespace fusion
+}  // namespace mir
+}  // namespace lite
+}  // namespace paddle
+
+USE_LITE_OP(elementwise_add);
+USE_LITE_OP(conv2d);
+USE_LITE_OP(depthwise_conv2d);
+USE_LITE_OP(relu);
diff --git a/paddle/fluid/lite/core/mir/elementwise_add_activation_fuse_pass.cc b/paddle/fluid/lite/core/mir/fusion/elementwise_add_activation_fuse_pass.cc
similarity index 93%
rename from paddle/fluid/lite/core/mir/elementwise_add_activation_fuse_pass.cc
rename to paddle/fluid/lite/core/mir/fusion/elementwise_add_activation_fuse_pass.cc
index 9ce455dcdafb0d2e8f040bc3244495b2968eebd0..20d1eaa82a8cdaafb21252a60b7977e0b4bae1cd 100644
--- a/paddle/fluid/lite/core/mir/elementwise_add_activation_fuse_pass.cc
+++ b/paddle/fluid/lite/core/mir/fusion/elementwise_add_activation_fuse_pass.cc
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
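The expected node count in fuse_test_op above follows from simple bookkeeping: each matched conv/add/relu chain deletes five graph nodes (three statement nodes plus the two intermediate output arguments) and inserts one fused conv statement. That accounting is implied by the assertions rather than spelled out; under it, the arithmetic checks out:

```cpp
#include <cassert>
#include <iostream>

int main() {
  const int vars = 11, ops = 6;    // nodes built by BuildGraph
  const int before = vars + ops;   // 17, checked by graph_test
  const int removed = 5;           // conv op, add op, relu op + 2 temp vars
  const int added = 1;             // the single fused conv statement
  const int matches = 2;           // two conv/add/relu chains in the graph
  const int after = before - removed * matches + added * matches;
  assert(after == 9);              // matches the ASSERT_EQ in fuse_test_op
  std::cout << "nodes after fusion: " << after << "\n";
}
```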
-#include "paddle/fluid/lite/core/mir/elementwise_add_activation_fuse_pass.h" +#include "paddle/fluid/lite/core/mir/fusion/elementwise_add_activation_fuse_pass.h" #include #include #include @@ -20,7 +20,7 @@ #include "paddle/fluid/lite/api/cxx_api.h" #include "paddle/fluid/lite/core/compatible_tensor.h" #include "paddle/fluid/lite/core/mir/graph_visualize_pass.h" -#include "paddle/fluid/lite/core/mir/passes.h" +#include "paddle/fluid/lite/core/mir/use_passes.h" #include "paddle/fluid/lite/core/op_registry.h" #include "paddle/fluid/lite/core/program.h" diff --git a/paddle/fluid/lite/core/mir/fusion/elementwise_add_activation_fuser.cc b/paddle/fluid/lite/core/mir/fusion/elementwise_add_activation_fuser.cc index 83b916eea3e47947083d4a41406d2ebd6918dfd2..cafbc42d85b1f8159b0d5b010847348d1150777d 100644 --- a/paddle/fluid/lite/core/mir/fusion/elementwise_add_activation_fuser.cc +++ b/paddle/fluid/lite/core/mir/fusion/elementwise_add_activation_fuser.cc @@ -54,7 +54,7 @@ void ElementwiseAddActivationFuser::InsertNewNode(SSAGraph* graph, auto op_desc = GenOpDesc(matched); auto op = LiteOpRegistry::Global().Create("fusion_elementwise_add_activation"); - auto old_op = matched.at("add")->stmt()->op; + auto old_op = matched.at("add")->stmt()->op(); auto* scope = old_op->scope(); auto& valid_places = old_op->valid_places(); op->Attach(op_desc, scope); diff --git a/paddle/fluid/lite/core/mir/fc_fuse_pass.cc b/paddle/fluid/lite/core/mir/fusion/fc_fuse_pass.cc similarity index 94% rename from paddle/fluid/lite/core/mir/fc_fuse_pass.cc rename to paddle/fluid/lite/core/mir/fusion/fc_fuse_pass.cc index 008f05ce5cbd5f6f14d67e79f732e51ab2aa3ddd..f50db9c17b3dd81ad37558996f58164c057abd97 100644 --- a/paddle/fluid/lite/core/mir/fc_fuse_pass.cc +++ b/paddle/fluid/lite/core/mir/fusion/fc_fuse_pass.cc @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/fluid/lite/core/mir/fc_fuse_pass.h" +#include "paddle/fluid/lite/core/mir/fusion/fc_fuse_pass.h" #include #include #include "paddle/fluid/lite/core/mir/fusion/fc_fuser.h" diff --git a/paddle/fluid/lite/core/mir/fc_fuse_pass.h b/paddle/fluid/lite/core/mir/fusion/fc_fuse_pass.h similarity index 100% rename from paddle/fluid/lite/core/mir/fc_fuse_pass.h rename to paddle/fluid/lite/core/mir/fusion/fc_fuse_pass.h diff --git a/paddle/fluid/lite/core/mir/fc_fuse_pass_test.cc b/paddle/fluid/lite/core/mir/fusion/fc_fuse_pass_test.cc similarity index 97% rename from paddle/fluid/lite/core/mir/fc_fuse_pass_test.cc rename to paddle/fluid/lite/core/mir/fusion/fc_fuse_pass_test.cc index 35efedb57971d19551ee144e47f87bcfd4d73ce4..b64a436f925d291929079703c8687930b97a8a13 100644 --- a/paddle/fluid/lite/core/mir/fc_fuse_pass_test.cc +++ b/paddle/fluid/lite/core/mir/fusion/fc_fuse_pass_test.cc @@ -12,12 +12,12 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "paddle/fluid/lite/core/mir/fc_fuse_pass.h" +#include "fc_fuse_pass.h" #include #include #include #include "paddle/fluid/lite/api/cxx_api.h" -#include "paddle/fluid/lite/core/mir/passes.h" +#include "paddle/fluid/lite/core/mir/use_passes.h" #include "paddle/fluid/lite/core/op_registry.h" DEFINE_string(model_dir, "", ""); diff --git a/paddle/fluid/lite/core/mir/fusion/fc_fuser.cc b/paddle/fluid/lite/core/mir/fusion/fc_fuser.cc index a8b6336595c0fe63d64d75d6434fcfd559c185c9..bb350c731c657c54d071dd490bbba953fbbe83fd 100644 --- a/paddle/fluid/lite/core/mir/fusion/fc_fuser.cc +++ b/paddle/fluid/lite/core/mir/fusion/fc_fuser.cc @@ -46,7 +46,7 @@ void FcFuser::BuildPattern() { void FcFuser::InsertNewNode(SSAGraph* graph, const key2nodes_t& matched) { auto op_desc = GenOpDesc(matched); auto fc_op = LiteOpRegistry::Global().Create("fc"); - auto mul = matched.at("mul")->stmt()->op; + auto mul = matched.at("mul")->stmt()->op(); auto* scope = mul->scope(); auto& valid_places = mul->valid_places(); fc_op->Attach(op_desc, scope); diff --git a/paddle/fluid/lite/core/mir/generate_program_pass.cc b/paddle/fluid/lite/core/mir/generate_program_pass.cc index e74c71b778b4faa53d82beac66dba46d7f3668a5..97586d7484204a4eccee9385c05aafbc11460f62 100644 --- a/paddle/fluid/lite/core/mir/generate_program_pass.cc +++ b/paddle/fluid/lite/core/mir/generate_program_pass.cc @@ -24,12 +24,12 @@ namespace lite { namespace mir { void GenerateProgramPass::Apply(const std::unique_ptr& graph) { - LOG(INFO) << "final program \n" << Visualize(graph.get()); + VLOG(4) << "final program \n" << Visualize(graph.get()); for (auto& item : graph->StmtTopologicalOrder()) { if (item->IsStmt()) { auto& stmt = item->AsStmt(); VLOG(4) << stmt; - insts_.emplace_back(stmt.op, std::move(stmt.valid_kernels.front())); + insts_.emplace_back(stmt.op(), std::move(stmt.kernels().front())); } } } diff --git a/paddle/fluid/lite/core/mir/graph_visualize_pass.cc b/paddle/fluid/lite/core/mir/graph_visualize_pass.cc index 6a13bafd67ca710691f1a20a62ea411c90064e85..90a99b5deb199cc69e6732eddc60b77964b92d03 100644 --- a/paddle/fluid/lite/core/mir/graph_visualize_pass.cc +++ b/paddle/fluid/lite/core/mir/graph_visualize_pass.cc @@ -39,7 +39,7 @@ std::string Visualize(mir::SSAGraph* graph) { if (node.IsArg()) { key = node.AsArg().name; } else { - key = node.AsStmt().op_type + std::to_string(id++); + key = node.AsStmt().op_type() + std::to_string(id++); } if (node.IsStmt()) { diff --git a/paddle/fluid/lite/core/mir/io_copy_kernel_pick_pass.cc b/paddle/fluid/lite/core/mir/io_copy_kernel_pick_pass.cc index ebf9e5a57bfb9395cbd661c4e69ec2980eebbd17..9f38ce01ba12627b8eae3ff51d5de620c971fc46 100644 --- a/paddle/fluid/lite/core/mir/io_copy_kernel_pick_pass.cc +++ b/paddle/fluid/lite/core/mir/io_copy_kernel_pick_pass.cc @@ -25,11 +25,11 @@ class IoCopyKernelPickPass : public StmtPass { for (auto& node : graph->mutable_nodes()) { if (!node.IsStmt()) continue; auto& inst = node.AsStmt(); - if (inst.op_type != "io_copy") continue; + if (inst.op_type() != "io_copy") continue; LOG(INFO) << "....> picking a IO COPY kernel"; - auto& kernels = node.AsStmt().valid_kernels; + auto& kernels = node.AsStmt().kernels(); CHECK(!kernels.empty()) << "No valid kernels found for IoCopy Op"; const auto* inty = node.inlinks.front()->AsArg().type; const auto* outy = node.outlinks.front()->AsArg().type; diff --git a/paddle/fluid/lite/core/mir/node.cc b/paddle/fluid/lite/core/mir/node.cc index 711ff508f23c7d5218a7d788e90b3fe58f154018..814df2b61a268f6ef71989f831edd53c9bfdb41d 100644 --- 
a/paddle/fluid/lite/core/mir/node.cc +++ b/paddle/fluid/lite/core/mir/node.cc @@ -13,3 +13,62 @@ // limitations under the License. #include "paddle/fluid/lite/core/mir/node.h" +#include "paddle/fluid/lite/core/op_registry.h" + +namespace paddle { +namespace lite { + +const OpInfo *mir::Node::Stmt::op_info() const { + CHECK(op_); + return op_->op_info(); +} + +Place mir::Node::Stmt::place() const { + CHECK(!valid_kernels_.empty()); + return valid_kernels_.front()->place(); +} + +KernelBase &mir::Node::Stmt::picked_kernel() { + CHECK(!valid_kernels_.empty()) << "no kernel for " << op_type(); + return *valid_kernels_.front(); +} + +OpInfo *mir::Node::Stmt::mutable_op_info() { + CHECK(op_); + return op_->mutable_op_info(); +} + +void mir::Node::Stmt::ResetOp(const cpp::OpDesc &op_desc, + const std::vector &valid_places, + lite::Scope *scope) { + CHECK((op_ && op_->scope()) || scope) << "Either scope should be set"; + lite::Scope *the_scope = scope ? scope : op_->scope(); + op_->Attach(op_desc, the_scope); + // Recreate the kernels with the latest OpInfo. + valid_kernels_.clear(); + + if (!op_ || op_->op_info()->Type() != op_desc.Type()) { + op_ = LiteOpRegistry::Global().Create(op_desc.Type()); + CHECK(op_) << "No op found for " << op_desc.Type(); + } + valid_kernels_ = op_->CreateKernels(valid_places); +} + +std::ostream &mir::operator<<(std::ostream &os, const mir::Node::Stmt &other) { + os << "Statement " << other.op_type() << " " << other.place(); + return os; +} + +mir::Node::Arg &mir::Node::AsArg(const std::string &name, int id) { + auto &x = AsArg(); + x.name = name; + x.id = id; + return x; +} +mir::Node::Arg &mir::Node::AsArg(const std::string &name) { + auto &x = AsArg(); + x.name = name; + return x; +} +} // namespace lite +} // namespace paddle diff --git a/paddle/fluid/lite/core/mir/node.h b/paddle/fluid/lite/core/mir/node.h index a5fd90dac482d434afb624216aad875e12350c36..08b7a963e797b31ab015dab4761fb5f41d855faa 100644 --- a/paddle/fluid/lite/core/mir/node.h +++ b/paddle/fluid/lite/core/mir/node.h @@ -41,32 +41,40 @@ class Node { kUnk, }; - struct Stmt { - std::string op_type; + class Stmt { // The kernel instances this Statement contains. - std::vector> valid_kernels; + std::vector> valid_kernels_; // TODO(Superjomn) make this a shared_ptr for resource safety. - std::shared_ptr op; // we hold op to run InferShape + std::shared_ptr op_; // we hold op to run InferShape - const OpInfo* op_info() { - CHECK(op); - return op->op_info(); - } + public: + // Refresh the operator and kernels with the latest OpInfo. 
+ void ResetOp(const cpp::OpDesc& op_desc, + const std::vector& valid_places, + lite::Scope* scope = nullptr); - Place place() const { - CHECK(!valid_kernels.empty()); - return valid_kernels.front()->place(); - } + std::string op_type() const { return op_info()->Type(); } + const OpInfo* op_info() const; + OpInfo* mutable_op_info(); - KernelBase& picked_kernel() { - CHECK(!valid_kernels.empty()) << "no kernel for " << op_type; - return *valid_kernels.front(); + void SetKernels(std::vector>&& kernels) { + valid_kernels_ = std::move(kernels); } - - friend std::ostream& operator<<(std::ostream& os, const Stmt& other) { - os << "Statement " << other.op_type << " " << other.place(); - return os; + std::vector>& kernels() { + return valid_kernels_; } + + void SetOp(const std::shared_ptr& op) { op_ = op; } + const std::shared_ptr op() const { return op_; } + + Place place() const; + + KernelBase& picked_kernel(); + + friend std::ostream& operator<<(std::ostream& os, const Stmt& other); + + // Description. + std::string desc; }; struct Arg { @@ -78,26 +86,16 @@ class Node { bool is_weight{false}; }; - Arg& AsArg(const std::string& name, int id) { - auto& x = AsArg(); - x.name = name; - x.id = id; - return x; - } + Arg& AsArg(const std::string& name, int id); - Arg& AsArg(const std::string& name) { - auto& x = AsArg(); - x.name = name; - return x; - } + Arg& AsArg(const std::string& name); Stmt& AsStmt(const std::string& op_type, std::vector>&& kernels, const std::shared_ptr& op) { auto& x = AsStmt(); - x.op_type = op_type; - x.op = op; - x.valid_kernels = std::move(kernels); + x.SetOp(op); + x.SetKernels(std::move(kernels)); return x; } @@ -142,7 +140,7 @@ class Node { } if (other.IsStmt()) { auto& arg = other.AsStmt(); - os << "Statement " << arg.op_type; + os << "Statement " << arg.op_type(); } return os; } diff --git a/paddle/fluid/lite/core/mir/pattern_matcher.h b/paddle/fluid/lite/core/mir/pattern_matcher.h index ff9fbce35ddf3f601a441bb6105dc658505cbe0e..76ed5f1dd0fdbe337bd2baf63ec9664773b00f8b 100644 --- a/paddle/fluid/lite/core/mir/pattern_matcher.h +++ b/paddle/fluid/lite/core/mir/pattern_matcher.h @@ -139,14 +139,13 @@ struct PMNode { template PMNode* assert_op_attr(const std::string& attr_name, const T& attr) { - asserts_.emplace_back([=](Node* x) { + asserts_.push_back([=](const Node* x) { if (x && x->IsStmt()) { auto* op_info = x->stmt()->op_info(); return op_info->HasAttr(attr_name) && op_info->GetAttr(attr_name) == attr; - } else { - return false; } + return false; }); return this; } diff --git a/paddle/fluid/lite/core/mir/pattern_matcher_high_api.cc b/paddle/fluid/lite/core/mir/pattern_matcher_high_api.cc index 57bba3aad140b4c8f8e1a2c6db27792773c018cd..9f0b2e1f3225d708f0e71c255bad2eec71628f76 100644 --- a/paddle/fluid/lite/core/mir/pattern_matcher_high_api.cc +++ b/paddle/fluid/lite/core/mir/pattern_matcher_high_api.cc @@ -41,6 +41,7 @@ void FuseBase::DeleteInterNodes(SSAGraph *graph) { } } + LOG(INFO) << "keys: " << key2nodes_.size(); std::unordered_set nodes2rm; for (auto &matched : key2nodes_) { for (const auto &key : keys) { diff --git a/paddle/fluid/lite/core/mir/pattern_matcher_high_api.h b/paddle/fluid/lite/core/mir/pattern_matcher_high_api.h index b3a23c654bdb36974fd1a0419c199ba04a1d66bf..7c3f890383d75ff364db5f9018827d2ddd5e9507 100644 --- a/paddle/fluid/lite/core/mir/pattern_matcher_high_api.h +++ b/paddle/fluid/lite/core/mir/pattern_matcher_high_api.h @@ -49,7 +49,13 @@ class FuseBase { virtual void BuildPattern() = 0; // Generate an operator desc with a matched subgraph. 
- virtual cpp::OpDesc GenOpDesc(const key2nodes_t& matched) = 0; + virtual cpp::OpDesc GenOpDesc(const key2nodes_t& matched) { + return cpp::OpDesc(); + } + + PMNode* OpNode(const std::string& key) { + return GetOrCreateNode(key)->assert_is_op(); + } PMNode* OpNode(const std::string& key, const std::string& op_type); diff --git a/paddle/fluid/lite/core/mir/pattern_matcher_high_api_test.cc b/paddle/fluid/lite/core/mir/pattern_matcher_high_api_test.cc index 7a46bb9a93d95b9379c961d8044fbdfcd04e7ab4..d0844b0b7ef2fa805e042bdf9b66cd478a0de5d0 100644 --- a/paddle/fluid/lite/core/mir/pattern_matcher_high_api_test.cc +++ b/paddle/fluid/lite/core/mir/pattern_matcher_high_api_test.cc @@ -52,7 +52,7 @@ class FcFuser : public FuseBase { void InsertNewNode(SSAGraph* graph, const key2nodes_t& matched) override { auto op_desc = GenOpDesc(matched); auto fc_op = LiteOpRegistry::Global().Create("fc"); - auto mul = matched.at("mul")->stmt()->op; + auto mul = matched.at("mul")->stmt()->op(); auto* scope = mul->scope(); auto& valid_places = mul->valid_places(); fc_op->Attach(op_desc, scope); @@ -90,7 +90,7 @@ std::unique_ptr BuildGraph(framework::ProgramDesc* program_desc, main_block->Var("w"); main_block->Var("out"); - scope->Var("w")->GetMutable(); + scope->Var("x")->GetMutable(); scope->Var("b")->GetMutable(); scope->Var("mul_out")->GetMutable(); scope->Var("w")->GetMutable(); diff --git a/paddle/fluid/lite/core/mir/pattern_matcher_test.cc b/paddle/fluid/lite/core/mir/pattern_matcher_test.cc index 3b082060fe21731000394f6941e0803af7da74d6..8f2ca38f1cc13e80219aad33fd9c5e03cba52283 100644 --- a/paddle/fluid/lite/core/mir/pattern_matcher_test.cc +++ b/paddle/fluid/lite/core/mir/pattern_matcher_test.cc @@ -23,19 +23,19 @@ namespace mir { void BuildGraph(SSAGraph* g) { g->mutable_nodes().emplace_back(); Node& o1 = g->mutable_nodes().back(); - o1.AsStmt().op_type = "op1"; + o1.AsStmt().desc = "op1"; g->mutable_nodes().emplace_back(); Node& o2 = g->mutable_nodes().back(); - o2.AsStmt().op_type = "op2"; + o2.AsStmt().desc = "op2"; g->mutable_nodes().emplace_back(); Node& o3 = g->mutable_nodes().back(); - o3.AsStmt().op_type = "op3"; + o3.AsStmt().desc = "op3"; g->mutable_nodes().emplace_back(); Node& o4 = g->mutable_nodes().back(); - o4.AsStmt().op_type = "op4"; + o4.AsStmt().desc = "op4"; g->mutable_nodes().emplace_back(); Node& o5 = g->mutable_nodes().back(); - o5.AsStmt().op_type = "op5"; + o5.AsStmt().desc = "op5"; g->mutable_nodes().emplace_back(); Node& v1 = g->mutable_nodes().back(); v1.AsArg("var1"); @@ -108,11 +108,11 @@ TEST(PatternMatcher, MarkPMNodesInGraph) { // v2 -> o3(a node named o3) auto* o2 = x.pattern_.NewNode([](const Node* node) { // The teller can be any condition, such as op type, or variable's shape. - return node && node->IsStmt() && node->stmt()->op_type == "op2"; + return node && node->IsStmt() && node->stmt()->desc == "op2"; }); auto* o3 = x.pattern_.NewNode([](const Node* node) { // The teller can be any condition, such as op type, or variable's shape. - return node && node->IsStmt() && node->stmt()->op_type == "op3"; + return node && node->IsStmt() && node->stmt()->desc == "op3"; }); auto* v2 = x.pattern_.NewNode([](const Node* node) { // The teller can be any condition, such as op type, or variable's shape. 
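assert_op_attr now pushes a const-correct predicate, and the whole matcher is built from such "tellers". A stripped-down sketch of the mechanism, using hypothetical Node/PMNode stand-ins rather than Lite's types: a pattern node keeps a list of bool(const Node*) closures and matches only when all of them pass.

```cpp
#include <functional>
#include <iostream>
#include <string>
#include <vector>

struct Node { std::string desc; };  // stand-in for mir::Node

class PMNode {
 public:
  using Teller = std::function<bool(const Node*)>;
  // Chainable, like assert_op_attr in the pass framework above.
  PMNode* assert_desc(const std::string& d) {
    asserts_.push_back([=](const Node* n) { return n && n->desc == d; });
    return this;
  }
  // A node matches only if every registered teller accepts it.
  bool Match(const Node* n) const {
    for (const auto& t : asserts_)
      if (!t(n)) return false;
    return true;
  }

 private:
  std::vector<Teller> asserts_;
};

int main() {
  PMNode pm;
  pm.assert_desc("op2");
  Node a{"op2"}, b{"op3"};
  std::cout << pm.Match(&a) << " " << pm.Match(&b) << "\n";  // 1 0
}
```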
@@ -153,8 +153,8 @@ TEST(PatternMatcher, MultiSubgraph) { // op -> var auto* any_op = x.mutable_pattern()->NewNode( [](const Node* node) { - return node->IsStmt() && (node->stmt()->op_type == "op2" || - node->stmt()->op_type == "op3"); + return node->IsStmt() && + (node->stmt()->desc == "op2" || node->stmt()->desc == "op3"); }, "OP0"); auto* any_var = @@ -170,9 +170,9 @@ TEST(PatternMatcher, MultiSubgraph) { int count = 0; PatternMatcher::handle_t handle = [&](const PatternMatcher::subgraph_t& s, SSAGraph* g) { - LOG(INFO) << "Detect " << s.at(any_op)->stmt()->op_type << " -> " + LOG(INFO) << "Detect " << s.at(any_op)->stmt()->desc << " -> " << s.at(any_var)->arg()->name << " -> " - << s.at(any_op1)->stmt()->op_type; + << s.at(any_op1)->stmt()->desc; count++; }; @@ -197,12 +197,12 @@ TEST(PatternMatcher, IntermediateCheck) { PatternMatcher matcher; auto* op2 = matcher.mutable_pattern()->NewNode( [](const Node* x) { - return x && x->IsStmt() && x->stmt()->op_type == "op2"; + return x && x->IsStmt() && x->stmt()->desc == "op2"; }, "op2"); auto* op3 = matcher.mutable_pattern()->NewNode( [](const Node* x) { - return x && x->IsStmt() && x->stmt()->op_type == "op3"; + return x && x->IsStmt() && x->stmt()->desc == "op3"; }, "op3"); auto* v2 = matcher.mutable_pattern() diff --git a/paddle/fluid/lite/core/mir/ssa_graph.cc b/paddle/fluid/lite/core/mir/ssa_graph.cc index b44cb0fa808962cde4a1d4c4cc0a640854c66851..7df9e2da42fc0fd3313a571b5e6429835e57695a 100644 --- a/paddle/fluid/lite/core/mir/ssa_graph.cc +++ b/paddle/fluid/lite/core/mir/ssa_graph.cc @@ -24,8 +24,10 @@ namespace lite { namespace mir { bool SSAGraph::CheckBidirectionalConnection() { - LOG(INFO) << "node count " << node_storage_.size(); + VLOG(4) << "node count " << node_storage_.size(); for (auto &node : node_storage_) { + if (node.IsStmt()) VLOG(4) << node.AsStmt().op_info()->Type(); + if (node.IsArg()) VLOG(4) << node.AsArg().name << " " << node.AsArg().id; for (auto *in : node.inlinks) { CHECK(in->outlinks.end() != std::find(in->outlinks.begin(), in->outlinks.end(), &node)); @@ -121,6 +123,7 @@ void SSAGraph::Build(const Program &program, std::unordered_map arg_update_node_map_; for (auto &op : program.ops()) { + VLOG(3) << op->op_info()->Type(); auto *op_node = GraphCreateInstructNode(op, valid_places); for (const std::string &name : op->op_info()->input_names()) { mir::Node *arg_node = nullptr; diff --git a/paddle/fluid/lite/core/mir/ssa_graph.h b/paddle/fluid/lite/core/mir/ssa_graph.h index 7c0e6cef498c5c555c1cee6ab334e6be556a9897..0a6f4022dd90f45013ae52f795ecd1f1591c0f7a 100644 --- a/paddle/fluid/lite/core/mir/ssa_graph.h +++ b/paddle/fluid/lite/core/mir/ssa_graph.h @@ -65,6 +65,10 @@ class SSAGraph : GraphBase { Node *GraphCreateInstructNode(const std::shared_ptr &op, const std::vector &valid_places); + // Device related attributes + const std::vector &valid_places() const { return valid_places_; } + void SetValidPlaces(const std::vector &x) { valid_places_ = x; } + private: mir::Node *Argument(const std::string &name); // Check the bidirectional connection. @@ -89,6 +93,7 @@ class SSAGraph : GraphBase { private: std::list node_storage_; std::map arguments_; + std::vector valid_places_; }; // Remove the link between a -> b. 
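Most of the churn in this diff is mechanical fallout from the Stmt refactor above: the public fields op, op_type, and valid_kernels become private state behind accessors, so call sites move from stmt->op to stmt->op(). A compressed sketch of the resulting shape, with toy OpLite/KernelBase stand-ins (the real class also carries ResetOp and picked_kernel):

```cpp
#include <iostream>
#include <memory>
#include <string>
#include <vector>

struct OpLite { std::string type; };      // stand-in
struct KernelBase { std::string name; };  // stand-in

class Stmt {
 public:
  void SetOp(const std::shared_ptr<OpLite>& op) { op_ = op; }
  const std::shared_ptr<OpLite> op() const { return op_; }
  std::string op_type() const { return op_->type; }  // derived, not stored
  std::vector<std::unique_ptr<KernelBase>>& kernels() {
    return valid_kernels_;
  }

 private:
  std::shared_ptr<OpLite> op_;  // was: public `op`
  std::vector<std::unique_ptr<KernelBase>> valid_kernels_;  // was: public
};

int main() {
  Stmt s;
  s.SetOp(std::make_shared<OpLite>(OpLite{"conv2d"}));
  s.kernels().emplace_back(new KernelBase{"conv2d/arm/float"});
  std::cout << s.op_type() << " -> " << s.kernels().front()->name << "\n";
}
```

Deriving op_type() from the operator instead of storing a second string also removes the stale-copy hazard the old duplicated field had, which is why the pattern-matcher tests switch to the new free-form desc member for their synthetic nodes.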
diff --git a/paddle/fluid/lite/core/mir/ssa_graph_test.cc b/paddle/fluid/lite/core/mir/ssa_graph_test.cc index 520fcf6e7502660aa4dcc3886f6a7af0b70abe58..f1a014e018368f55ad903053c68be93f16d2a8e9 100644 --- a/paddle/fluid/lite/core/mir/ssa_graph_test.cc +++ b/paddle/fluid/lite/core/mir/ssa_graph_test.cc @@ -17,7 +17,7 @@ #include #include "paddle/fluid/framework/program_desc.h" #include "paddle/fluid/lite/core/mir/graph_visualize_pass.h" -#include "paddle/fluid/lite/core/mir/passes.h" +#include "paddle/fluid/lite/core/mir/use_passes.h" #include "paddle/fluid/lite/core/op_registry.h" #include "paddle/fluid/lite/core/program_fake_utils.h" diff --git a/paddle/fluid/lite/core/mir/static_kernel_pick_pass.cc b/paddle/fluid/lite/core/mir/static_kernel_pick_pass.cc index 9d48c123a0c8e322f3ec6eb2b9788b9f115e9247..93ee96bbf0a0d1be8a4be7b4ce5f8c9e9b616498 100644 --- a/paddle/fluid/lite/core/mir/static_kernel_pick_pass.cc +++ b/paddle/fluid/lite/core/mir/static_kernel_pick_pass.cc @@ -37,9 +37,9 @@ void StaticKernelPickPass::Apply(const std::unique_ptr& graph) { if (!node.IsStmt()) continue; auto& instruct = node.AsStmt(); std::vector>> scored; - CHECK(!instruct.valid_kernels.empty()) << "No kernels found for " - << instruct.op_type; - for (auto&& kernel : instruct.valid_kernels) { + CHECK(!instruct.kernels().empty()) << "No kernels found for " + << instruct.op_type(); + for (auto&& kernel : instruct.kernels()) { size_t score = KernelGrade(*kernel); scored.emplace_back(score, std::move(kernel)); } @@ -49,9 +49,9 @@ void StaticKernelPickPass::Apply(const std::unique_ptr& graph) { // Move kernel back // Just keep a single best kernel. // TODO(Superjomn) reconsider this. - instruct.valid_kernels.clear(); - instruct.valid_kernels.emplace_back(std::move(scored.front().second)); - VLOG(2) << "pick " << instruct.valid_kernels.front()->name(); + instruct.kernels().clear(); + instruct.kernels().emplace_back(std::move(scored.front().second)); + VLOG(2) << "pick " << instruct.kernels().front()->name(); } } diff --git a/paddle/fluid/lite/core/mir/type_target_transform_pass.cc b/paddle/fluid/lite/core/mir/type_target_transform_pass.cc index 12dd2dcff0607bea46f41e7f5698ad2fb7e12404..951e3423e56f07d45fde38484769a5de5c67f2cc 100644 --- a/paddle/fluid/lite/core/mir/type_target_transform_pass.cc +++ b/paddle/fluid/lite/core/mir/type_target_transform_pass.cc @@ -62,7 +62,7 @@ void TypeTargetTransformPass::ComplementInputs(SSAGraph* graph, Node* inst_node, CHECK(in->AsArg().type); if (!TargetCompatibleTo(*in->AsArg().type, *decl_arg_type)) { LOG(INFO) << "found Target unmatched tensor: " << in->AsArg().name - << " for kernel " << inst.op->DebugString() << " " + << " for kernel " << inst.op()->DebugString() << " " << *in->AsArg().type << " -> " << *decl_arg_type; // Add an IoCopy instruction to make the input compatible with other dist. AddIoCopyInst(*in->AsArg().type, *decl_arg_type, in, graph, inst_node, @@ -89,7 +89,7 @@ void TypeTargetTransformPass::AddIoCopyInst( CHECK(io_copy_op) << "create op [" << io_copy_op << "] failed"; // CHECK(io_copy_op); // Create the new var manually. - inst_node->AsStmt().op->scope()->Var(io_copy_output_name); + inst_node->AsStmt().op()->scope()->Var(io_copy_output_name); // Create IoCopy Instruction. 
cpp::OpDesc op_desc; @@ -97,7 +97,7 @@ void TypeTargetTransformPass::AddIoCopyInst( op_desc.SetInput("Input", {in->AsArg().name}); op_desc.SetOutput("Out", {io_copy_output_name}); - io_copy_op->Attach(op_desc, inst_node->AsStmt().op->scope()); + io_copy_op->Attach(op_desc, inst_node->AsStmt().op()->scope()); auto kernels = io_copy_op->CreateKernels(valid_places); io_copy_inst->AsStmt("io_copy", std::move(kernels), io_copy_op); @@ -113,19 +113,19 @@ void TypeTargetTransformPass::AddIoCopyInst( DirectedLink(io_copy_output_arg, inst_node); // reset opdesc and update kernel information - UpdateInputTo(inst_node->AsStmt().op->mutable_op_info(), in->AsArg().name, + UpdateInputTo(inst_node->AsStmt().op()->mutable_op_info(), in->AsArg().name, io_copy_output_name); - inst_node->AsStmt().op->Attach(*inst_node->AsStmt().op->op_info(), - inst_node->AsStmt().op->scope()); + inst_node->AsStmt().ResetOp(*inst_node->AsStmt().op_info(), + graph->valid_places()); std::string tmp; if (inst_node->AsStmt().op_info()->GetInputArgname("a", &tmp)) { CHECK(false) << "get old a " << tmp; } - for (auto& kernel : inst_node->AsStmt().valid_kernels) { - inst_node->AsStmt().op->AttachKernel(kernel.get()); + for (auto& kernel : inst_node->AsStmt().kernels()) { + inst_node->AsStmt().op()->AttachKernel(kernel.get()); } graph->CheckValid(); diff --git a/paddle/fluid/lite/core/mir/passes.h b/paddle/fluid/lite/core/mir/use_passes.h similarity index 91% rename from paddle/fluid/lite/core/mir/passes.h rename to paddle/fluid/lite/core/mir/use_passes.h index cea15e12f64e94f4b4acfdd91e73a2abf4f05dee..5203ad3f141b4580aab8eaea4170d19831049e07 100644 --- a/paddle/fluid/lite/core/mir/passes.h +++ b/paddle/fluid/lite/core/mir/use_passes.h @@ -15,12 +15,6 @@ #pragma once #include "paddle/fluid/lite/core/mir/pass_registry.h" -namespace paddle { -namespace lite { -namespace mir {} // namespace mir -} // namespace lite -} // namespace paddle - #ifndef LITE_WITH_LIGHT_WEIGHT_FRAMEWORK USE_MIR_PASS(demo); USE_MIR_PASS(static_kernel_pick_pass); @@ -30,9 +24,11 @@ USE_MIR_PASS(generate_program_pass); USE_MIR_PASS(io_copy_kernel_pick_pass); USE_MIR_PASS(argument_type_display_pass); #endif + USE_MIR_PASS(runtime_context_assign_pass); USE_MIR_PASS(lite_conv_bn_fuse_pass); USE_MIR_PASS(graph_visualze); USE_MIR_PASS(lite_fc_fuse_pass); +USE_MIR_PASS(identity_scale_eliminate_pass); USE_MIR_PASS(lite_conv_elementwise_add_activation_fuse_pass); USE_MIR_PASS(lite_elementwise_add_activation_fuse_pass); diff --git a/paddle/fluid/lite/core/mir/variable_place_inference_pass.h b/paddle/fluid/lite/core/mir/variable_place_inference_pass.h index 2128c6d2014bf8879743ebf7190b3a95a3bc4186..0a5b3c341ab7e6661903bb189bf4ee8452ccec32 100644 --- a/paddle/fluid/lite/core/mir/variable_place_inference_pass.h +++ b/paddle/fluid/lite/core/mir/variable_place_inference_pass.h @@ -39,7 +39,7 @@ class VariablePlaceInferencePass : public DebugPass { for (const auto& v : graph->inputs()) { // the feed op might in the inputs if (v->IsStmt()) { - LOG(INFO) << "found kernel in inputs " << v->AsStmt().op_type; + LOG(INFO) << "found kernel in inputs " << v->AsStmt().op_type(); continue; } } @@ -59,10 +59,10 @@ class VariablePlaceInferencePass : public DebugPass { for (auto& x : graph->StmtTopologicalOrder()) { auto& inst = x->AsStmt(); // The IoCopyOp is a tool operator, it won't support the type inference. 
- if (inst.op_type == "io_copy") continue; + if (inst.op_type() == "io_copy") continue; // LOG(INFO) << "- inferencing type " << // deal with inputs - VLOG(4) << "inferencing op " << inst.op_type; + VLOG(4) << "Infering op " << inst.op_info()->Repr(); // TODO(zhaolong): Add check if the node's name in op's arguments. auto get_argname = [&]( @@ -90,12 +90,14 @@ class VariablePlaceInferencePass : public DebugPass { } } + VLOG(3) << "inst " << inst.op_info()->Repr(); for (auto* x_out : x->outlinks) { std::string node_name = x_out->AsArg().name; std::string arg_name = get_argname(node_name, inst.op_info()->outputs()); CHECK(arg_name.size() > 0) << "can not found op arguments for node " - << node_name; + << node_name << " in Inst " + << inst.op_type(); VLOG(3) << "-- output arg_name " << arg_name; auto type = inst.picked_kernel().GetOutputDeclType(arg_name); if (!x_out->AsArg().type) { diff --git a/paddle/fluid/lite/core/mir/variable_place_inference_pass_test.cc b/paddle/fluid/lite/core/mir/variable_place_inference_pass_test.cc index d6b8561c378cb2c18c159d6432cb09ac0a08ca0c..60fb873670029160c5895372f07b38834b0c9cb5 100644 --- a/paddle/fluid/lite/core/mir/variable_place_inference_pass_test.cc +++ b/paddle/fluid/lite/core/mir/variable_place_inference_pass_test.cc @@ -13,7 +13,7 @@ // limitations under the License. #include -#include "paddle/fluid/lite/core/mir/passes.h" +#include "paddle/fluid/lite/core/mir/use_passes.h" #include "paddle/fluid/lite/core/optimizer.h" #include "paddle/fluid/lite/core/program_fake_utils.h" #include "paddle/fluid/lite/kernels/cuda/use_kernels.h" diff --git a/paddle/fluid/lite/core/op_lite.cc b/paddle/fluid/lite/core/op_lite.cc index 484d22abf52dda9832b524146114e2b2e093bb99..31c339a5e63a2c49134d43b8357bae519bf3a29f 100644 --- a/paddle/fluid/lite/core/op_lite.cc +++ b/paddle/fluid/lite/core/op_lite.cc @@ -61,7 +61,6 @@ std::vector> OpLite::CreateKernels( targets.insert(place.target); } - // CHECK(!kernels.empty()) << "No kernel found for Op " << op_type_; VLOG(2) << "op " << op_type_ << " get " << kernels.size() << " kernels"; return kernels; } @@ -83,7 +82,7 @@ bool OpLite::Attach(const cpp::OpDesc &opdesc, lite::Scope *scope) { scope_ = scope; op_info_.reset( new OpInfo(opdesc)); // Force clean the out-of-date infomation. 
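The rename of passes.h to use_passes.h above matches what the header actually does: it is a list of USE_MIR_PASS anchors that keep otherwise-unreferenced pass registrations from being dropped at link time. A single-file sketch of that registration-plus-anchor idiom; the macro bodies here are hypothetical stand-ins, not Lite's real REGISTER_MIR_PASS/USE_MIR_PASS definitions:

```cpp
#include <iostream>
#include <map>
#include <string>

std::map<std::string, int>& Registry() {
  static std::map<std::string, int> r;  // passes registered by name
  return r;
}

// REGISTER defines a global whose constructor registers the pass, plus an
// int "touch" symbol that USE can reference from another translation unit.
#define REGISTER_MIR_PASS(name)             \
  static struct Reg_##name {                \
    Reg_##name() { Registry()[#name] = 1; } \
  } reg_##name;                             \
  int mir_pass_touch_##name = 0;

// Referencing the touch symbol forces the registering object file to link.
#define USE_MIR_PASS(name)          \
  extern int mir_pass_touch_##name; \
  static int use_##name = mir_pass_touch_##name;

REGISTER_MIR_PASS(demo_pass)  // normally in the pass's own .cc file
USE_MIR_PASS(demo_pass)       // normally via use_passes.h in the binary

int main() { std::cout << Registry().count("demo_pass") << "\n"; }  // 1
```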
- return AttachImpl(opdesc, scope); + return AttachImpl(*op_info(), scope); } const Tensor *OpLite::GetTensor(lite::Scope *scope, diff --git a/paddle/fluid/lite/core/op_lite.h b/paddle/fluid/lite/core/op_lite.h index 41aa3bb0f6da1d3002ef3d8d6274244c19687fdb..cd7d9ef84494f2d07859d7008187119ff75eefb1 100644 --- a/paddle/fluid/lite/core/op_lite.h +++ b/paddle/fluid/lite/core/op_lite.h @@ -54,9 +54,7 @@ class OpLite : public Registry { OpLite() = default; explicit OpLite(const std::string &type) : op_type_(type) {} explicit OpLite(const std::vector &valid_places) - : valid_places_(valid_places) { - LOG(INFO) << "valid places " << valid_places.size(); - } + : valid_places_(valid_places) {} void SetValidPlaces(const std::vector &places) { VLOG(3) << "valid places " << valid_places_.size(); @@ -199,6 +197,22 @@ class OpInfo : public cpp::OpDesc { } return false; } + + void UpdateAllInputs(const std::string &from, const std::string &to) { + for (auto &item : inputs_) { + for (auto &var : item.second) { + if (var == from) var = to; + } + } + } + + void UpdateAllOutputs(const std::string &from, const std::string &to) { + for (auto &item : outputs_) { + for (auto &var : item.second) { + if (var == from) var = to; + } + } + } }; } // namespace lite diff --git a/paddle/fluid/lite/core/optimizer.h b/paddle/fluid/lite/core/optimizer.h index c2c1121f53e100ffc747579d6ad826459b47c169..b936a139cbcede98cdf79ca744abab04f87d93f4 100644 --- a/paddle/fluid/lite/core/optimizer.h +++ b/paddle/fluid/lite/core/optimizer.h @@ -43,6 +43,8 @@ class Optimizer { CHECK(!graph_) << "duplicate optimize found"; graph_.reset(new mir::SSAGraph); graph_->Build(program, valid_places); + graph_->SetValidPlaces(valid_places); + SpecifyKernelPickTactic(kernel_pick_factor); InitTargetTypeTransformPass(); @@ -51,6 +53,7 @@ class Optimizer { "lite_conv_bn_fuse_pass", // "lite_conv_elementwise_add_activation_fuse_pass", // "lite_fc_fuse_pass", // + "identity_scale_eliminate_pass", // #ifdef LITE_WITH_LIGHT_WEIGHT_FRAMEWORK "lite_elementwise_add_activation_fuse_pass", // #endif diff --git a/paddle/fluid/lite/core/optimizer_test.cc b/paddle/fluid/lite/core/optimizer_test.cc index ae543dc1b19768a9147af1c3114b46c546318eb2..4d66f769811737d568f7942779744af751cca2af 100644 --- a/paddle/fluid/lite/core/optimizer_test.cc +++ b/paddle/fluid/lite/core/optimizer_test.cc @@ -18,8 +18,8 @@ #include #include "paddle/fluid/lite/core/mir/generate_program_pass.h" #include "paddle/fluid/lite/core/mir/pass_manager.h" -#include "paddle/fluid/lite/core/mir/passes.h" #include "paddle/fluid/lite/core/mir/static_kernel_pick_pass.h" +#include "paddle/fluid/lite/core/mir/use_passes.h" #include "paddle/fluid/lite/core/program_fake_utils.h" namespace paddle { diff --git a/paddle/fluid/lite/core/profile/basic_profiler.cc b/paddle/fluid/lite/core/profile/basic_profiler.cc index 86d5cd39ea99a3b1433a0eadc4ffc06b00a221c7..75b1a48d3adea9be3e9f15da2b0f1001dd3c414f 100644 --- a/paddle/fluid/lite/core/profile/basic_profiler.cc +++ b/paddle/fluid/lite/core/profile/basic_profiler.cc @@ -19,7 +19,7 @@ namespace lite { namespace profile { const int BasicTimer::data_w = 10; -const int BasicTimer::name_w = 10; +const int BasicTimer::name_w = 15; } // namespace profile } // namespace lite diff --git a/paddle/fluid/lite/core/program.h b/paddle/fluid/lite/core/program.h index 2f3e078462a7a5ff61217aa6c10b6e3973a29143..46da1815f197a2107a2ab3c3d844f1c4d87b44f2 100644 --- a/paddle/fluid/lite/core/program.h +++ b/paddle/fluid/lite/core/program.h @@ -140,7 +140,7 @@ class RuntimeProgram { 
void Run() { for (auto& inst : instructions_) { - VLOG(4) << ">> Running kernel: " << inst; + VLOG(3) << ">> Running kernel: " << inst.op()->op_info()->Repr(); inst.Run(); } } diff --git a/paddle/fluid/lite/core/tensor.h b/paddle/fluid/lite/core/tensor.h index 27677e23a27366d052001a6828f12d1cfcc5decb..2c001c84e4c98f68ebc90729ba8bfbc4acdde6d3 100644 --- a/paddle/fluid/lite/core/tensor.h +++ b/paddle/fluid/lite/core/tensor.h @@ -91,6 +91,18 @@ class DDimBase { return os; } + friend bool operator==(const DDimBase &a, const DDimBase &b) { + if (a.size() != b.size()) return false; + for (size_t i = 0; i < a.size(); i++) { + if (a[i] != b[i]) return false; + } + return true; + } + + friend bool operator!=(const DDimBase &a, const DDimBase &b) { + return !(a == b); + } + private: DDimT *self() { return static_cast(this); } const DDimT *const_self() const { return static_cast(this); } @@ -154,6 +166,7 @@ class TensorBase { const void *raw_data() const { return const_self()->data(); } size_t data_size() const { return const_self()->dims().production(); } + size_t memory_size() const { return const_self()->memory_size(); } void ShareDataWith(const TensorBase &other) { self()->ShareDataWith(other); } void CopyDataFrom(const TensorBase &other) { self()->CopyDataFrom(other); } @@ -175,5 +188,12 @@ class TensorBase { } }; +template +bool TensorCompareWith(const TensorT &a, const TensorT &b) { + if (a.dims() != b.dims()) return false; + if (memcmp(a.raw_data(), b.raw_data(), a.data_size()) != 0) return false; + return true; +} + } // namespace lite } // namespace paddle diff --git a/paddle/fluid/lite/kernels/arm/CMakeLists.txt b/paddle/fluid/lite/kernels/arm/CMakeLists.txt index 040b80c113162e0a87325c0bf353522677dbc9c8..95c8b95ec16aef37c6642df98c2b011b1d3a15a8 100644 --- a/paddle/fluid/lite/kernels/arm/CMakeLists.txt +++ b/paddle/fluid/lite/kernels/arm/CMakeLists.txt @@ -11,7 +11,7 @@ cc_library(scale_compute_arm SRCS scale_compute.cc DEPS ${lite_kernel_deps} math cc_library(softmax_compute_arm SRCS softmax_compute.cc DEPS ${lite_kernel_deps} math_arm) cc_library(conv_compute_arm SRCS conv_compute.cc DEPS ${lite_kernel_deps} math_arm) cc_library(batch_norm_compute_arm SRCS batch_norm_compute.cc DEPS ${lite_kernel_deps} math_arm) -cc_library(elementwise_add_compute_arm SRCS elementwise_add_compute.cc DEPS ${lite_kernel_deps} math_arm) +cc_library(elementwise_compute_arm SRCS elementwise_compute.cc DEPS ${lite_kernel_deps} math_arm) cc_library(pool_compute_arm SRCS pool_compute.cc DEPS ${lite_kernel_deps} math_arm) cc_library(split_compute_arm SRCS split_compute.cc DEPS ${lite_kernel_deps} math_arm) cc_library(concat_compute_arm SRCS concat_compute.cc DEPS ${lite_kernel_deps} math_arm) @@ -24,7 +24,7 @@ lite_cc_test(test_scale_compute_arm SRCS scale_compute_test.cc DEPS scale_comput lite_cc_test(test_softmax_compute_arm SRCS softmax_compute_test.cc DEPS softmax_compute_arm) lite_cc_test(test_conv_compute_arm SRCS conv_compute_test.cc DEPS conv_compute_arm) lite_cc_test(test_batch_norm_compute_arm SRCS batch_norm_compute_test.cc DEPS batch_norm_compute_arm) -lite_cc_test(test_elementwise_add_compute_arm SRCS elementwise_add_compute_test.cc DEPS elementwise_add_compute_arm) +lite_cc_test(test_elementwise_compute_arm SRCS elementwise_compute_test.cc DEPS elementwise_compute_arm) lite_cc_test(test_pool_compute_arm SRCS pool_compute_test.cc DEPS pool_compute_arm) lite_cc_test(test_mul_compute_arm SRCS mul_compute_test.cc DEPS mul_compute_arm) lite_cc_test(test_split_compute_arm SRCS split_compute_test.cc DEPS 
split_compute_arm) @@ -40,7 +40,7 @@ set(arm_kernels softmax_compute_arm conv_compute_arm batch_norm_compute_arm - elementwise_add_compute_arm + elementwise_compute_arm pool_compute_arm split_compute_arm concat_compute_arm diff --git a/paddle/fluid/lite/kernels/arm/elementwise_add_compute.cc b/paddle/fluid/lite/kernels/arm/elementwise_add_compute.cc deleted file mode 100644 index e9d9f4927b7ee18b3e18efa69a00dcb1c813bf3b..0000000000000000000000000000000000000000 --- a/paddle/fluid/lite/kernels/arm/elementwise_add_compute.cc +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include "paddle/fluid/lite/kernels/arm/elementwise_add_compute.h" -#include "paddle/fluid/lite/arm/math/funcs.h" - -namespace paddle { -namespace lite { -namespace kernels { -namespace arm { - -void ElementwiseAddCompute::Run() { - auto& param = Param(); - const float* x_data = param.X->data(); - const float* y_data = param.Y->data(); - float* out_data = param.Out->mutable_data(); - int axis = param.axis; - auto x_dims = param.X->dims(); - auto y_dims = param.Y->dims(); - if (axis < 0) { - axis = x_dims.size() - y_dims.size(); - } - if (x_dims.size() == y_dims.size()) { - lite::arm::math::elementwise_add(x_data, y_data, out_data, - x_dims.production()); - } else { - int batch = 1; - int channels = 1; - int num = 1; - for (int i = 0; i < axis; ++i) { - batch *= x_dims[i]; - } - for (int i = 0; i < y_dims.size(); ++i) { - channels *= y_dims[i]; - } - for (int i = y_dims.size() + axis; i < x_dims.size(); ++i) { - num *= x_dims[i]; - } - lite::arm::math::elementwise_add_axis(x_data, y_data, out_data, batch, - channels, num); - } -} - -} // namespace arm -} // namespace kernels -} // namespace lite -} // namespace paddle - -REGISTER_LITE_KERNEL(elementwise_add, kARM, kFloat, kNCHW, - paddle::lite::kernels::arm::ElementwiseAddCompute, def) - .BindInput("X", {LiteType::GetTensorTy(TARGET(kARM))}) - .BindInput("Y", {LiteType::GetTensorTy(TARGET(kARM))}) - .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kARM))}) - .Finalize(); diff --git a/paddle/fluid/lite/kernels/arm/elementwise_add_compute_test.cc b/paddle/fluid/lite/kernels/arm/elementwise_add_compute_test.cc deleted file mode 100644 index 20b998dc6cfa8a9606fcf0f716470366fdd60338..0000000000000000000000000000000000000000 --- a/paddle/fluid/lite/kernels/arm/elementwise_add_compute_test.cc +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -#include "paddle/fluid/lite/kernels/arm/elementwise_add_compute.h" -#include -#include -#include "paddle/fluid/lite/core/op_registry.h" - -namespace paddle { -namespace lite { -namespace kernels { -namespace arm { - -TEST(elementwise_add_arm, retrive_op) { - auto elementwise_add = - KernelRegistry::Global().Create( - "elementwise_add"); - ASSERT_FALSE(elementwise_add.empty()); - ASSERT_TRUE(elementwise_add.front()); -} - -TEST(elementwise_add_arm, init) { - ElementwiseAddCompute elementwise_add; - ASSERT_EQ(elementwise_add.precision(), PRECISION(kFloat)); - ASSERT_EQ(elementwise_add.target(), TARGET(kARM)); -} - -template -void elementwise_add_compute_ref(const operators::ElementwiseParam& param) { - const dtype* x_data = param.X->data(); - const dtype* y_data = param.Y->data(); - dtype* out_data = param.Out->mutable_data(); - auto x_dims = param.X->dims(); - auto y_dims = param.Y->dims(); - int axis = param.axis; - if (axis < 0) { - axis = x_dims.size() - y_dims.size(); - } - int batch = 1; - int channels = 1; - int num = 1; - for (int i = 0; i < axis; ++i) { - batch *= x_dims[i]; - } - for (int i = 0; i < y_dims.size(); ++i) { - channels *= y_dims[i]; - } - for (int i = y_dims.size() + axis; i < x_dims.size(); ++i) { - num *= x_dims[i]; - } - for (int i = 0; i < batch; ++i) { - for (int j = 0; j < channels; ++j) { - int offset = (i * channels + j) * num; - const dtype* din_ptr = x_data + offset; - const dtype diny_data = y_data[j]; - dtype* dout_ptr = out_data + offset; - for (int k = 0; k < num; ++k) { - *dout_ptr = *din_ptr + diny_data; - dout_ptr++; - din_ptr++; - } - } - } -} - -TEST(elementwise_add, compute) { - ElementwiseAddCompute elementwise_add; - operators::ElementwiseParam param; - lite::Tensor x, y, output, output_ref; - - for (auto n : {1, 3, 4, 11}) { - for (auto c : {1, 3, 4, 11}) { - for (auto h : {1, 3, 4, 11}) { - for (auto w : {1, 3, 4, 11}) { - for (auto axis : {-1, 0, 1, 2, 3}) { - for (auto yd : - {std::vector({n}), std::vector({c}), - std::vector({h}), std::vector({w}), - std::vector({n, c}), std::vector({c, h}), - std::vector({h, w}), std::vector({n, c, h}), - std::vector({c, h, w}), - std::vector({n, c, h, w})}) { - auto x_dim = DDim(std::vector({n, c, h, w})); - auto y_dim = DDim(yd); - int axis_t = axis < 0 ? 
x_dim.size() - y_dim.size() : axis; - - if (axis_t + y_dim.size() > 4) continue; - bool flag = false; - for (int i = 0; i < y_dim.size(); i++) { - if (x_dim[i + axis_t] != y_dim[i]) flag = true; - } - if (flag) continue; - - x.Resize(x_dim); - y.Resize(y_dim); - output.Resize(x_dim); - output_ref.Resize(x_dim); - auto* x_data = x.mutable_data(); - auto* y_data = y.mutable_data(); - auto* output_data = output.mutable_data(); - auto* output_ref_data = output_ref.mutable_data(); - for (int i = 0; i < x_dim.production(); i++) { - x_data[i] = i; - } - for (int i = 0; i < y_dim.production(); i++) { - y_data[i] = i; - } - param.X = &x; - param.Y = &y; - param.axis = axis; - param.Out = &output; - elementwise_add.SetParam(param); - elementwise_add.Run(); - param.Out = &output_ref; - elementwise_add_compute_ref(param); - for (int i = 0; i < output.dims().production(); i++) { - EXPECT_NEAR(output_data[i], output_ref_data[i], 1e-5); - } - } - } - } - } - } - } -} - -} // namespace arm -} // namespace kernels -} // namespace lite -} // namespace paddle - -USE_LITE_KERNEL(elementwise_add, kARM, kFloat, kNCHW, def); diff --git a/paddle/fluid/lite/kernels/arm/elementwise_compute.cc b/paddle/fluid/lite/kernels/arm/elementwise_compute.cc new file mode 100644 index 0000000000000000000000000000000000000000..c3b9b41cde1e70ecef580f72cfbb6c558258631d --- /dev/null +++ b/paddle/fluid/lite/kernels/arm/elementwise_compute.cc @@ -0,0 +1,111 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
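Both the deleted kernel above and the replacement that follows reduce an axis-broadcast add to three extents: batch/channels/num in the old code, pre/n/post in the new is_broadcast(). They cover everything left of axis, the broadcast dims themselves, and everything right of them. A standalone reference of that decomposition on plain arrays, assuming y's shape is a contiguous slice of x's shape starting at axis:

```cpp
#include <cassert>
#include <vector>

// out[(i*n + j)*post + k] = x[(i*n + j)*post + k] + y[j]: the pre/n/post nest.
std::vector<float> BroadcastAdd(const std::vector<float>& x,
                                const std::vector<float>& y,
                                const std::vector<int>& x_dims, int axis,
                                int y_rank) {
  int pre = 1, n = 1, post = 1;
  for (int i = 0; i < axis; ++i) pre *= x_dims[i];
  for (int i = axis; i < axis + y_rank; ++i) n *= x_dims[i];
  for (int i = axis + y_rank; i < static_cast<int>(x_dims.size()); ++i)
    post *= x_dims[i];
  std::vector<float> out(x.size());
  for (int i = 0; i < pre; ++i)
    for (int j = 0; j < n; ++j)
      for (int k = 0; k < post; ++k) {
        int idx = (i * n + j) * post + k;
        out[idx] = x[idx] + y[j];
      }
  return out;
}

int main() {
  // x is 2x2x2; y broadcasts over the middle dim (axis = 1, rank-1 y).
  std::vector<float> x = {0, 1, 2, 3, 4, 5, 6, 7}, y = {10, 20};
  auto out = BroadcastAdd(x, y, {2, 2, 2}, /*axis=*/1, /*y_rank=*/1);
  assert(out[0] == 10 && out[2] == 22 && out[5] == 15);
}
```

The new is_broadcast() additionally CHECKs that each broadcast dim of y matches the corresponding dim of x, which the old kernel silently assumed.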
+ +#include "paddle/fluid/lite/kernels/arm/elementwise_compute.h" +#include +#include "paddle/fluid/lite/arm/math/funcs.h" + +namespace paddle { +namespace lite { +namespace kernels { +namespace arm { + +inline bool is_broadcast(const DDim& x_dims, const DDim& y_dims, int axis, + int* pre, int* n, int* post) { + if (axis < 0) { + axis = x_dims.size() - y_dims.size(); + } + if (x_dims.size() == y_dims.size()) { + return false; + } + *pre = 1; + *n = 1; + *post = 1; + for (int i = 0; i < axis; ++i) { + (*pre) *= x_dims[i]; + } + for (int i = 0; i < y_dims.size(); ++i) { + CHECK_EQ(x_dims[i + axis], y_dims[i]) << "Broadcast dimension mismatch."; + (*n) *= y_dims[i]; + } + for (int i = axis + y_dims.size(); i < x_dims.size(); ++i) { + (*post) *= x_dims[i]; + } + return true; +} + +void ElementwiseAddCompute::Run() { + auto& param = Param(); + const float* x_data = param.X->data(); + const float* y_data = param.Y->data(); + float* out_data = param.Out->mutable_data(); + int axis = param.axis; + auto x_dims = param.X->dims(); + auto y_dims = param.Y->dims(); + int pre, n, post; + if (is_broadcast(x_dims, y_dims, axis, &pre, &n, &post)) { + lite::arm::math::elementwise_add_broadcast(x_data, y_data, out_data, pre, n, + post); + } else { + lite::arm::math::elementwise_add(x_data, y_data, out_data, + x_dims.production()); + } +} + +void ElementwiseAddActivationCompute::Run() { + auto& param = Param(); + const float* x_data = param.X->data(); + const float* y_data = param.Y->data(); + float* out_data = param.Out->mutable_data(); + int axis = param.axis; + std::string act_type = param.act_type; + auto x_dims = param.X->dims(); + auto y_dims = param.Y->dims(); + int pre, n, post; + if (is_broadcast(x_dims, y_dims, axis, &pre, &n, &post)) { + if (act_type == "relu") { + lite::arm::math::elementwise_add_relu_broadcast(x_data, y_data, out_data, + pre, n, post); + } else { + LOG(FATAL) << "unsupported Activation type: " << act_type; + } + } else { + if (act_type == "relu") { + lite::arm::math::elementwise_add_relu(x_data, y_data, out_data, + x_dims.production()); + } else { + LOG(FATAL) << "unsupported Activation type: " << act_type; + } + } +} + +} // namespace arm +} // namespace kernels +} // namespace lite +} // namespace paddle + +REGISTER_LITE_KERNEL(elementwise_add, kARM, kFloat, kNCHW, + paddle::lite::kernels::arm::ElementwiseAddCompute, def) + .BindInput("X", {LiteType::GetTensorTy(TARGET(kARM))}) + .BindInput("Y", {LiteType::GetTensorTy(TARGET(kARM))}) + .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kARM))}) + .Finalize(); + +REGISTER_LITE_KERNEL( + fusion_elementwise_add_activation, kARM, kFloat, kNCHW, + paddle::lite::kernels::arm::ElementwiseAddActivationCompute, def) + .BindInput("X", {LiteType::GetTensorTy(TARGET(kARM))}) + .BindInput("Y", {LiteType::GetTensorTy(TARGET(kARM))}) + .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kARM))}) + .Finalize(); diff --git a/paddle/fluid/lite/kernels/arm/elementwise_add_compute.h b/paddle/fluid/lite/kernels/arm/elementwise_compute.h similarity index 85% rename from paddle/fluid/lite/kernels/arm/elementwise_add_compute.h rename to paddle/fluid/lite/kernels/arm/elementwise_compute.h index 9939509d0be25eadccdb563e802c98291dea751b..bb80c61221eea2acaad397895d3fbad880e9dce3 100644 --- a/paddle/fluid/lite/kernels/arm/elementwise_add_compute.h +++ b/paddle/fluid/lite/kernels/arm/elementwise_compute.h @@ -30,6 +30,14 @@ class ElementwiseAddCompute virtual ~ElementwiseAddCompute() = default; }; +class ElementwiseAddActivationCompute + : public KernelLite { + 
public: + void Run() override; + + virtual ~ElementwiseAddActivationCompute() = default; +}; + } // namespace arm } // namespace kernels } // namespace lite diff --git a/paddle/fluid/lite/kernels/arm/elementwise_compute_test.cc b/paddle/fluid/lite/kernels/arm/elementwise_compute_test.cc new file mode 100644 index 0000000000000000000000000000000000000000..9f87e6628bc2ae6d91af4638119954393f2d98cf --- /dev/null +++ b/paddle/fluid/lite/kernels/arm/elementwise_compute_test.cc @@ -0,0 +1,292 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/lite/kernels/arm/elementwise_compute.h" +#include +#include +#include +#include "paddle/fluid/lite/core/op_registry.h" + +namespace paddle { +namespace lite { +namespace kernels { +namespace arm { + +TEST(elementwise_add_arm, retrive_op) { + auto elementwise_add = + KernelRegistry::Global().Create( + "elementwise_add"); + ASSERT_FALSE(elementwise_add.empty()); + ASSERT_TRUE(elementwise_add.front()); +} + +TEST(elementwise_add_arm, init) { + ElementwiseAddCompute elementwise_add; + ASSERT_EQ(elementwise_add.precision(), PRECISION(kFloat)); + ASSERT_EQ(elementwise_add.target(), TARGET(kARM)); +} + +template +void elementwise_compute_ref(const operators::ElementwiseParam& param, + const std::string elt_type, + const std::string act_type) { + const dtype* x_data = param.X->data(); + const dtype* y_data = param.Y->data(); + dtype* out_data = param.Out->mutable_data(); + auto x_dims = param.X->dims(); + auto y_dims = param.Y->dims(); + int axis = param.axis; + if (axis < 0) { + axis = x_dims.size() - y_dims.size(); + } + int batch = 1; + int channels = 1; + int num = 1; + for (int i = 0; i < axis; ++i) { + batch *= x_dims[i]; + } + for (int i = 0; i < y_dims.size(); ++i) { + channels *= y_dims[i]; + } + for (int i = y_dims.size() + axis; i < x_dims.size(); ++i) { + num *= x_dims[i]; + } + // do elementwise add/sub/max... + if (elt_type == "add") { + for (int i = 0; i < batch; ++i) { + for (int j = 0; j < channels; ++j) { + int offset = (i * channels + j) * num; + const dtype* din_ptr = x_data + offset; + const dtype diny_data = y_data[j]; + dtype* dout_ptr = out_data + offset; + for (int k = 0; k < num; ++k) { + *dout_ptr = *din_ptr + diny_data; + dout_ptr++; + din_ptr++; + } + } + } + } else if (elt_type == "sub") { + for (int i = 0; i < batch; ++i) { + for (int j = 0; j < channels; ++j) { + int offset = (i * channels + j) * num; + const dtype* din_ptr = x_data + offset; + const dtype diny_data = y_data[j]; + dtype* dout_ptr = out_data + offset; + for (int k = 0; k < num; ++k) { + *dout_ptr = *din_ptr - diny_data; + dout_ptr++; + din_ptr++; + } + } + } + } else { + LOG(FATAL) << "unsupported Elementwise type: " << elt_type; + } + // do activation relu/sigmod... 
+ if (act_type.size() > 0) { + if (act_type == "relu") { + for (int i = 0; i < batch; ++i) { + for (int j = 0; j < channels; ++j) { + dtype* dout_ptr = out_data + (i * channels + j) * num; + for (int k = 0; k < num; ++k) { + *dout_ptr = *dout_ptr > 0.0f ? *dout_ptr : 0.0f; + dout_ptr++; + } + } + } + } else { + LOG(FATAL) << "unsupported Activation type: " << elt_type; + } + } +} + +TEST(elementwise_add, compute) { + ElementwiseAddCompute elementwise_add; + operators::ElementwiseParam param; + lite::Tensor x, y, output, output_ref; + +#if 1 + for (auto n : {1, 3, 4}) { + for (auto c : {1, 3, 4}) { + for (auto h : {1, 3, 4}) { + for (auto w : {1, 3, 4}) { + for (auto axis : {-1, 0, 1, 3}) { + for (auto yd : + {std::vector({n}), std::vector({c}), + std::vector({h}), std::vector({w}), + std::vector({n, c}), std::vector({c, h}), + std::vector({c, h, w}), + std::vector({n, c, h, w})}) { +#else + for (auto n : {1, 3, 4, 11}) { + for (auto c : {1, 3, 4, 11}) { + for (auto h : {1, 3, 4, 11}) { + for (auto w : {1, 3, 4, 11}) { + for (auto axis : {-1, 0, 1, 2, 3}) { + for (auto yd : + {std::vector({n}), std::vector({c}), + std::vector({h}), std::vector({w}), + std::vector({n, c}), std::vector({c, h}), + std::vector({h, w}), std::vector({n, c, h}), + std::vector({c, h, w}), + std::vector({n, c, h, w})}) { +#endif + auto x_dim = DDim(std::vector({n, c, h, w})); + auto y_dim = DDim(yd); + int axis_t = axis < 0 ? x_dim.size() - y_dim.size() : axis; + + if (axis_t + y_dim.size() > 4) continue; + bool flag = false; + for (int i = 0; i < y_dim.size(); i++) { + if (x_dim[i + axis_t] != y_dim[i]) flag = true; + } + if (flag) continue; + + x.Resize(x_dim); + y.Resize(y_dim); + output.Resize(x_dim); + output_ref.Resize(x_dim); + auto* x_data = x.mutable_data(); + auto* y_data = y.mutable_data(); + auto* output_data = output.mutable_data(); + auto* output_ref_data = output_ref.mutable_data(); + for (int i = 0; i < x_dim.production(); i++) { + x_data[i] = i; + } + for (int i = 0; i < y_dim.production(); i++) { + y_data[i] = i; + } + param.X = &x; + param.Y = &y; + param.axis = axis; + param.Out = &output; + elementwise_add.SetParam(param); + elementwise_add.Run(); + param.Out = &output_ref; + elementwise_compute_ref(param, "add", ""); + for (int i = 0; i < output.dims().production(); i++) { + EXPECT_NEAR(output_data[i], output_ref_data[i], 1e-5); + } + } + } + } + } + } + } +} + +TEST(fusion_elementwise_add_activation_arm, retrive_op) { + auto fusion_elementwise_add_activation = + KernelRegistry::Global().Create( + "fusion_elementwise_add_activation"); + ASSERT_FALSE(fusion_elementwise_add_activation.empty()); + ASSERT_TRUE(fusion_elementwise_add_activation.front()); +} + +TEST(fusion_elementwise_add_activation_arm, init) { + ElementwiseAddActivationCompute fusion_elementwise_add_activation; + ASSERT_EQ(fusion_elementwise_add_activation.precision(), PRECISION(kFloat)); + ASSERT_EQ(fusion_elementwise_add_activation.target(), TARGET(kARM)); +} + +TEST(fusion_elementwise_add_activation_arm, compute) { + ElementwiseAddActivationCompute fusion_elementwise_add_activation; + operators::FusionElementwiseActivationParam param; + lite::Tensor x, y, output, output_ref; + +#if 1 + for (auto act_type : {"relu"}) { + for (auto n : {1, 3, 4}) { + for (auto c : {1, 3, 4}) { + for (auto h : {1, 3, 4}) { + for (auto w : {1, 3, 4}) { + for (auto axis : {-1, 0, 1, 3}) { + for (auto yd : + {std::vector({n}), std::vector({c}), + std::vector({h}), std::vector({w}), + std::vector({n, c}), std::vector({h, w}), + std::vector({n, c, h}), 
+                    std::vector<int64_t>({n, c, h, w})}) {
+#else
+  for (auto act_type : {"relu"}) {
+    for (auto n : {1, 3, 4, 11}) {
+      for (auto c : {1, 3, 4, 11}) {
+        for (auto h : {1, 3, 4, 11}) {
+          for (auto w : {1, 3, 4, 11}) {
+            for (auto axis : {-1, 0, 1, 2, 3}) {
+              for (auto yd :
+                   {std::vector<int64_t>({n}), std::vector<int64_t>({c}),
+                    std::vector<int64_t>({h}), std::vector<int64_t>({w}),
+                    std::vector<int64_t>({n, c}), std::vector<int64_t>({c, h}),
+                    std::vector<int64_t>({h, w}),
+                    std::vector<int64_t>({n, c, h}),
+                    std::vector<int64_t>({c, h, w}),
+                    std::vector<int64_t>({n, c, h, w})}) {
+#endif
+                auto x_dim = DDim(std::vector<int64_t>({n, c, h, w}));
+                auto y_dim = DDim(yd);
+                int axis_t = axis < 0 ? x_dim.size() - y_dim.size() : axis;
+
+                if (axis_t + y_dim.size() > 4) continue;
+                bool flag = false;
+                for (int i = 0; i < y_dim.size(); i++) {
+                  if (x_dim[i + axis_t] != y_dim[i]) flag = true;
+                }
+                if (flag) continue;
+
+                x.Resize(x_dim);
+                y.Resize(y_dim);
+                output.Resize(x_dim);
+                output_ref.Resize(x_dim);
+                auto* x_data = x.mutable_data<float>();
+                auto* y_data = y.mutable_data<float>();
+                auto* output_data = output.mutable_data<float>();
+                auto* output_ref_data = output_ref.mutable_data<float>();
+                for (int i = 0; i < x_dim.production(); i++) {
+                  float sign = i % 3 == 0 ? -1.0f : 1.0f;
+                  x_data[i] = i * sign;
+                }
+                for (int i = 0; i < y_dim.production(); i++) {
+                  float sign = i % 2 == 0 ? 0.5f : -0.5f;
+                  y_data[i] = i * sign;
+                }
+                param.X = &x;
+                param.Y = &y;
+                param.axis = axis;
+                param.Out = &output;
+                param.act_type = act_type;
+                fusion_elementwise_add_activation.SetParam(param);
+                fusion_elementwise_add_activation.Run();
+                param.Out = &output_ref;
+                elementwise_compute_ref<float>(param, "add", act_type);
+                for (int i = 0; i < output.dims().production(); i++) {
+                  EXPECT_NEAR(output_data[i], output_ref_data[i], 1e-5);
+                }
+              }
+            }
+          }
+        }
+      }
+    }
+  }
+}
+
+}  // namespace arm
+}  // namespace kernels
+}  // namespace lite
+}  // namespace paddle
+
+USE_LITE_KERNEL(elementwise_add, kARM, kFloat, kNCHW, def);
+USE_LITE_KERNEL(fusion_elementwise_add_activation, kARM, kFloat, kNCHW, def);
diff --git a/paddle/fluid/lite/kernels/arm/softmax_compute_test.cc b/paddle/fluid/lite/kernels/arm/softmax_compute_test.cc
index 80a64f4eaf74288d0fff6431ad1707afcf1b9eb2..a984a5b3ade2f79e373bd8c571a658c04a8a61cd 100644
--- a/paddle/fluid/lite/kernels/arm/softmax_compute_test.cc
+++ b/paddle/fluid/lite/kernels/arm/softmax_compute_test.cc
@@ -80,12 +80,19 @@ TEST(softmax_arm, compute) {
   lite::Tensor x;
   lite::Tensor output;
   lite::Tensor output_ref;
-
+#if 1
+  for (auto n : {1, 3}) {
+    for (auto c : {1, 4}) {
+      for (auto h : {5, 1}) {
+        for (auto w : {1, 6}) {
+          for (auto axis : {-2, -1, 0, 1, 2}) {
+#else
   for (auto n : {1, 3, 4, 11}) {
     for (auto c : {1, 3, 11, 4}) {
       for (auto h : {3, 1, 11, 4}) {
         for (auto w : {1, 3, 4, 12}) {
           for (auto axis : {-4, -3, -2, -1, 0, 1, 2, 3}) {
+#endif
             x.Resize(DDim(std::vector<int64_t>({n, c, h, w})));
             output.Resize(DDim(std::vector<int64_t>({n, c, h, w})));
             output_ref.Resize(DDim(std::vector<int64_t>({n, c, h, w})));
diff --git a/paddle/fluid/lite/kernels/use_kernels.h b/paddle/fluid/lite/kernels/use_kernels.h
new file mode 100644
index 0000000000000000000000000000000000000000..2c06092e3856467c031abaf36c63bd61aef65bae
--- /dev/null
+++ b/paddle/fluid/lite/kernels/use_kernels.h
@@ -0,0 +1,56 @@
+// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+/*
+ * ATTENTION: this header file can only be included in .cc files.
+ */
+
+USE_LITE_KERNEL(feed, kHost, kAny, kAny, def);
+USE_LITE_KERNEL(fetch, kHost, kAny, kAny, def);
+
+#ifdef LITE_WITH_X86
+USE_LITE_KERNEL(relu, kX86, kFloat, kNCHW, def);
+USE_LITE_KERNEL(mul, kX86, kFloat, kNCHW, def);
+USE_LITE_KERNEL(fc, kX86, kFloat, kNCHW, def);
+USE_LITE_KERNEL(scale, kX86, kFloat, kNCHW, def);
+USE_LITE_KERNEL(square, kX86, kFloat, kNCHW, def);
+USE_LITE_KERNEL(elementwise_sub, kX86, kFloat, kNCHW, def);
+USE_LITE_KERNEL(elementwise_add, kX86, kFloat, kNCHW, def);
+USE_LITE_KERNEL(softmax, kX86, kFloat, kNCHW, def);
+USE_LITE_KERNEL(dropout, kX86, kFloat, kNCHW, def);
+USE_LITE_KERNEL(concat, kX86, kFloat, kNCHW, def);
+USE_LITE_KERNEL(conv2d, kX86, kFloat, kNCHW, def);
+USE_LITE_KERNEL(depthwise_conv2d, kX86, kFloat, kNCHW, def);
+USE_LITE_KERNEL(pool2d, kX86, kFloat, kNCHW, def);
+#endif
+
+#ifdef LITE_WITH_ARM
+USE_LITE_KERNEL(fc, kARM, kFloat, kNCHW, def);
+USE_LITE_KERNEL(mul, kARM, kFloat, kNCHW, def);
+USE_LITE_KERNEL(scale, kARM, kFloat, kNCHW, def);
+USE_LITE_KERNEL(conv2d, kARM, kFloat, kNCHW, def);
+USE_LITE_KERNEL(batch_norm, kARM, kFloat, kNCHW, def);
+USE_LITE_KERNEL(relu, kARM, kFloat, kNCHW, def);
+USE_LITE_KERNEL(depthwise_conv2d, kARM, kFloat, kNCHW, def);
+USE_LITE_KERNEL(pool2d, kARM, kFloat, kNCHW, def);
+USE_LITE_KERNEL(elementwise_add, kARM, kFloat, kNCHW, def);
+USE_LITE_KERNEL(softmax, kARM, kFloat, kNCHW, def);
+#endif
+
+#ifdef LITE_WITH_CUDA
+USE_LITE_KERNEL(mul, kCUDA, kFloat, kNCHW, def);
+USE_LITE_KERNEL(io_copy, kCUDA, kAny, kAny, host_to_device);
+USE_LITE_KERNEL(io_copy, kCUDA, kAny, kAny, device_to_host);
+#endif
diff --git a/paddle/fluid/lite/kernels/x86/CMakeLists.txt b/paddle/fluid/lite/kernels/x86/CMakeLists.txt
index c2845fb9b21b2e4d0bb7ff378676d4531212db52..35c61376153e64690f40836812079a20c6c4dc49 100644
--- a/paddle/fluid/lite/kernels/x86/CMakeLists.txt
+++ b/paddle/fluid/lite/kernels/x86/CMakeLists.txt
@@ -18,6 +18,18 @@
 cc_library(concat_compute_x86 SRCS concat_compute.cc DEPS ${lite_kernel_deps} )
 cc_library(conv_compute_x86 SRCS conv_compute.cc DEPS ${lite_kernel_deps} blas im2col vol2col)
 cc_library(pool_compute_x86 SRCS pool_compute.cc DEPS ${lite_kernel_deps} pooling)
+lite_cc_test(test_fc_compute_x86 SRCS fc_compute_test.cc DEPS fc_compute_x86)
+lite_cc_test(test_conv2d_compute_x86 SRCS conv_compute_test.cc DEPS conv_compute_x86)
+lite_cc_test(test_pool2d_compute_x86 SRCS pool_compute_test.cc DEPS pool_compute_x86)
+lite_cc_test(test_concat_compute_x86 SRCS concat_compute_test.cc DEPS concat_compute_x86)
+lite_cc_test(test_softmax_compute_x86 SRCS softmax_compute_test.cc DEPS softmax_compute_x86)
+lite_cc_test(test_elementwise_compute_x86 SRCS elementwise_compute_test.cc DEPS elementwise_compute_x86)
+lite_cc_test(test_relu_compute_x86 SRCS relu_compute_test.cc DEPS relu_compute_x86)
+lite_cc_test(test_mul_compute_x86 SRCS mul_compute_test.cc DEPS mul_compute_x86 operator)
+lite_cc_test(test_scale_compute_x86 SRCS scale_compute_test.cc DEPS scale_compute_x86)
+lite_cc_test(test_dropout_compute_x86 SRCS dropout_compute_test.cc DEPS dropout_compute_x86)
+
+
 set(x86_kernels
     activation_compute_x86
     elementwise_compute_x86
diff --git a/paddle/fluid/lite/kernels/x86/concat_compute.cc b/paddle/fluid/lite/kernels/x86/concat_compute.cc
index 23ae8ca505559cb1fc45b5976f6203a86128ddf0..4e1872951d74335a3bad97597a0104fe54f52d25 100644
--- a/paddle/fluid/lite/kernels/x86/concat_compute.cc
+++ b/paddle/fluid/lite/kernels/x86/concat_compute.cc
@@ -12,88 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-#include <vector>
-#include "paddle/fluid/lite/core/kernel.h"
-#include "paddle/fluid/lite/core/op_registry.h"
-#include "paddle/fluid/lite/core/types.h"
-#include "paddle/fluid/operators/strided_memcpy.h"
-
-namespace paddle {
-namespace lite {
-namespace kernels {
-namespace x86 {
-
-template <typename T>
-class ConcatCompute : public KernelLite<TARGET(kX86), PRECISION(kFloat)> {
- public:
-  using param_t = operators::ConcatParam;
-
-  void Run() override {
-    auto& param = *param_.get_mutable<param_t>();
-    int64_t axis = static_cast<int64_t>(param.axis);
-    auto out = param.output;
-
-    if (axis == 0 && param.x.size() < 10) {
-      size_t output_offset = 0;
-      for (auto* in : param.x) {
-        if (!in || in->dims().production() == 0UL) {
-          continue;
-        }
-        auto in_stride = framework::stride_numel(in->dims().data());
-        auto out_stride = framework::stride_numel(out->dims().data());
-        paddle::operators::StridedNumelCopyWithAxis<T>(
-            platform::CPUDeviceContext(), axis,
-            out->mutable_data<T>() + output_offset, out_stride, in->data<T>(),
-            in_stride, in_stride[axis]);
-
-        output_offset += in_stride[axis];
-      }
-    } else {
-      std::vector<lite::Tensor> inputs;
-      for (size_t j = 0; j < param.x.size(); ++j) {
-        if (param.x[j] && param.x[j]->dims().production() > 0) {
-          inputs.push_back(*param.x[j]);
-        } else {
-          continue;
-        }
-      }
-
-      int num = inputs.size();
-      int rows = 1;
-      auto dim_0 = inputs[0].dims();
-      for (int i = 0; i < axis; ++i) {
-        rows *= dim_0[i];
-      }
-      int out_rows = rows, out_cols = 0;
-
-      std::vector<int64_t> input_cols(inputs.size());
-      for (int i = 0; i < num; ++i) {
-        int t_cols = inputs[i].dims().production() / rows;
-        out_cols += t_cols;
-        input_cols[i] = t_cols;
-      }
-      // computation
-      auto output_data = param.output->template mutable_data<T>();
-      int col_idx = 0;
-      for (int j = 0; j < num; ++j) {
-        int col_len = input_cols[j];
-        auto input_data = inputs[j].data<T>();
-        for (int k = 0; k < out_rows; ++k) {
-          std::memcpy(output_data + k * out_cols + col_idx,
-                      input_data + k * col_len, sizeof(T) * col_len);
-        }
-        col_idx += col_len;
-      }
-    }
-  }
-
-  virtual ~ConcatCompute() = default;
-};
-
-}  // namespace x86
-}  // namespace kernels
-}  // namespace lite
-}  // namespace paddle
+#include "paddle/fluid/lite/kernels/x86/concat_compute.h"
 
 REGISTER_LITE_KERNEL(concat, kX86, kFloat, kNCHW,
                      paddle::lite::kernels::x86::ConcatCompute<float>, def)
diff --git a/paddle/fluid/lite/kernels/x86/concat_compute.h b/paddle/fluid/lite/kernels/x86/concat_compute.h
new file mode 100644
index 0000000000000000000000000000000000000000..67c2f40f2c197ca3fb1c09fca4a9145a27c4a6fd
--- /dev/null
+++ b/paddle/fluid/lite/kernels/x86/concat_compute.h
@@ -0,0 +1,98 @@
+// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#pragma once
+
+#include <string>
+#include <vector>
+#include "paddle/fluid/lite/core/kernel.h"
+#include "paddle/fluid/lite/core/op_registry.h"
+#include "paddle/fluid/lite/core/types.h"
+#include "paddle/fluid/operators/strided_memcpy.h"
+
+namespace paddle {
+namespace lite {
+namespace kernels {
+namespace x86 {
+
+template <typename T>
+class ConcatCompute : public KernelLite<TARGET(kX86), PRECISION(kFloat)> {
+ public:
+  using param_t = operators::ConcatParam;
+
+  void Run() override {
+    auto& param = *param_.get_mutable<param_t>();
+    int64_t axis = static_cast<int64_t>(param.axis);
+    auto out = param.output;
+
+    if (axis == 0 && param.x.size() < 10) {
+      size_t output_offset = 0;
+      for (auto* in : param.x) {
+        if (!in || in->dims().production() == 0UL) {
+          continue;
+        }
+        auto in_stride = framework::stride_numel(in->dims().data());
+        auto out_stride = framework::stride_numel(out->dims().data());
+        paddle::operators::StridedNumelCopyWithAxis<T>(
+            platform::CPUDeviceContext(), axis,
+            out->mutable_data<T>() + output_offset, out_stride, in->data<T>(),
+            in_stride, in_stride[axis]);
+
+        output_offset += in_stride[axis];
+      }
+    } else {
+      std::vector<lite::Tensor> inputs;
+      for (size_t j = 0; j < param.x.size(); ++j) {
+        if (param.x[j] && param.x[j]->dims().production() > 0) {
+          inputs.push_back(*param.x[j]);
+        } else {
+          continue;
+        }
+      }
+
+      int num = inputs.size();
+      int rows = 1;
+      auto dim_0 = inputs[0].dims();
+      for (int i = 0; i < axis; ++i) {
+        rows *= dim_0[i];
+      }
+      int out_rows = rows, out_cols = 0;
+
+      std::vector<int64_t> input_cols(inputs.size());
+      for (int i = 0; i < num; ++i) {
+        int t_cols = inputs[i].dims().production() / rows;
+        out_cols += t_cols;
+        input_cols[i] = t_cols;
+      }
+      // computation
+      auto output_data = param.output->template mutable_data<T>();
+      int col_idx = 0;
+      for (int j = 0; j < num; ++j) {
+        int col_len = input_cols[j];
+        auto input_data = inputs[j].data<T>();
+        for (int k = 0; k < out_rows; ++k) {
+          std::memcpy(output_data + k * out_cols + col_idx,
+                      input_data + k * col_len, sizeof(T) * col_len);
+        }
+        col_idx += col_len;
+      }
+    }
+  }
+
+  virtual ~ConcatCompute() = default;
+};
+
+}  // namespace x86
+}  // namespace kernels
+}  // namespace lite
+}  // namespace paddle
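The generic branch of ConcatCompute above reduces concatenation to row-wise memcpy: each input is viewed as a rows x cols matrix, where rows is the product of the dimensions before the concat axis and cols is whatever remains. A minimal standalone sketch of that decomposition follows; it uses plain arrays and an illustrative helper name, not the Lite API, and assumes all inputs share the leading dimensions.

// Sketch of the rows/cols concat decomposition (illustrative only).
#include <cstring>
#include <vector>

std::vector<float> concat_rows_cols(const std::vector<const float*>& ins,
                                    const std::vector<std::vector<int>>& dims,
                                    int axis) {
  // rows = product of dims before `axis` (identical across inputs).
  int rows = 1;
  for (int i = 0; i < axis; ++i) rows *= dims[0][i];
  // cols[i] = remaining elements per row of input i.
  std::vector<int> cols(ins.size());
  int out_cols = 0;
  for (size_t i = 0; i < ins.size(); ++i) {
    int total = 1;
    for (int d : dims[i]) total *= d;
    cols[i] = total / rows;
    out_cols += cols[i];
  }
  // Copy each input row-by-row into its column slot of the output.
  std::vector<float> out(rows * out_cols);
  int col_off = 0;
  for (size_t i = 0; i < ins.size(); ++i) {
    for (int r = 0; r < rows; ++r) {
      std::memcpy(&out[r * out_cols + col_off], ins[i] + r * cols[i],
                  sizeof(float) * cols[i]);
    }
    col_off += cols[i];
  }
  return out;
}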
diff --git a/paddle/fluid/lite/kernels/x86/concat_compute_test.cc b/paddle/fluid/lite/kernels/x86/concat_compute_test.cc
new file mode 100644
index 0000000000000000000000000000000000000000..aa50dae9eb9e2bd2aef980cce6546972f5cdf89e
--- /dev/null
+++ b/paddle/fluid/lite/kernels/x86/concat_compute_test.cc
@@ -0,0 +1,83 @@
+// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/fluid/lite/kernels/x86/concat_compute.h"
+#include <gtest/gtest.h>
+#include <iostream>
+#include <vector>
+#include "paddle/fluid/lite/core/op_registry.h"
+
+namespace paddle {
+namespace lite {
+namespace kernels {
+namespace x86 {
+
+TEST(concat_x86, retrive_op) {
+  auto concat =
+      KernelRegistry::Global().Create<TARGET(kX86), PRECISION(kFloat)>(
+          "concat");
+  ASSERT_FALSE(concat.empty());
+  ASSERT_TRUE(concat.front());
+}
+
+TEST(concat_x86, init) {
+  ConcatCompute<float> concat;
+  ASSERT_EQ(concat.precision(), PRECISION(kFloat));
+  ASSERT_EQ(concat.target(), TARGET(kX86));
+}
+
+TEST(concat_x86, run_test) {
+  lite::Tensor x1, x2, out;
+  constexpr int batch_size = 1;
+  std::vector<int64_t> x1_shape{batch_size, 1, 3, 3};
+  x1.Resize(lite::DDim(x1_shape));
+  std::vector<int64_t> x2_shape{batch_size, 1, 3, 3};
+  x2.Resize(lite::DDim(x2_shape));
+
+  std::vector<lite::Tensor*> x = {&x1, &x2};
+
+  std::vector<int64_t> out_shape{batch_size, 2, 3, 3};
+  out.Resize(lite::DDim(out_shape));
+
+  auto x1_data = x1.mutable_data<float>();
+  auto x2_data = x2.mutable_data<float>();
+  auto out_data = out.mutable_data<float>();
+
+  for (int64_t i = 0; i < x1.dims().production(); i++) {
+    x1_data[i] = 1;
+    x2_data[i] = 2;
+  }
+
+  ConcatCompute<float> concat;
+  operators::ConcatParam param;
+  param.x = x;
+  param.output = &out;
+  param.axis = 1;
+
+  concat.SetParam(param);
+  concat.Run();
+
+  std::cout << "output: ";
+  for (int i = 0; i < out.dims().production(); i++) {
+    std::cout << out_data[i] << " ";
+  }
+  std::cout << std::endl;
+}
+
+}  // namespace x86
+}  // namespace kernels
+}  // namespace lite
+}  // namespace paddle
+
+USE_LITE_KERNEL(concat, kX86, kFloat, kNCHW, def);
diff --git a/paddle/fluid/lite/kernels/x86/conv_compute.cc b/paddle/fluid/lite/kernels/x86/conv_compute.cc
index b29161c1c60a3b628a97c2ad015ee3dcb1c601aa..7b674a038de00327443ee68196ee6a83e7923cea 100644
--- a/paddle/fluid/lite/kernels/x86/conv_compute.cc
+++ b/paddle/fluid/lite/kernels/x86/conv_compute.cc
@@ -12,144 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
-#include <algorithm>
-#include <string>
-#include <vector>
-#include "paddle/fluid/framework/eigen.h"
-#include "paddle/fluid/lite/core/kernel.h"
-#include "paddle/fluid/lite/core/op_registry.h"
-#include "paddle/fluid/lite/core/types.h"
-#include "paddle/fluid/lite/operators/conv_op.h"
-#include "paddle/fluid/operators/math/blas.h"
-#include "paddle/fluid/operators/math/depthwise_conv.h"
-#include "paddle/fluid/operators/math/im2col.h"
-#include "paddle/fluid/operators/math/vol2col.h"
-
-namespace paddle {
-namespace lite {
-namespace kernels {
-namespace x86 {
-
-inline bool IsExpand(const std::vector<int64_t>& filter_dim,
-                     const std::vector<int>& strides,
-                     const std::vector<int>& paddings,
-                     const std::vector<int>& dilations) {
-  bool filter_1 = true, strides_1 = true, padding_0 = true, dilation_1 = true;
-  for (size_t j = 0; j < strides.size(); ++j) {
-    filter_1 = filter_1 && (static_cast<int>(filter_dim[j + 2]) == 1);
-    strides_1 = strides_1 && (strides[j] == 1);
-    padding_0 = padding_0 && (paddings[j] == 0);
-    dilation_1 = dilation_1 && (dilations[j] == 1);
-  }
-  return !(filter_1 && strides_1 && padding_0 && dilation_1);
-}
-
-template <typename T>
-class Conv2dCompute : public KernelLite<TARGET(kX86), PRECISION(kFloat)> {
- public:
-  using param_t = operators::ConvParam;
-  void Run() override {
-    auto& param = *param_.get_mutable<param_t>();
-    lite::Tensor filter = *param.filter;
-    param.output->template mutable_data<T>();
-
-    const int batch_size = static_cast<int>(param.x->dims()[0]);
-
-    std::vector<int64_t> filter_shape_vec(filter.dims().Vectorize());
-    std::vector<int64_t> output_shape_vec(param.output->dims().Vectorize());
-
-    size_t data_dim = filter_shape_vec.size() - 2;
-    std::vector<int64_t> col_shape_vec(1 + 2 * data_dim);
-    col_shape_vec[0] = param.x->dims()[1] / param.groups;
-    for (size_t j = 0; j < data_dim; ++j) {
-      col_shape_vec[j + 1] = filter_shape_vec[j + 2];
-      col_shape_vec[j + 1 + data_dim] = output_shape_vec[j + 2];
-    }
-    lite::DDim col_shape(col_shape_vec);
-    lite::DDim col_matrix_shape = col_shape.Flattern2D(data_dim + 1);
-    bool is_expand = IsExpand(filter_shape_vec, param.strides, param.paddings,
-                              param.dilations);
-
-    lite::Tensor col;
-    lite::Tensor col_matrix;
-    if (is_expand) {
-      col.Resize(col_shape);
-      col.mutable_data<T>();
-      col_matrix.ShareDataWith(col);
-      col_matrix.Resize(col_matrix_shape);
-    }
-    lite::DDim input_shape = param.x->dims().Slice(1, param.x->dims().size());
-
-    lite::DDim filter_matrix_shape(std::vector<int64_t>{
-        filter.dims()[0], filter.dims().production() / filter.dims()[0]});
-    filter.Resize(filter_matrix_shape);
-
-    lite::DDim output_matrix_shape(std::vector<int64_t>{
-        param.output->dims()[1],
-        param.output->dims().production() /
-            (param.output->dims()[0] * param.output->dims()[1])});
-
-    int in_step = static_cast<int>(param.x->dims()[1]) / param.groups;
-    int out_step = static_cast<int>(param.output->dims()[1]) / param.groups;
-
-    paddle::operators::math::Vol2ColFunctor<platform::CPUDeviceContext, T>
-        vol2col;
-    paddle::operators::math::Im2ColFunctor<
-        paddle::operators::math::ColFormat::kCFO, platform::CPUDeviceContext, T>
-        im2col;
-    auto blas = paddle::operators::math::GetBlas<platform::CPUDeviceContext, T>(
-        platform::CPUDeviceContext());
-    for (int i = 0; i < batch_size; i++) {
-      lite::Tensor in_batch;
-      in_batch.ShareDataWith(
-          param.x->raw_tensor().Slice(i, i + 1).Resize(input_shape.data()));
-      lite::Tensor out_batch;
-      out_batch.ShareDataWith(param.output->raw_tensor().Slice(i, i + 1).Resize(
-          output_matrix_shape.data()));
-
-      for (int g = 0; g < param.groups; g++) {
-        lite::Tensor in_slice;
-        in_slice.ShareDataWith(
-            in_batch.raw_tensor().Slice(g * in_step, (g + 1) * in_step));
-
-        if (!is_expand) {
-          col.ShareDataWith(in_slice);
-          col_matrix.ShareDataWith(col);
-          col_matrix.Resize(col_matrix_shape);
-        } else if (data_dim == 2U) {
-          // im2col
-          im2col(platform::CPUDeviceContext(), in_slice.raw_tensor(),
-                 param.dilations, param.strides,
-                 std::vector<int>{param.paddings[0], param.paddings[1],
-                                  param.paddings[0], param.paddings[1]},
-                 &(col.raw_tensor()));
-        } else if (data_dim == 3U) {
-          // vol2col
-          vol2col(platform::CPUDeviceContext(), in_slice.raw_tensor(),
-                  param.dilations, param.strides, param.paddings,
-                  &(col.raw_tensor()));
-        }
-
-        // gemm
-        lite::Tensor out_slice;
-        out_slice.ShareDataWith(
-            out_batch.raw_tensor().Slice(g * out_step, (g + 1) * out_step));
-        lite::Tensor filter_slice;
-        filter_slice.ShareDataWith(
-            filter.raw_tensor().Slice(g * out_step, (g + 1) * out_step));
-        blas.MatMul(filter_slice.raw_tensor(), false, col_matrix.raw_tensor(),
-                    false, T(1.0), &(out_slice.raw_tensor()), T(0.0));
-      }
-    }
-  }
-
-  virtual ~Conv2dCompute() = default;
-};
-
-}  // namespace x86
-}  // namespace kernels
-}  // namespace lite
-}  // namespace paddle
+#include "paddle/fluid/lite/kernels/x86/conv_compute.h"
 
 REGISTER_LITE_KERNEL(conv2d, kX86, kFloat, kNCHW,
                      paddle::lite::kernels::x86::Conv2dCompute<float>, def)
diff --git a/paddle/fluid/lite/kernels/x86/conv_compute.h b/paddle/fluid/lite/kernels/x86/conv_compute.h
new file mode 100644
index 0000000000000000000000000000000000000000..4b3087792921ac689db3906160663e75ef0c7ed0
--- /dev/null
+++ b/paddle/fluid/lite/kernels/x86/conv_compute.h
@@ -0,0 +1,153 @@
+// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#pragma once
+
+#include <algorithm>
+#include <string>
+#include <vector>
+#include "paddle/fluid/framework/eigen.h"
+#include "paddle/fluid/lite/core/kernel.h"
+#include "paddle/fluid/lite/core/op_registry.h"
+#include "paddle/fluid/lite/core/types.h"
+#include "paddle/fluid/lite/operators/conv_op.h"
+#include "paddle/fluid/operators/math/blas.h"
+#include "paddle/fluid/operators/math/depthwise_conv.h"
+#include "paddle/fluid/operators/math/im2col.h"
+#include "paddle/fluid/operators/math/vol2col.h"
+
+namespace paddle {
+namespace lite {
+namespace kernels {
+namespace x86 {
+
+inline bool IsExpand(const std::vector<int64_t>& filter_dim,
+                     const std::vector<int>& strides,
+                     const std::vector<int>& paddings,
+                     const std::vector<int>& dilations) {
+  bool filter_1 = true, strides_1 = true, padding_0 = true, dilation_1 = true;
+  for (size_t j = 0; j < strides.size(); ++j) {
+    filter_1 = filter_1 && (static_cast<int>(filter_dim[j + 2]) == 1);
+    strides_1 = strides_1 && (strides[j] == 1);
+    padding_0 = padding_0 && (paddings[j] == 0);
+    dilation_1 = dilation_1 && (dilations[j] == 1);
+  }
+  return !(filter_1 && strides_1 && padding_0 && dilation_1);
+}
+
+template <typename T>
+class Conv2dCompute : public KernelLite<TARGET(kX86), PRECISION(kFloat)> {
+ public:
+  using param_t = operators::ConvParam;
+  void Run() override {
+    auto& param = *param_.get_mutable<param_t>();
+    lite::Tensor filter = *param.filter;
+    param.output->template mutable_data<T>();
+
+    const int batch_size = static_cast<int>(param.x->dims()[0]);
+
+    std::vector<int64_t> filter_shape_vec(filter.dims().Vectorize());
+    std::vector<int64_t> output_shape_vec(param.output->dims().Vectorize());
+
+    size_t data_dim = filter_shape_vec.size() - 2;
+    std::vector<int64_t> col_shape_vec(1 + 2 * data_dim);
+    col_shape_vec[0] = param.x->dims()[1] / param.groups;
+    for (size_t j = 0; j < data_dim; ++j) {
+      col_shape_vec[j + 1] = filter_shape_vec[j + 2];
+      col_shape_vec[j + 1 + data_dim] = output_shape_vec[j + 2];
+    }
+    lite::DDim col_shape(col_shape_vec);
+    lite::DDim col_matrix_shape = col_shape.Flattern2D(data_dim + 1);
+    bool is_expand = IsExpand(filter_shape_vec, param.strides, param.paddings,
+                              param.dilations);
+
+    lite::Tensor col;
+    lite::Tensor col_matrix;
+    if (is_expand) {
+      col.Resize(col_shape);
+      col.mutable_data<T>();
+      col_matrix.ShareDataWith(col);
+      col_matrix.Resize(col_matrix_shape);
+    }
+    lite::DDim input_shape = param.x->dims().Slice(1, param.x->dims().size());
+
+    lite::DDim filter_matrix_shape(std::vector<int64_t>{
+        filter.dims()[0], filter.dims().production() / filter.dims()[0]});
+    filter.Resize(filter_matrix_shape);
+
+    lite::DDim output_matrix_shape(std::vector<int64_t>{
+        param.output->dims()[1],
+        param.output->dims().production() /
+            (param.output->dims()[0] * param.output->dims()[1])});
+
+    int in_step = static_cast<int>(param.x->dims()[1]) / param.groups;
+    int out_step = static_cast<int>(param.output->dims()[1]) / param.groups;
+
+    paddle::operators::math::Vol2ColFunctor<platform::CPUDeviceContext, T>
+        vol2col;
+    paddle::operators::math::Im2ColFunctor<
+        paddle::operators::math::ColFormat::kCFO, platform::CPUDeviceContext, T>
+        im2col;
+    auto blas = paddle::operators::math::GetBlas<platform::CPUDeviceContext, T>(
+        platform::CPUDeviceContext());
+    for (int i = 0; i < batch_size; i++) {
+      lite::Tensor in_batch;
+      in_batch.ShareDataWith(
+          param.x->raw_tensor().Slice(i, i + 1).Resize(input_shape.data()));
+      lite::Tensor out_batch;
+      out_batch.ShareDataWith(param.output->raw_tensor().Slice(i, i + 1).Resize(
+          output_matrix_shape.data()));
+
+      for (int g = 0; g < param.groups; g++) {
+        lite::Tensor in_slice;
+        in_slice.ShareDataWith(
+            in_batch.raw_tensor().Slice(g * in_step, (g + 1) * in_step));
+
+        if (!is_expand) {
+          col.ShareDataWith(in_slice);
+          col_matrix.ShareDataWith(col);
+          col_matrix.Resize(col_matrix_shape);
+        } else if (data_dim == 2U) {
+          // im2col
+          im2col(platform::CPUDeviceContext(), in_slice.raw_tensor(),
+                 param.dilations, param.strides,
+                 std::vector<int>{param.paddings[0], param.paddings[1],
+                                  param.paddings[0], param.paddings[1]},
+                 &(col.raw_tensor()));
+        } else if (data_dim == 3U) {
+          // vol2col
+          vol2col(platform::CPUDeviceContext(), in_slice.raw_tensor(),
+                  param.dilations, param.strides, param.paddings,
+                  &(col.raw_tensor()));
+        }
+
+        // gemm
+        lite::Tensor out_slice;
+        out_slice.ShareDataWith(
+            out_batch.raw_tensor().Slice(g * out_step, (g + 1) * out_step));
+        lite::Tensor filter_slice;
+        filter_slice.ShareDataWith(
+            filter.raw_tensor().Slice(g * out_step, (g + 1) * out_step));
+        blas.MatMul(filter_slice.raw_tensor(), false, col_matrix.raw_tensor(),
+                    false, T(1.0), &(out_slice.raw_tensor()), T(0.0));
+      }
+    }
+  }
+
+  virtual ~Conv2dCompute() = default;
+};
+
+}  // namespace x86
+}  // namespace kernels
+}  // namespace lite
+}  // namespace paddle
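Conv2dCompute above lowers convolution to im2col followed by one GEMM per group: the filter is flattened to an (oc/g) x (ic/g * kh * kw) matrix and multiplied against the unfolded input columns. A toy single-channel, dilation-1 im2col to make the unfolding concrete; this is a hypothetical helper for illustration, not the operators::math functor.

// Toy 2-D im2col: output col has shape (KH*KW) x (OH*OW), so
// conv = filter_row_vector * col, one GEMM over all output positions.
#include <vector>

std::vector<float> im2col_2d(const float* in, int H, int W, int KH, int KW,
                             int stride, int pad) {
  const int OH = (H + 2 * pad - KH) / stride + 1;
  const int OW = (W + 2 * pad - KW) / stride + 1;
  std::vector<float> col(KH * KW * OH * OW, 0.f);
  for (int kh = 0; kh < KH; ++kh)
    for (int kw = 0; kw < KW; ++kw)
      for (int oh = 0; oh < OH; ++oh)
        for (int ow = 0; ow < OW; ++ow) {
          const int ih = oh * stride + kh - pad;
          const int iw = ow * stride + kw - pad;
          // Out-of-range reads become zeros (zero padding).
          const float v = (ih >= 0 && ih < H && iw >= 0 && iw < W)
                              ? in[ih * W + iw]
                              : 0.f;
          col[((kh * KW + kw) * OH + oh) * OW + ow] = v;
        }
  return col;
}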
diff --git a/paddle/fluid/lite/kernels/x86/conv_compute_test.cc b/paddle/fluid/lite/kernels/x86/conv_compute_test.cc
new file mode 100644
index 0000000000000000000000000000000000000000..be57153b4b55a1b68cbb0663d4b6dd0a15de5224
--- /dev/null
+++ b/paddle/fluid/lite/kernels/x86/conv_compute_test.cc
@@ -0,0 +1,92 @@
+// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/fluid/lite/kernels/x86/conv_compute.h"
+#include <gtest/gtest.h>
+#include <vector>
+#include "paddle/fluid/lite/core/op_registry.h"
+
+namespace paddle {
+namespace lite {
+namespace kernels {
+namespace x86 {
+
+TEST(conv_x86, retrive_op) {
+  auto conv2d =
+      KernelRegistry::Global().Create<TARGET(kX86), PRECISION(kFloat)>(
+          "conv2d");
+  ASSERT_FALSE(conv2d.empty());
+  ASSERT_TRUE(conv2d.front());
+}
+
+TEST(conv2d_x86, init) {
+  Conv2dCompute<float> conv2d;
+  ASSERT_EQ(conv2d.precision(), PRECISION(kFloat));
+  ASSERT_EQ(conv2d.target(), TARGET(kX86));
+}
+
+TEST(conv2d_x86, run_test) {
+  lite::Tensor x, filter, b, out;
+  constexpr int batch_size = 1;
+  std::vector<int64_t> x_shape{batch_size, 3, 3, 3};
+  x.Resize(lite::DDim(x_shape));
+  std::vector<int64_t> filter_shape{1, 3, 3, 3};
+  filter.Resize(lite::DDim(filter_shape));
+  std::vector<int64_t> b_shape{1, 3, 1, 1};
+  b.Resize(lite::DDim(b_shape));
+  std::vector<int64_t> out_shape{batch_size, 1, 1, 1};
+  out.Resize(lite::DDim(out_shape));
+
+  auto x_data = x.mutable_data<float>();
+  auto filter_data = filter.mutable_data<float>();
+  auto b_data = b.mutable_data<float>();
+  auto out_data = out.mutable_data<float>();
+
+  for (int64_t i = 0; i < x.dims().production(); i++) {
+    x_data[i] = 1;
+  }
+  for (int64_t i = 0; i < filter.dims().production(); i++) {
+    filter_data[i] = 1;
+  }
+  for (int64_t i = 0; i < b.dims().production(); i++) {
+    b_data[i] = 0;
+  }
+
+  Conv2dCompute<float> conv2d;
+  operators::ConvParam param;
+
+  param.x = &x;
+  param.filter = &filter;
+  param.bias = &b;
+  param.output = &out;
+  param.strides = {1, 1};
+  param.paddings = {0, 0};
+  param.groups = 1;
+  param.dilations = {1, 1};
+
+  conv2d.SetParam(param);
+  conv2d.Run();
+
+  LOG(INFO) << "output: ";
+  for (int i = 0; i < out.dims().production(); i++) {
+    LOG(INFO) << out_data[i] << " ";
+  }
+}
+
+}  // namespace x86
+}  // namespace kernels
+}  // namespace lite
+}  // namespace paddle
+
+USE_LITE_KERNEL(conv2d, kX86, kFloat, kNCHW, def);
diff --git a/paddle/fluid/lite/kernels/x86/dropout_compute.cc b/paddle/fluid/lite/kernels/x86/dropout_compute.cc
index d762ec2a06f8b4e0b2842e58625534dc92ca96a1..6b68e1da310996903643d6dc12abfc5a02864e74 100644
--- a/paddle/fluid/lite/kernels/x86/dropout_compute.cc
+++ b/paddle/fluid/lite/kernels/x86/dropout_compute.cc
@@ -12,72 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-#include <random>
-#include <string>
-#include "paddle/fluid/framework/eigen.h"
-#include "paddle/fluid/framework/operator.h"
-#include "paddle/fluid/lite/core/kernel.h"
-#include "paddle/fluid/lite/core/op_registry.h"
-
-namespace paddle {
-namespace lite {
-namespace kernels {
-namespace x86 {
-
-template <typename T, int MajorType = Eigen::RowMajor,
-          typename IndexType = Eigen::DenseIndex>
-using EigenMatrix = framework::EigenMatrix<T, MajorType, IndexType>;
-
-template <typename T>
-class DropoutCompute : public KernelLite<TARGET(kX86), PRECISION(kFloat)> {
- public:
-  using param_t = operators::DropoutParam;
-  void Run() override {
-    auto& param = *param_.get_mutable<param_t>();
-    const auto* x_data = param.x->data<T>();
-    auto* out_data = param.output->template mutable_data<T>();
-    if (!param.is_test) {
-      auto* mask_data = param.mask->template mutable_data<T>();
-      std::random_device rnd;
-      std::minstd_rand engine;
-      int seed = param.fix_seed ? param.seed : rnd();
-      engine.seed(seed);
-      std::uniform_real_distribution<float> dist(0, 1);
-
-      size_t size = framework::product(param.mask->dims().data());
-      for (size_t i = 0; i < size; ++i) {
-        if (dist(engine) < param.dropout_prob) {
-          mask_data[i] = 0;
-          out_data[i] = 0;
-        } else {
-          if (param.dropout_implementation == "upscale_in_train") {
-            mask_data[i] = 1.0f / static_cast<T>(1.0f - param.dropout_prob);
-            out_data[i] = x_data[i] / static_cast<T>(1.0f - param.dropout_prob);
-          } else {
-            mask_data[i] = 1;
-            out_data[i] = x_data[i];
-          }
-        }
-      }
-    } else {
-      auto X = EigenMatrix<T>::Reshape(param.x->raw_tensor(), 1);
-      auto Y = EigenMatrix<T>::Reshape(param.output->raw_tensor(), 1);
-      auto& place = *platform::CPUDeviceContext().eigen_device();
-      if (param.dropout_implementation == "upscale_in_train") {
-        Y.device(place) = X;
-      } else {
-        Y.device(place) = X * static_cast<T>(1.0f - param.dropout_prob);
-      }
-    }
-  }
-
-  virtual ~DropoutCompute() = default;
-};
-
-}  // namespace x86
-}  // namespace kernels
-}  // namespace lite
-}  // namespace paddle
+#include "paddle/fluid/lite/kernels/x86/dropout_compute.h"
 
 REGISTER_LITE_KERNEL(dropout, kX86, kFloat, kNCHW,
                      paddle::lite::kernels::x86::DropoutCompute<float>, def)
diff --git a/paddle/fluid/lite/kernels/x86/dropout_compute.h b/paddle/fluid/lite/kernels/x86/dropout_compute.h
new file mode 100644
index 0000000000000000000000000000000000000000..ee8b51619a54594b390751c6d2c7a0c4f9931483
--- /dev/null
+++ b/paddle/fluid/lite/kernels/x86/dropout_compute.h
@@ -0,0 +1,81 @@
+// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#pragma once
+
+#include <random>
+#include <string>
+#include "paddle/fluid/framework/eigen.h"
+#include "paddle/fluid/framework/operator.h"
+#include "paddle/fluid/lite/core/kernel.h"
+#include "paddle/fluid/lite/core/op_registry.h"
+
+namespace paddle {
+namespace lite {
+namespace kernels {
+namespace x86 {
+
+template <typename T, int MajorType = Eigen::RowMajor,
+          typename IndexType = Eigen::DenseIndex>
+using EigenMatrix = framework::EigenMatrix<T, MajorType, IndexType>;
+
+template <typename T>
+class DropoutCompute : public KernelLite<TARGET(kX86), PRECISION(kFloat)> {
+ public:
+  using param_t = operators::DropoutParam;
+  void Run() override {
+    auto& param = *param_.get_mutable<param_t>();
+    const auto* x_data = param.x->data<T>();
+    auto* out_data = param.output->template mutable_data<T>();
+    if (!param.is_test) {
+      auto* mask_data = param.mask->template mutable_data<T>();
+      std::random_device rnd;
+      std::minstd_rand engine;
+      int seed = param.fix_seed ? param.seed : rnd();
+      engine.seed(seed);
+      std::uniform_real_distribution<float> dist(0, 1);
+
+      size_t size = framework::product(param.mask->dims().data());
+      for (size_t i = 0; i < size; ++i) {
+        if (dist(engine) < param.dropout_prob) {
+          mask_data[i] = 0;
+          out_data[i] = 0;
+        } else {
+          if (param.dropout_implementation == "upscale_in_train") {
+            mask_data[i] = 1.0f / static_cast<T>(1.0f - param.dropout_prob);
+            out_data[i] = x_data[i] / static_cast<T>(1.0f - param.dropout_prob);
+          } else {
+            mask_data[i] = 1;
+            out_data[i] = x_data[i];
+          }
+        }
+      }
+    } else {
+      auto X = EigenMatrix<T>::Reshape(param.x->raw_tensor(), 1);
+      auto Y = EigenMatrix<T>::Reshape(param.output->raw_tensor(), 1);
+      auto& place = *platform::CPUDeviceContext().eigen_device();
+      if (param.dropout_implementation == "upscale_in_train") {
+        Y.device(place) = X;
+      } else {
+        Y.device(place) = X * static_cast<T>(1.0f - param.dropout_prob);
+      }
+    }
+  }
+
+  virtual ~DropoutCompute() = default;
+};
+
+}  // namespace x86
+}  // namespace kernels
+}  // namespace lite
+}  // namespace paddle
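The two conventions DropoutCompute handles differ only in where the 1/(1-p) rescaling happens: "upscale_in_train" divides kept activations by (1-p) at train time so inference is the identity, while the default keeps a 0/1 mask and scales by (1-p) at inference. A self-contained sketch of the train-time path, with illustrative names:

// Minimal dropout train step mirroring the kernel's mask logic.
#include <random>

void dropout_train(const float* x, float* mask, float* y, int n, float p,
                   bool upscale_in_train, unsigned seed) {
  std::minstd_rand engine(seed);
  std::uniform_real_distribution<float> dist(0.f, 1.f);
  // Kept elements carry either 1 (downscale at inference) or 1/(1-p).
  const float keep = upscale_in_train ? 1.f / (1.f - p) : 1.f;
  for (int i = 0; i < n; ++i) {
    const bool drop = dist(engine) < p;
    mask[i] = drop ? 0.f : keep;
    y[i] = x[i] * mask[i];
  }
}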
diff --git a/paddle/fluid/lite/kernels/x86/dropout_compute_test.cc b/paddle/fluid/lite/kernels/x86/dropout_compute_test.cc
new file mode 100644
index 0000000000000000000000000000000000000000..522877857c7adc47a258e24fc330f457520f8f79
--- /dev/null
+++ b/paddle/fluid/lite/kernels/x86/dropout_compute_test.cc
@@ -0,0 +1,78 @@
+// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/fluid/lite/kernels/x86/dropout_compute.h"
+#include <gtest/gtest.h>
+#include <memory>
+#include <vector>
+#include "paddle/fluid/lite/core/op_registry.h"
+
+namespace paddle {
+namespace lite {
+namespace kernels {
+namespace x86 {
+
+TEST(dropout_x86, retrive_op) {
+  auto dropout =
+      KernelRegistry::Global().Create<TARGET(kX86), PRECISION(kFloat)>(
+          "dropout");
+  ASSERT_FALSE(dropout.empty());
+  ASSERT_TRUE(dropout.front());
+}
+
+TEST(dropout_x86, init) {
+  DropoutCompute<float> dropout;
+  ASSERT_EQ(dropout.precision(), PRECISION(kFloat));
+  ASSERT_EQ(dropout.target(), TARGET(kX86));
+}
+
+TEST(dropout_x86, run_test) {
+  lite::Tensor x, y, out;
+  constexpr int batch_size = 1;
+  std::vector<int64_t> x_shape{batch_size, 3, 2, 2};
+  x.Resize(lite::DDim(x_shape));
+  std::vector<int64_t> out_shape{batch_size, 3, 2, 2};
+  out.Resize(lite::DDim(out_shape));
+
+  auto x_data = x.mutable_data<float>();
+  auto out_data = out.mutable_data<float>();
+
+  for (int64_t i = 0; i < x.dims().production(); i++) {
+    x_data[i] = static_cast<float>(i);
+  }
+  // DropoutCompute<float> dropout;
+  DropoutCompute<float> dropout;
+  operators::DropoutParam param;
+
+  param.x = &x;
+  param.dropout_prob = 0.25;
+  param.is_test = true;
+  param.fix_seed = true;
+  param.output = &out;
+
+  dropout.SetParam(param);
+  dropout.Run();
+
+  LOG(INFO) << "output: ";
+  for (int i = 0; i < out.dims().production(); i++) {
+    LOG(INFO) << out_data[i];
+  }
+}
+
+}  // namespace x86
+}  // namespace kernels
+}  // namespace lite
+}  // namespace paddle
+
+USE_LITE_KERNEL(dropout, kX86, kFloat, kNCHW, def);
diff --git a/paddle/fluid/lite/kernels/x86/elementwise_compute.cc b/paddle/fluid/lite/kernels/x86/elementwise_compute.cc
index 8e2ea92d6de24eb5ef58b5ebbdded90b99c1b6b8..5024e49866ff8dd51cc8963af905066f6dfff8a7 100644
--- a/paddle/fluid/lite/kernels/x86/elementwise_compute.cc
+++ b/paddle/fluid/lite/kernels/x86/elementwise_compute.cc
@@ -12,113 +12,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
-#include "paddle/fluid/framework/eigen.h" -#include "paddle/fluid/framework/operator.h" -#include "paddle/fluid/lite/core/kernel.h" -#include "paddle/fluid/lite/core/op_registry.h" -#include "paddle/fluid/operators/activation_op.h" -#include "paddle/fluid/operators/elementwise/elementwise_op.h" -#include "paddle/fluid/operators/elementwise/elementwise_op_function.h" +#include "paddle/fluid/lite/kernels/x86/elementwise_compute.h" -namespace paddle { -namespace lite { -namespace kernels { -namespace x86 { - -template -struct SubFunctor { - inline HOSTDEVICE T operator()(T a, T b) const { return a - b; } -}; - -template -struct AddFunctor { - inline HOSTDEVICE T operator()(T a, T b) const { return a + b; } -}; - -template -class ElementwiseSubCompute - : public KernelLite { - public: - using param_t = operators::ElementwiseParam; - - void Run() override { - auto& param = *param_.get_mutable(); - auto& context = ctx_->As(); - CHECK(context.x86_device_context()); - - param.Out->template mutable_data(); - paddle::operators::ElementwiseComputeEx, - platform::CPUDeviceContext, T>( - *context.x86_execution_context(), ¶m.X->raw_tensor(), - ¶m.Y->raw_tensor(), param.axis, SubFunctor(), - ¶m.Out->raw_tensor()); - } - - virtual ~ElementwiseSubCompute() = default; -}; - -template -struct SubGradDX { - T operator()(T x, T y, T out, T dout) const { return dout; } -}; - -template -struct SubGradDY { - T operator()(T x, T y, T out, T dout) const { return -dout; } -}; - -template -class ElementwiseSubGradCompute - : public KernelLite { - public: - using param_t = operators::ElementwiseGradParam; - void Run() override { - auto& param = *param_.get_mutable(); - auto& context = ctx_->As(); - CHECK(context.x86_device_context()); - - param.X_grad->template mutable_data(); - param.Y_grad->template mutable_data(); - // skip out, x, y - auto dout = param.Out_grad->raw_tensor(); - auto dx = param.X_grad->raw_tensor(); - auto dy = param.Y_grad->raw_tensor(); - auto& skip = dout; - paddle::operators::ElemwiseExplicitGradCompute< - platform::CPUDeviceContext, T, SubGradDX, SubGradDY>( - *context.x86_execution_context(), skip, skip, skip, dout, param.axis, - &dx, &dy, SubGradDX(), SubGradDY()); - } - - virtual ~ElementwiseSubGradCompute() = default; -}; - -template -class ElementwiseAddCompute - : public KernelLite { - public: - using param_t = operators::ElementwiseParam; - void Run() override { - auto& param = *param_.get_mutable(); - auto& context = ctx_->As(); - CHECK(context.x86_device_context()); - param.Out->template mutable_data(); - paddle::operators::ElementwiseComputeEx, - platform::CPUDeviceContext, T>( - *context.x86_execution_context(), ¶m.X->raw_tensor(), - ¶m.Y->raw_tensor(), param.axis, AddFunctor(), - ¶m.Out->raw_tensor()); - } - - virtual ~ElementwiseAddCompute() = default; -}; - -} // namespace x86 -} // namespace kernels -} // namespace lite -} // namespace paddle - -// float REGISTER_LITE_KERNEL(elementwise_sub, kX86, kFloat, kNCHW, paddle::lite::kernels::x86::ElementwiseSubCompute, def) diff --git a/paddle/fluid/lite/kernels/x86/elementwise_compute.h b/paddle/fluid/lite/kernels/x86/elementwise_compute.h new file mode 100644 index 0000000000000000000000000000000000000000..5e46bf8d4525de30b7308d54b30bf9d71b9f2921 --- /dev/null +++ b/paddle/fluid/lite/kernels/x86/elementwise_compute.h @@ -0,0 +1,120 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. 
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#pragma once
+
+#include "paddle/fluid/framework/eigen.h"
+#include "paddle/fluid/framework/operator.h"
+#include "paddle/fluid/lite/core/kernel.h"
+#include "paddle/fluid/lite/core/op_registry.h"
+#include "paddle/fluid/operators/activation_op.h"
+#include "paddle/fluid/operators/elementwise/elementwise_op.h"
+#include "paddle/fluid/operators/elementwise/elementwise_op_function.h"
+
+namespace paddle {
+namespace lite {
+namespace kernels {
+namespace x86 {
+
+template <typename T>
+struct SubFunctor {
+  inline HOSTDEVICE T operator()(T a, T b) const { return a - b; }
+};
+
+template <typename T>
+struct AddFunctor {
+  inline HOSTDEVICE T operator()(T a, T b) const { return a + b; }
+};
+
+template <typename T>
+class ElementwiseSubCompute
+    : public KernelLite<TARGET(kX86), PRECISION(kFloat)> {
+ public:
+  using param_t = operators::ElementwiseParam;
+
+  void Run() override {
+    auto& param = *param_.get_mutable<param_t>();
+    auto& context = ctx_->As<X86Context>();
+    CHECK(context.x86_device_context());
+
+    param.Out->template mutable_data<T>();
+    paddle::operators::ElementwiseComputeEx<SubFunctor<T>,
+                                            platform::CPUDeviceContext, T>(
+        *context.x86_execution_context(), &param.X->raw_tensor(),
+        &param.Y->raw_tensor(), param.axis, SubFunctor<T>(),
+        &param.Out->raw_tensor());
+  }
+
+  virtual ~ElementwiseSubCompute() = default;
+};
+
+template <typename T>
+struct SubGradDX {
+  T operator()(T x, T y, T out, T dout) const { return dout; }
+};
+
+template <typename T>
+struct SubGradDY {
+  T operator()(T x, T y, T out, T dout) const { return -dout; }
+};
+
+template <typename T>
+class ElementwiseSubGradCompute
+    : public KernelLite<TARGET(kX86), PRECISION(kFloat)> {
+ public:
+  using param_t = operators::ElementwiseGradParam;
+  void Run() override {
+    auto& param = *param_.get_mutable<param_t>();
+    auto& context = ctx_->As<X86Context>();
+    CHECK(context.x86_device_context());
+
+    param.X_grad->template mutable_data<T>();
+    param.Y_grad->template mutable_data<T>();
+    // skip out, x, y
+    auto dout = param.Out_grad->raw_tensor();
+    auto dx = param.X_grad->raw_tensor();
+    auto dy = param.Y_grad->raw_tensor();
+    auto& skip = dout;
+    paddle::operators::ElemwiseExplicitGradCompute<
+        platform::CPUDeviceContext, T, SubGradDX<T>, SubGradDY<T>>(
+        *context.x86_execution_context(), skip, skip, skip, dout, param.axis,
+        &dx, &dy, SubGradDX<T>(), SubGradDY<T>());
+  }
+
+  virtual ~ElementwiseSubGradCompute() = default;
+};
+
+template <typename T>
+class ElementwiseAddCompute
+    : public KernelLite<TARGET(kX86), PRECISION(kFloat)> {
+ public:
+  using param_t = operators::ElementwiseParam;
+  void Run() override {
+    auto& param = *param_.get_mutable<param_t>();
+    auto& context = ctx_->As<X86Context>();
+    CHECK(context.x86_device_context());
+    param.Out->template mutable_data<T>();
+    paddle::operators::ElementwiseComputeEx<AddFunctor<T>,
+                                            platform::CPUDeviceContext, T>(
+        *context.x86_execution_context(), &param.X->raw_tensor(),
+        &param.Y->raw_tensor(), param.axis, AddFunctor<T>(),
+        &param.Out->raw_tensor());
+  }
+
+  virtual ~ElementwiseAddCompute() = default;
+};
+
+}  // namespace x86
+}  // namespace kernels
+}  // namespace lite
+}  // namespace paddle
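The Sub and Add kernels above differ only in the functor handed to ElementwiseComputeEx, which owns the broadcasting and iteration. The same-shape core of that pattern, sketched with a hypothetical apply_elementwise helper (not the Paddle API):

// Generic binary elementwise application over equal-length buffers.
#include <vector>

template <typename T, typename F>
std::vector<T> apply_elementwise(const std::vector<T>& x,
                                 const std::vector<T>& y, F f) {
  std::vector<T> out(x.size());
  for (size_t i = 0; i < x.size(); ++i) out[i] = f(x[i], y[i]);
  return out;
}
// usage sketch: apply_elementwise(x, y, AddFunctor<float>{});
// (same-shape case only; the real kernels also handle axis broadcasting)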
diff --git a/paddle/fluid/lite/kernels/x86/elementwise_compute_test.cc b/paddle/fluid/lite/kernels/x86/elementwise_compute_test.cc
new file mode 100644
index 0000000000000000000000000000000000000000..8bade95f58ca386adb2b9a94da888a58f15158ac
--- /dev/null
+++ b/paddle/fluid/lite/kernels/x86/elementwise_compute_test.cc
@@ -0,0 +1,88 @@
+// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/fluid/lite/kernels/x86/elementwise_compute.h"
+#include <gtest/gtest.h>
+#include <memory>
+#include <utility>
+#include <vector>
+#include "paddle/fluid/lite/core/op_registry.h"
+
+namespace paddle {
+namespace lite {
+namespace kernels {
+namespace x86 {
+
+TEST(elementwise_add_x86, retrive_op) {
+  auto elementwise_add =
+      KernelRegistry::Global().Create<TARGET(kX86), PRECISION(kFloat)>(
+          "elementwise_add");
+  ASSERT_FALSE(elementwise_add.empty());
+  ASSERT_TRUE(elementwise_add.front());
+}
+
+TEST(elementwise_add_x86, init) {
+  ElementwiseAddCompute<float> elementwise_add;
+  ASSERT_EQ(elementwise_add.precision(), PRECISION(kFloat));
+  ASSERT_EQ(elementwise_add.target(), TARGET(kX86));
+}
+
+TEST(elementwise_add_x86, run_test) {
+  lite::Tensor x, y, out;
+  constexpr int batch_size = 1;
+  std::vector<int64_t> x_shape{batch_size, 3, 2, 2};
+  x.Resize(lite::DDim(x_shape));
+  std::vector<int64_t> y_shape{batch_size, 3, 2, 2};
+  y.Resize(lite::DDim(y_shape));
+  std::vector<int64_t> out_shape{batch_size, 3, 2, 2};
+  out.Resize(lite::DDim(out_shape));
+
+  auto x_data = x.mutable_data<float>();
+  auto y_data = y.mutable_data<float>();
+  auto out_data = out.mutable_data<float>();
+
+  for (int64_t i = 0; i < x.dims().production(); i++) {
+    x_data[i] = 1;
+  }
+  for (int64_t i = 0; i < y.dims().production(); i++) {
+    y_data[i] = 2;
+  }
+
+  // ElementwiseAddCompute<float> elementwise_add;
+  ElementwiseAddCompute<float> elementwise_add;
+  operators::ElementwiseParam param;
+
+  param.X = &x;
+  param.Y = &y;
+  param.Out = &out;
+
+  std::unique_ptr<KernelContext> ctx(new KernelContext);
+  ctx->As<X86Context>();
+  elementwise_add.SetParam(param);
+  elementwise_add.SetContext(std::move(ctx));
+  elementwise_add.Run();
+
+  LOG(INFO) << "output: ";
+  for (int i = 0; i < out.dims().production(); i++) {
+    LOG(INFO) << out_data[i];
+  }
+}
+
+}  // namespace x86
+}  // namespace kernels
+}  // namespace lite
+}  // namespace paddle
+
+USE_LITE_KERNEL(elementwise_add, kX86, kFloat, kNCHW, def);
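The axis rule this test and the ARM tests earlier in the patch exercise: Y's dims must match a contiguous run of X's dims starting at axis, with a negative axis right-aligning Y against X. For example, X = {2, 3, 4, 5} and Y = {3, 4} with axis = 1 is valid, and each Y element is added to every X element sharing those two indices. A small predicate spelling the rule out (illustrative helper name):

// Returns true if yd broadcasts into xd at the given axis.
#include <vector>

bool broadcast_ok(const std::vector<int>& xd, const std::vector<int>& yd,
                  int axis) {
  int axis_t = axis < 0
                   ? static_cast<int>(xd.size()) - static_cast<int>(yd.size())
                   : axis;
  if (axis_t < 0 || axis_t + yd.size() > xd.size()) return false;
  for (size_t i = 0; i < yd.size(); ++i) {
    if (xd[axis_t + i] != yd[i]) return false;  // dims must match exactly
  }
  return true;
}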
-#include -#include "paddle/fluid/framework/eigen.h" -#include "paddle/fluid/framework/operator.h" -#include "paddle/fluid/lite/core/kernel.h" -#include "paddle/fluid/lite/core/op_lite.h" -#include "paddle/fluid/lite/core/op_registry.h" -#include "paddle/fluid/lite/core/type_system.h" -#include "paddle/fluid/lite/operators/fc_op.h" - -namespace paddle { -namespace lite { -namespace kernels { -namespace x86 { - -template -void fc_compute_eigen(const T* x, int x_h, int x_w, // - const T* w, int w_h, int w_w, // - const T* b, // - T* out) { - using matrix_t = - Eigen::Matrix; - - Eigen::Map X(x, x_h, x_w); - Eigen::Map W(w, w_h, w_w); - Eigen::Map Out(out, x_h, w_w); - - Out = X * W; - - if (b) { - Eigen::Map> B(b, w_w); - Out = Out.array().rowwise() + B.transpose().array(); - } -} - -template -void fc_compute_naive(const T* x, int x_h, int x_w, // - const T* w, int w_h, int w_w, // - const T* b, // - T* out) { - CHECK_EQ(x_w, w_h); - // out shape: (x_h, w_w) - memset(out, 0, x_h * w_w * sizeof(T)); - for (int i = 0; i < x_h; i++) { - for (int j = 0; j < w_w; j++) { - T tmp = static_cast(0); - for (int k = 0; k < x_w; k++) { - tmp += x[i * x_w + k] * w[k * w_w + j]; - } - out[i * w_w + j] = tmp + b[j]; - } - } -} - -template -class FcCompute : public KernelLite { - public: - using param_t = operators::FcParam; - - void Run() override { - auto& param = *param_.get_mutable(); - CHECK_GE(param.input->dims().size(), 2UL); - CHECK_EQ(param.output->dims().size(), 2UL); - - fc_compute_eigen( - param.input->data(), // x - param.input->dims().Slice(0, param.in_num_col_dims).production(), - param.input->dims() - .Slice(param.in_num_col_dims, param.input->dims().size()) - .production(), - param.w->data(), // w - param.w->dims()[0], // w_h - param.w->dims()[1], // w_w - param.bias->data(), // b - param.output->mutable_data()); - } - - virtual ~FcCompute() = default; -}; - -} // namespace x86 -} // namespace kernels -} // namespace lite -} // namespace paddle +#include "paddle/fluid/lite/kernels/x86/fc_compute.h" REGISTER_LITE_KERNEL(fc, kX86, kFloat, kNCHW, paddle::lite::kernels::x86::FcCompute, def) diff --git a/paddle/fluid/lite/kernels/x86/fc_compute.h b/paddle/fluid/lite/kernels/x86/fc_compute.h new file mode 100644 index 0000000000000000000000000000000000000000..dc71ca25601c24ca55b1730edf6bd354eadfddf9 --- /dev/null +++ b/paddle/fluid/lite/kernels/x86/fc_compute.h @@ -0,0 +1,98 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+#pragma once
+
+#include <Eigen/Core>
+#include "paddle/fluid/framework/eigen.h"
+#include "paddle/fluid/framework/operator.h"
+#include "paddle/fluid/lite/core/kernel.h"
+#include "paddle/fluid/lite/core/op_lite.h"
+#include "paddle/fluid/lite/core/op_registry.h"
+#include "paddle/fluid/lite/core/type_system.h"
+#include "paddle/fluid/lite/operators/fc_op.h"
+
+namespace paddle {
+namespace lite {
+namespace kernels {
+namespace x86 {
+
+template <typename T>
+void fc_compute_eigen(const T* x, int x_h, int x_w,  //
+                      const T* w, int w_h, int w_w,  //
+                      const T* b,                    //
+                      T* out) {
+  using matrix_t =
+      Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>;
+
+  Eigen::Map<const matrix_t> X(x, x_h, x_w);
+  Eigen::Map<const matrix_t> W(w, w_h, w_w);
+  Eigen::Map<matrix_t> Out(out, x_h, w_w);
+
+  Out = X * W;
+
+  if (b) {
+    Eigen::Map<const Eigen::Matrix<T, Eigen::Dynamic, 1>> B(b, w_w);
+    Out = Out.array().rowwise() + B.transpose().array();
+  }
+}
+
+template <typename T>
+void fc_compute_naive(const T* x, int x_h, int x_w,  //
+                      const T* w, int w_h, int w_w,  //
+                      const T* b,                    //
+                      T* out) {
+  CHECK_EQ(x_w, w_h);
+  // out shape: (x_h, w_w)
+  memset(out, 0, x_h * w_w * sizeof(T));
+  for (int i = 0; i < x_h; i++) {
+    for (int j = 0; j < w_w; j++) {
+      T tmp = static_cast<T>(0);
+      for (int k = 0; k < x_w; k++) {
+        tmp += x[i * x_w + k] * w[k * w_w + j];
+      }
+      out[i * w_w + j] = tmp + b[j];
+    }
+  }
+}
+
+template <typename T>
+class FcCompute : public KernelLite<TARGET(kX86), PRECISION(kFloat)> {
+ public:
+  using param_t = operators::FcParam;
+
+  void Run() override {
+    auto& param = *param_.get_mutable<param_t>();
+    CHECK_GE(param.input->dims().size(), 2UL);
+    CHECK_EQ(param.output->dims().size(), 2UL);
+
+    fc_compute_eigen(
+        param.input->data<T>(),  // x
+        param.input->dims().Slice(0, param.in_num_col_dims).production(),
+        param.input->dims()
+            .Slice(param.in_num_col_dims, param.input->dims().size())
+            .production(),
+        param.w->data<T>(),     // w
+        param.w->dims()[0],     // w_h
+        param.w->dims()[1],     // w_w
+        param.bias->data<T>(),  // b
+        param.output->mutable_data<T>());
+  }
+
+  virtual ~FcCompute() = default;
+};
+
+}  // namespace x86
+}  // namespace kernels
+}  // namespace lite
+}  // namespace paddle
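Both helpers in fc_compute.h compute Out = X * W + b with X of shape (x_h, x_w), W of shape (w_h, w_w), and b broadcast across rows, so they can cross-check each other. A hedged sketch of such a check, assuming the two helpers above are visible at the call site:

// Compare the Eigen and naive FC paths on small constant inputs.
#include <cassert>
#include <cmath>
#include <vector>

void check_fc_agree() {
  const int M = 2, K = 3, N = 4;
  std::vector<float> x(M * K, 1.f), w(K * N, 0.5f), b(N, 0.25f);
  std::vector<float> out_a(M * N), out_b(M * N);
  fc_compute_eigen(x.data(), M, K, w.data(), K, N, b.data(), out_a.data());
  fc_compute_naive(x.data(), M, K, w.data(), K, N, b.data(), out_b.data());
  for (int i = 0; i < M * N; ++i) {
    assert(std::fabs(out_a[i] - out_b[i]) < 1e-5f);  // expect agreement
  }
}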
+#include "paddle/fluid/lite/kernels/x86/fc_compute.h" +#include +#include +#include "paddle/fluid/lite/core/op_registry.h" + +namespace paddle { +namespace lite { +namespace kernels { +namespace x86 { + +TEST(fc_x86, retrive_op) { + auto fc = + KernelRegistry::Global().Create("fc"); + ASSERT_FALSE(fc.empty()); + ASSERT_TRUE(fc.front()); +} + +TEST(fc_x86, init) { + FcCompute fc; + ASSERT_EQ(fc.precision(), PRECISION(kFloat)); + ASSERT_EQ(fc.target(), TARGET(kX86)); +} + +TEST(fc_x86, run_test) { + lite::Tensor x, w, b, out; + constexpr int batch_size = 2; + std::vector x_shape{batch_size, 3}; + x.Resize(lite::DDim(x_shape)); + std::vector w_shape{3, 4}; + w.Resize(lite::DDim(w_shape)); + std::vector b_shape{1, 4}; + b.Resize(lite::DDim(b_shape)); + std::vector out_shape{1, 4}; + out.Resize(lite::DDim(out_shape)); + + auto x_data = x.mutable_data(); + auto w_data = w.mutable_data(); + auto b_data = b.mutable_data(); + auto out_data = out.mutable_data(); + + for (int64_t i = 0; i < x.dims().production(); i++) { + x_data[i] = static_cast(i); + } + for (int64_t i = 0; i < w.dims().production(); i++) { + w_data[i] = static_cast(i); + } + for (int64_t i = 0; i < b.dims().production(); i++) { + b_data[i] = static_cast(i); + } + + /* lite::x86::math::fc_compute_eigen(x_data, batch_size, 3, // + w_data, 3, 4, // + b_data, ref_data); */ + + // FcCompute fc; + FcCompute fc; + operators::FcParam param; + + param.in_num_col_dims = 1; + param.input = &x; + param.w = &w; + param.bias = &b; + param.output = &out; + param.in_mat_dims = x.dims(); + + // std::unique_ptr ctx(new KernelContext); + // ctx->As(); + fc.SetParam(param); + // fc.SetContext(std::move(ctx)); + fc.Run(); + + VLOG(3) << "output vs ref"; + for (int i = 0; i < out.dims().production(); i++) { + VLOG(3) << out_data[i]; + } + + /* for (int i = 0; i < out.dims().product(); ++i) { + EXPECT_NEAR(out_data[i], ref_data[i], 1e-5); + }*/ +} + +} // namespace x86 +} // namespace kernels +} // namespace lite +} // namespace paddle + +USE_LITE_KERNEL(fc, kX86, kFloat, kNCHW, def); diff --git a/paddle/fluid/lite/kernels/x86/mul_compute.cc b/paddle/fluid/lite/kernels/x86/mul_compute.cc index ad009893c8a7c78c17218d66790d292a5030535c..01dd2171061c44cab6d9cbeb306473eb5349c89e 100644 --- a/paddle/fluid/lite/kernels/x86/mul_compute.cc +++ b/paddle/fluid/lite/kernels/x86/mul_compute.cc @@ -12,122 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/fluid/lite/core/kernel.h" -#include "paddle/fluid/lite/core/op_registry.h" -#include "paddle/fluid/lite/core/types.h" -#include "paddle/fluid/operators/math/blas.h" - -namespace paddle { -namespace lite { -namespace kernels { -namespace x86 { - -using Tensor = framework::Tensor; - -template -class MulCompute : public KernelLite { - public: - using param_t = operators::MulParam; - - void Run() override { - auto& context = ctx_->As(); - auto& param = *param_.get_mutable(); - CHECK(context.x86_device_context()); - - param.output->template mutable_data(); - - auto* x = ¶m.x->raw_tensor(); - auto* y = ¶m.y->raw_tensor(); - - const Tensor x_matrix = x->dims().size() > 2 ? framework::ReshapeToMatrix( - *x, param.x_num_col_dims) - : *x; - const Tensor y_matrix = y->dims().size() > 2 ? 
-
-    auto* z = &param.output->raw_tensor();
-    auto z_dim = z->dims();
-    if (z_dim.size() != 2) {
-      z->Resize({x_matrix.dims()[0], y_matrix.dims()[1]});
-    }
-
-    auto blas = paddle::operators::math::GetBlas<platform::CPUDeviceContext, T>(
-        *context.x86_device_context());
-
-    blas.MatMul(x_matrix, y_matrix, z);
-    if (z_dim.size() != 2) {
-      z->Resize(z_dim);
-    }
-  }
-
-  virtual ~MulCompute() = default;
-};
-
-template <typename T>
-class MulGradCompute : public KernelLite<TARGET(kX86), PRECISION(kFloat)> {
- public:
-  void Run() override {
-    auto& context = ctx_->As<X86Context>();
-    auto& param = *param_.get_mutable<operators::MulGradParam>();
-    CHECK(context.x86_device_context());
-
-    auto* x = &param.x->raw_tensor();
-    auto* y = &param.y->raw_tensor();
-    auto x_matrix = x->dims().size() > 2
-                        ? framework::ReshapeToMatrix(*x, param.x_num_col_dims)
-                        : static_cast<const Tensor&>(*x);
-    auto y_matrix = y->dims().size() > 2
-                        ? framework::ReshapeToMatrix(*y, param.y_num_col_dims)
-                        : static_cast<const Tensor&>(*y);
-    auto* dout = &param.output_grad->raw_tensor();
-
-    Tensor dout_mat;
-    dout_mat.ShareDataWith(*dout);
-    dout_mat.Resize(
-        {framework::flatten_to_2d(x->dims(), param.x_num_col_dims)[0],
-         framework::flatten_to_2d(y->dims(), param.y_num_col_dims)[1]});
-
-    auto* dx = &param.x_grad->raw_tensor();
-    auto* dy = &param.y_grad->raw_tensor();
-
-    if (dx != nullptr) {
-      dx->set_lod(x->lod());
-    }
-    if (dy != nullptr) {
-      dy->set_lod(y->lod());
-    }
-
-    auto blas = paddle::operators::math::GetBlas<platform::CPUDeviceContext, T>(
-        *context.x86_device_context());
-    if (dx) {
-      // dx->mutable_data<T>(context.x86_device_context->GetPlace());
-      param.x_grad->template mutable_data<T>();
-      Tensor dx_matrix = dx->dims().size() > 2 ? framework::ReshapeToMatrix(
-                                                     *dx, param.x_num_col_dims)
-                                               : *dx;
-
-      // dx = dout * y'. dx: M x K, dout : M x N, y : K x N
-      blas.MatMul(dout_mat, false, y_matrix, true, &dx_matrix);
-    }
-    if (dy) {
-      // dy->mutable_data<T>(context.x86_device_context->GetPlace());
-      param.y_grad->template mutable_data<T>();
-      Tensor dy_matrix = dy->dims().size() > 2 ? framework::ReshapeToMatrix(
-                                                     *dy, param.y_num_col_dims)
-                                               : *dy;
-      // dy = x' * dout. dy K x N, dout : M x N, x : M x K
-      blas.MatMul(x_matrix, true, dout_mat, false, &dy_matrix);
-    }
-  }
-
-  virtual ~MulGradCompute() = default;
-};
-
-}  // namespace x86
-}  // namespace kernels
-}  // namespace lite
-}  // namespace paddle
+#include "paddle/fluid/lite/kernels/x86/mul_compute.h"
 
 REGISTER_LITE_KERNEL(mul, kX86, kFloat, kNCHW,
                      paddle::lite::kernels::x86::MulCompute<float>, def)
diff --git a/paddle/fluid/lite/kernels/x86/mul_compute.h b/paddle/fluid/lite/kernels/x86/mul_compute.h
new file mode 100644
index 0000000000000000000000000000000000000000..0f95fea934a26fee17dd52bf9746b96828af1948
--- /dev/null
+++ b/paddle/fluid/lite/kernels/x86/mul_compute.h
@@ -0,0 +1,149 @@
+// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#pragma once
+
+#include "paddle/fluid/lite/core/kernel.h"
+#include "paddle/fluid/lite/core/op_registry.h"
+#include "paddle/fluid/lite/core/types.h"
+#include "paddle/fluid/operators/math/blas.h"
+
+namespace paddle {
+namespace lite {
+namespace kernels {
+namespace x86 {
+
+using Tensor = framework::Tensor;
+
+template <typename T>
+class MulCompute : public KernelLite<TARGET(kX86), PRECISION(kFloat)> {
+ public:
+  using param_t = operators::MulParam;
+
+  void Run() override {
+    auto& context = ctx_->As<X86Context>();
+    auto& param = *param_.get_mutable<operators::MulParam>();
+    CHECK(context.x86_device_context());
+
+    param.output->template mutable_data<T>();
+
+    auto* x = &param.x->raw_tensor();
+    auto* y = &param.y->raw_tensor();
+
+    Tensor x_matrix, y_matrix;
+
+    if (x->dims().size() > 2) {
+      x_matrix = framework::ReshapeToMatrix(*x, param.x_num_col_dims);
+    } else {
+      x_matrix = *x;
+    }
+
+    if (y->dims().size() > 2) {
+      y_matrix = framework::ReshapeToMatrix(*y, param.y_num_col_dims);
+
+    } else {
+      y_matrix = *y;
+    }
+
+    auto* z = &param.output->raw_tensor();
+    auto z_dim = z->dims();
+    if (z_dim.size() != 2) {
+      z->Resize({x_matrix.dims()[0], y_matrix.dims()[1]});
+    }
+
+    auto blas = paddle::operators::math::GetBlas<platform::CPUDeviceContext,
+                                                 T>(
+        *context.x86_device_context());
+
+    blas.MatMul(x_matrix, y_matrix, z);
+    if (z_dim.size() != 2) {
+      z->Resize(z_dim);
+    }
+  }
+
+  virtual ~MulCompute() = default;
+};
+
+template <typename T>
+class MulGradCompute : public KernelLite<TARGET(kX86), PRECISION(kFloat)> {
+ public:
+  void Run() override {
+    auto& context = ctx_->As<X86Context>();
+    auto& param = *param_.get_mutable<operators::MulGradParam>();
+    CHECK(context.x86_device_context());
+
+    auto* x = &param.x->raw_tensor();
+    auto* y = &param.y->raw_tensor();
+
+    Tensor x_matrix, y_matrix;
+
+    if (x->dims().size() > 2) {
+      x_matrix = framework::ReshapeToMatrix(*x, param.x_num_col_dims);
+    } else {
+      x_matrix = *x;
+    }
+
+    if (y->dims().size() > 2) {
+      y_matrix = framework::ReshapeToMatrix(*y, param.y_num_col_dims);
+
+    } else {
+      y_matrix = *y;
+    }
+
+    auto* dout = &param.output_grad->raw_tensor();
+
+    Tensor dout_mat;
+    dout_mat.ShareDataWith(*dout);
+    dout_mat.Resize(
+        {framework::flatten_to_2d(x->dims(), param.x_num_col_dims)[0],
+         framework::flatten_to_2d(y->dims(), param.y_num_col_dims)[1]});
+
+    auto* dx = &param.x_grad->raw_tensor();
+    auto* dy = &param.y_grad->raw_tensor();
+
+    if (dx != nullptr) {
+      dx->set_lod(x->lod());
+    }
+    if (dy != nullptr) {
+      dy->set_lod(y->lod());
+    }
+
+    auto blas = paddle::operators::math::GetBlas<platform::CPUDeviceContext,
+                                                 T>(
+        *context.x86_device_context());
+    if (dx) {
+      // dx->mutable_data<T>(context.x86_device_context->GetPlace());
+      param.x_grad->template mutable_data<T>();
+      Tensor dx_matrix = dx->dims().size() > 2 ? framework::ReshapeToMatrix(
+                                                     *dx, param.x_num_col_dims)
+                                               : *dx;
+
+      // dx = dout * y'. dx: M x K, dout : M x N, y : K x N
+      blas.MatMul(dout_mat, false, y_matrix, true, &dx_matrix);
+    }
+    if (dy) {
+      // dy->mutable_data<T>(context.x86_device_context->GetPlace());
+      param.y_grad->template mutable_data<T>();
+      Tensor dy_matrix = dy->dims().size() > 2 ? framework::ReshapeToMatrix(
+                                                     *dy, param.y_num_col_dims)
+                                               : *dy;
+      // dy = x' * dout. dy K x N, dout : M x N, x : M x K
+      blas.MatMul(x_matrix, true, dout_mat, false, &dy_matrix);
+    }
+  }
+
+  virtual ~MulGradCompute() = default;
+};
+
+}  // namespace x86
+}  // namespace kernels
+}  // namespace lite
+}  // namespace paddle
diff --git a/paddle/fluid/lite/kernels/x86/mul_compute_test.cc b/paddle/fluid/lite/kernels/x86/mul_compute_test.cc
new file mode 100644
index 0000000000000000000000000000000000000000..c551754328ebe005aeaadf06846d82b48da511e6
--- /dev/null
+++ b/paddle/fluid/lite/kernels/x86/mul_compute_test.cc
@@ -0,0 +1,86 @@
+// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/fluid/lite/kernels/x86/mul_compute.h"
+#include <gtest/gtest.h>
+#include <iostream>
+#include <memory>
+#include <utility>
+#include <vector>
+#include "paddle/fluid/lite/core/op_registry.h"
+
+namespace paddle {
+namespace lite {
+namespace kernels {
+namespace x86 {
+
+TEST(mul_x86, retrive_op) {
+  auto mul =
+      KernelRegistry::Global().Create<TARGET(kX86), PRECISION(kFloat)>("mul");
+  ASSERT_FALSE(mul.empty());
+  ASSERT_TRUE(mul.front());
+}
+
+TEST(mul_x86, init) {
+  MulCompute<float> mul;
+  ASSERT_EQ(mul.precision(), PRECISION(kFloat));
+  ASSERT_EQ(mul.target(), TARGET(kX86));
+}
+
+TEST(mul_x86, run_test) {
+  lite::Tensor x, y, out;
+  constexpr int batch_size = 1;
+  std::vector<int64_t> x_shape{batch_size, 3};
+  x.Resize(lite::DDim(x_shape));
+  std::vector<int64_t> y_shape{3, 4};
+  y.Resize(lite::DDim(y_shape));
+  std::vector<int64_t> out_shape{batch_size, 4};
+  out.Resize(lite::DDim(out_shape));
+
+  auto x_data = x.mutable_data<float>();
+  auto y_data = y.mutable_data<float>();
+  auto out_data = out.mutable_data<float>();
+
+  for (int64_t i = 0; i < x.dims().production(); i++) {
+    x_data[i] = static_cast<float>(i);
+  }
+  for (int64_t i = 0; i < y.dims().production(); i++) {
+    y_data[i] = static_cast<float>(i);
+  }
+  // MulCompute<float> mul;
+  MulCompute<float> mul;
+  operators::MulParam param;
+
+  param.x = &x;
+  param.y = &y;
+  param.output = &out;
+
+  std::unique_ptr<KernelContext> ctx(new KernelContext);
+  ctx->As<X86Context>();
+  mul.SetContext(std::move(ctx));
+  mul.SetParam(param);
+  mul.Run();
+
+  LOG(INFO) << "output: ";
+  for (int i = 0; i < out.dims().production(); i++) {
+    LOG(INFO) << out_data[i];
+  }
+}
+
+}  // namespace x86
+}  // namespace kernels
+}  // namespace lite
+}  // namespace paddle
+
+USE_LITE_KERNEL(mul, kX86, kFloat, kNCHW, def);
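Both mul kernels above lean on the same flattening rule: `ReshapeToMatrix`/`flatten_to_2d` collapse the first `num_col_dims` axes of a tensor into matrix rows and the remaining axes into columns before `blas.MatMul` runs. A minimal self-contained sketch of that rule, for reference only (the name `FlattenTo2D` is ours, not a helper in this tree):

    #include <cstdint>
    #include <utility>
    #include <vector>

    // Collapse dims[0..k) into rows and dims[k..n) into cols; this is the
    // viewing rule MulCompute applies to x (with x_num_col_dims) and y
    // (with y_num_col_dims) before the matrix multiply.
    std::pair<int64_t, int64_t> FlattenTo2D(const std::vector<int64_t>& dims,
                                            int num_col_dims) {
      int64_t rows = 1, cols = 1;
      for (int i = 0; i < num_col_dims; ++i) rows *= dims[i];
      for (size_t i = num_col_dims; i < dims.size(); ++i) cols *= dims[i];
      return {rows, cols};
    }

For example, dims {2, 3, 4} with num_col_dims = 1 is viewed as a 2 x 12 matrix, and with num_col_dims = 2 as a 6 x 4 matrix.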
-#include -#include "paddle/fluid/framework/eigen.h" -#include "paddle/fluid/lite/core/kernel.h" -#include "paddle/fluid/lite/core/op_registry.h" -#include "paddle/fluid/lite/core/types.h" -#include "paddle/fluid/operators/math/math_function.h" -#include "paddle/fluid/operators/math/pooling.h" - -namespace paddle { -namespace lite { -namespace kernels { -namespace x86 { - -template -class PoolCompute : public KernelLite { - public: - using param_t = operators::PoolParam; - void Run() override { - auto& param = *param_.get_mutable(); - if (param.global_pooling) { - for (size_t i = 0; i < param.ksize.size(); ++i) { - param.paddings[i] = 0; - param.ksize[i] = static_cast(param.x->dims()[i + 2]); - } - } - switch (param.ksize.size()) { - case 2: { - if (param.pooling_type == "max") { - paddle::operators::math::Pool2dFunctor< - platform::CPUDeviceContext, paddle::operators::math::MaxPool, - T> - pool2d_forward; - paddle::operators::math::MaxPool pool_process; - pool2d_forward(platform::CPUDeviceContext(), param.x->raw_tensor(), - param.ksize, param.strides, param.paddings, - pool_process, true, false, - &(param.output->raw_tensor())); - } else if (param.pooling_type == "avg") { - paddle::operators::math::Pool2dFunctor< - platform::CPUDeviceContext, paddle::operators::math::AvgPool, - T> - pool2d_forward; - paddle::operators::math::AvgPool pool_process; - pool2d_forward(platform::CPUDeviceContext(), param.x->raw_tensor(), - param.ksize, param.strides, param.paddings, - pool_process, param.exclusive, param.adaptive, - &(param.output->raw_tensor())); - } - } break; - case 3: { - } break; - } - } - virtual ~PoolCompute() = default; -}; - -} // namespace x86 -} // namespace kernels -} // namespace lite -} // namespace paddle +#include "paddle/fluid/lite/kernels/x86/pool_compute.h" REGISTER_LITE_KERNEL(pool2d, kX86, kFloat, kNCHW, paddle::lite::kernels::x86::PoolCompute, def) - .BindInput("X", {LiteType::GetTensorTy(TARGET(kX86))}) + .BindInput("x", {LiteType::GetTensorTy(TARGET(kX86))}) .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kX86))}) .Finalize(); diff --git a/paddle/fluid/lite/kernels/x86/pool_compute.h b/paddle/fluid/lite/kernels/x86/pool_compute.h new file mode 100644 index 0000000000000000000000000000000000000000..d024c5b84e38ee5791982c7f49348cb05c8d41ca --- /dev/null +++ b/paddle/fluid/lite/kernels/x86/pool_compute.h @@ -0,0 +1,75 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+#pragma once + +#include +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/lite/core/kernel.h" +#include "paddle/fluid/lite/core/op_registry.h" +#include "paddle/fluid/lite/core/types.h" +#include "paddle/fluid/operators/math/math_function.h" +#include "paddle/fluid/operators/math/pooling.h" + +namespace paddle { +namespace lite { +namespace kernels { +namespace x86 { + +template +class PoolCompute : public KernelLite { + public: + using param_t = operators::PoolParam; + void Run() override { + auto& param = *param_.get_mutable(); + if (param.global_pooling) { + for (size_t i = 0; i < param.ksize.size(); ++i) { + param.paddings[i] = 0; + param.ksize[i] = static_cast(param.x->dims()[i + 2]); + } + } + switch (param.ksize.size()) { + case 2: { + if (param.pooling_type == "max") { + paddle::operators::math::Pool2dFunctor< + platform::CPUDeviceContext, paddle::operators::math::MaxPool, + T> + pool2d_forward; + paddle::operators::math::MaxPool pool_process; + pool2d_forward(platform::CPUDeviceContext(), param.x->raw_tensor(), + param.ksize, param.strides, param.paddings, + pool_process, true, false, + &(param.output->raw_tensor())); + } else if (param.pooling_type == "avg") { + paddle::operators::math::Pool2dFunctor< + platform::CPUDeviceContext, paddle::operators::math::AvgPool, + T> + pool2d_forward; + paddle::operators::math::AvgPool pool_process; + pool2d_forward(platform::CPUDeviceContext(), param.x->raw_tensor(), + param.ksize, param.strides, param.paddings, + pool_process, param.exclusive, param.adaptive, + &(param.output->raw_tensor())); + } + } break; + case 3: { + } break; + } + } + virtual ~PoolCompute() = default; +}; + +} // namespace x86 +} // namespace kernels +} // namespace lite +} // namespace paddle diff --git a/paddle/fluid/lite/kernels/x86/pool_compute_test.cc b/paddle/fluid/lite/kernels/x86/pool_compute_test.cc new file mode 100644 index 0000000000000000000000000000000000000000..b3d833509109b887b22dba60b2e16ba5698f2b45 --- /dev/null +++ b/paddle/fluid/lite/kernels/x86/pool_compute_test.cc @@ -0,0 +1,79 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
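The pool2d test that follows picks its `out_shape` from the standard pooling arithmetic. A small sketch of that formula (generic pooling math; `PoolOutShape` is an illustrative name, not code from this PR):

    #include <cstdint>
    #include <vector>

    // Output H/W for 2-D pooling over an NCHW shape:
    //   out = (in + 2 * pad - ksize) / stride + 1
    std::vector<int64_t> PoolOutShape(std::vector<int64_t> nchw, int ksize,
                                      int stride, int pad) {
      nchw[2] = (nchw[2] + 2 * pad - ksize) / stride + 1;
      nchw[3] = (nchw[3] + 2 * pad - ksize) / stride + 1;
      return nchw;
    }

With the test's values, PoolOutShape({1, 3, 4, 4}, 2, 2, 0) yields {1, 3, 2, 2}, which is exactly the `out_shape` used in pool_compute_test.cc below.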
+ +#include "paddle/fluid/lite/kernels/x86/pool_compute.h" +#include +#include +#include +#include "paddle/fluid/lite/core/op_registry.h" + +namespace paddle { +namespace lite { +namespace kernels { +namespace x86 { + +TEST(pool_x86, retrive_op) { + auto pool2d = + KernelRegistry::Global().Create( + "pool2d"); + ASSERT_FALSE(pool2d.empty()); + ASSERT_TRUE(pool2d.front()); +} + +TEST(pool2d_x86, init) { + PoolCompute pool2d; + ASSERT_EQ(pool2d.precision(), PRECISION(kFloat)); + ASSERT_EQ(pool2d.target(), TARGET(kX86)); +} + +TEST(pool2d_x86, run_test) { + lite::Tensor x, out; + constexpr int batch_size = 1; + std::vector x_shape{batch_size, 3, 4, 4}; + x.Resize(lite::DDim(x_shape)); + std::vector out_shape{batch_size, 3, 2, 2}; + out.Resize(lite::DDim(out_shape)); + + auto x_data = x.mutable_data(); + auto out_data = out.mutable_data(); + + for (int64_t i = 0; i < x.dims().production(); i++) { + x_data[i] = static_cast(i); + } + + PoolCompute pool2d; + operators::PoolParam param; + + param.x = &x; + param.output = &out; + param.strides = {2, 2}; + param.paddings = {0, 0}; + param.ksize = {2, 2}; + param.pooling_type = "max"; + + pool2d.SetParam(param); + pool2d.Run(); + + LOG(INFO) << "output: "; + for (int i = 0; i < out.dims().production(); i++) { + LOG(INFO) << out_data[i]; + } +} + +} // namespace x86 +} // namespace kernels +} // namespace lite +} // namespace paddle + +USE_LITE_KERNEL(pool2d, kX86, kFloat, kNCHW, def); diff --git a/paddle/fluid/lite/kernels/x86/relu_compute.cc b/paddle/fluid/lite/kernels/x86/relu_compute.cc index 52fffb579816cd70a748d59cb3750ebaaadb10c7..326df35beffc53122fc7af4526e2148ead92bdf9 100644 --- a/paddle/fluid/lite/kernels/x86/relu_compute.cc +++ b/paddle/fluid/lite/kernels/x86/relu_compute.cc @@ -12,42 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include -#include "paddle/fluid/framework/eigen.h" -#include "paddle/fluid/framework/operator.h" -#include "paddle/fluid/lite/core/kernel.h" -#include "paddle/fluid/lite/core/op_lite.h" -#include "paddle/fluid/lite/core/op_registry.h" -#include "paddle/fluid/lite/core/type_system.h" -#include "paddle/fluid/lite/operators/relu_op.h" - -namespace paddle { -namespace lite { -namespace kernels { -namespace x86 { - -template -class ReluCompute : public KernelLite { - public: - using param_t = operators::ReluParam; - - void Run() override { - auto& param = *param_.get_mutable(); - auto n = param.input->dims().production(); - const float* input = param.input->data(); - float* output = param.output->mutable_data(); - for (int i = 0; i < n; i++) { - output[i] = std::max(0.f, input[i]); - } - } - - virtual ~ReluCompute() = default; -}; - -} // namespace x86 -} // namespace kernels -} // namespace lite -} // namespace paddle +#include "paddle/fluid/lite/kernels/x86/relu_compute.h" REGISTER_LITE_KERNEL(relu, kX86, kFloat, kNCHW, paddle::lite::kernels::x86::ReluCompute, def) diff --git a/paddle/fluid/lite/kernels/x86/relu_compute.h b/paddle/fluid/lite/kernels/x86/relu_compute.h new file mode 100644 index 0000000000000000000000000000000000000000..89458fad45e2ee8782039d6a04f499932267991b --- /dev/null +++ b/paddle/fluid/lite/kernels/x86/relu_compute.h @@ -0,0 +1,52 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +#pragma once + +#include +#include +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/operator.h" +#include "paddle/fluid/lite/core/kernel.h" +#include "paddle/fluid/lite/core/op_lite.h" +#include "paddle/fluid/lite/core/op_registry.h" +#include "paddle/fluid/lite/core/type_system.h" +#include "paddle/fluid/lite/operators/relu_op.h" + +namespace paddle { +namespace lite { +namespace kernels { +namespace x86 { + +template +class ReluCompute : public KernelLite { + public: + using param_t = operators::ReluParam; + + void Run() override { + auto& param = *param_.get_mutable(); + auto n = param.input->dims().production(); + const float* input = param.input->data(); + float* output = param.output->mutable_data(); + for (int i = 0; i < n; i++) { + output[i] = std::max(0.f, input[i]); + } + } + + virtual ~ReluCompute() = default; +}; + +} // namespace x86 +} // namespace kernels +} // namespace lite +} // namespace paddle diff --git a/paddle/fluid/lite/kernels/x86/relu_compute_test.cc b/paddle/fluid/lite/kernels/x86/relu_compute_test.cc new file mode 100644 index 0000000000000000000000000000000000000000..e868947bbd7383cbb8b0a10d475ff3dbb9a6485f --- /dev/null +++ b/paddle/fluid/lite/kernels/x86/relu_compute_test.cc @@ -0,0 +1,75 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/lite/kernels/x86/relu_compute.h" +#include +#include +#include +#include "paddle/fluid/lite/core/op_registry.h" + +namespace paddle { +namespace lite { +namespace kernels { +namespace x86 { + +TEST(relu_x86, retrive_op) { + auto relu = + KernelRegistry::Global().Create("relu"); + ASSERT_FALSE(relu.empty()); + ASSERT_TRUE(relu.front()); +} + +TEST(relu_x86, init) { + ReluCompute relu; + ASSERT_EQ(relu.precision(), PRECISION(kFloat)); + ASSERT_EQ(relu.target(), TARGET(kX86)); +} + +TEST(relu_x86, run_test) { + lite::Tensor x, out; + constexpr int batch_size = 1; + std::vector x_shape{batch_size, 3, 2, 2}; + x.Resize(lite::DDim(x_shape)); + std::vector out_shape{batch_size, 3, 2, 2}; + out.Resize(lite::DDim(out_shape)); + + auto x_data = x.mutable_data(); + auto out_data = out.mutable_data(); + + for (int64_t i = 0; i < x.dims().production(); i++) { + int sign = i % 2 == 0 ? 
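The relu test above only logs its output: the input alternates sign (x = {0, -1, 2, -3, ...}), so relu zeroes every odd index and keeps the rest. Since gtest is already linked, a reference assertion would be cheap to add inside run_test; a possible sketch (our suggestion, not part of the PR, using std::max from <algorithm>):

    // After relu.Run(): every output must equal the clamped input.
    for (int i = 0; i < out.dims().production(); i++) {
      EXPECT_NEAR(out_data[i], std::max(0.f, x_data[i]), 1e-6);
    }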
diff --git a/paddle/fluid/lite/kernels/x86/scale_compute.cc b/paddle/fluid/lite/kernels/x86/scale_compute.cc
index 0135a6f614ef4bee841cf21ce946d82e5d50628a..9a71750cf1ed93f641b74e92cf1590be9dd75377 100644
--- a/paddle/fluid/lite/kernels/x86/scale_compute.cc
+++ b/paddle/fluid/lite/kernels/x86/scale_compute.cc
@@ -12,48 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-#include <Eigen/Core>
-#include "paddle/fluid/framework/eigen.h"
-#include "paddle/fluid/framework/operator.h"
-#include "paddle/fluid/lite/core/kernel.h"
-#include "paddle/fluid/lite/core/op_lite.h"
-#include "paddle/fluid/lite/core/op_registry.h"
-#include "paddle/fluid/lite/core/type_system.h"
-#include "paddle/fluid/lite/operators/relu_op.h"
-
-namespace paddle {
-namespace lite {
-namespace kernels {
-namespace x86 {
-
-template <typename T>
-void scale_compute(const T* x, T* out, int size, float scale, float bias,
-                   bool bias_before) {
-  if (bias_before) bias *= scale;
-  for (int i = 0; i < size; i++) {
-    out[i] = x[i] * scale + bias;
-  }
-}
-
-template <typename T>
-class ScaleCompute : public KernelLite<TARGET(kX86), PRECISION(kFloat)> {
- public:
-  using param_t = operators::ScaleParam;
-
-  void Run() override {
-    auto& param = *param_.get_mutable<operators::ScaleParam>();
-    scale_compute(param.x->data<T>(), param.output->mutable_data<T>(),
-                  param.x->dims().production(), param.scale, param.bias,
-                  param.bias_after_scale);
-  }
-
-  virtual ~ScaleCompute() = default;
-};
-
-}  // namespace x86
-}  // namespace kernels
-}  // namespace lite
-}  // namespace paddle
+#include "paddle/fluid/lite/kernels/x86/scale_compute.h"
 
 REGISTER_LITE_KERNEL(scale, kX86, kFloat, kNCHW,
                      paddle::lite::kernels::x86::ScaleCompute<float>, def)
diff --git a/paddle/fluid/lite/kernels/x86/scale_compute.h b/paddle/fluid/lite/kernels/x86/scale_compute.h
new file mode 100644
index 0000000000000000000000000000000000000000..dc54cc07bd81faae19e346a66e1f83edaa39b1e0
--- /dev/null
+++ b/paddle/fluid/lite/kernels/x86/scale_compute.h
@@ -0,0 +1,57 @@
+// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#pragma once
+
+#include <Eigen/Core>
+#include "paddle/fluid/framework/eigen.h"
+#include "paddle/fluid/framework/operator.h"
+#include "paddle/fluid/lite/core/kernel.h"
+#include "paddle/fluid/lite/core/op_lite.h"
+#include "paddle/fluid/lite/core/op_registry.h"
+#include "paddle/fluid/lite/core/type_system.h"
+#include "paddle/fluid/lite/operators/relu_op.h"
+
+namespace paddle {
+namespace lite {
+namespace kernels {
+namespace x86 {
+
+template <typename T>
+void scale_compute(const T* x, T* out, int size, float scale, float bias,
+                   bool bias_before) {
+  if (bias_before) bias *= scale;
+  for (int i = 0; i < size; i++) {
+    out[i] = x[i] * scale + bias;
+  }
+}
+
+template <typename T>
+class ScaleCompute : public KernelLite<TARGET(kX86), PRECISION(kFloat)> {
+ public:
+  using param_t = operators::ScaleParam;
+
+  void Run() override {
+    auto& param = *param_.get_mutable<operators::ScaleParam>();
+    scale_compute(param.x->data<T>(), param.output->mutable_data<T>(),
+                  param.x->dims().production(), param.scale, param.bias,
+                  param.bias_after_scale);
+  }
+
+  virtual ~ScaleCompute() = default;
+};
+
+}  // namespace x86
+}  // namespace kernels
+}  // namespace lite
+}  // namespace paddle
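A note on `scale_compute` above: when the bias is applied before scaling, (x + bias) * scale = x * scale + bias * scale, which is why the helper folds `bias *= scale` up front and then runs a single fused loop for both orderings. A standalone sanity check of that identity (illustrative only; the function names here are ours):

    #include <cassert>
    #include <cmath>

    float bias_then_scale(float x, float scale, float bias) {
      return (x + bias) * scale;  // bias applied first
    }
    float folded(float x, float scale, float bias) {
      bias *= scale;              // the fold done in scale_compute
      return x * scale + bias;
    }

    int main() {
      for (float x : {-2.f, 0.f, 3.5f})
        assert(std::fabs(bias_then_scale(x, 0.5f, 1.f) -
                         folded(x, 0.5f, 1.f)) < 1e-6f);
    }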
+ +#include "paddle/fluid/lite/kernels/x86/scale_compute.h" +#include +#include +#include +#include "paddle/fluid/lite/core/op_registry.h" + +namespace paddle { +namespace lite { +namespace kernels { +namespace x86 { + +TEST(scale_x86, retrive_op) { + auto scale = + KernelRegistry::Global().Create("scale"); + ASSERT_FALSE(scale.empty()); + ASSERT_TRUE(scale.front()); +} + +TEST(scale_x86, init) { + ScaleCompute scale; + ASSERT_EQ(scale.precision(), PRECISION(kFloat)); + ASSERT_EQ(scale.target(), TARGET(kX86)); +} + +TEST(scale_x86, run_test) { + lite::Tensor x, y, out; + constexpr int batch_size = 1; + std::vector x_shape{batch_size, 3, 2, 2}; + x.Resize(lite::DDim(x_shape)); + std::vector out_shape{batch_size, 3, 2, 2}; + out.Resize(lite::DDim(out_shape)); + + auto x_data = x.mutable_data(); + auto out_data = out.mutable_data(); + + for (int64_t i = 0; i < x.dims().production(); i++) { + x_data[i] = static_cast(i); + } + // ScaleCompute scale; + ScaleCompute scale; + operators::ScaleParam param; + + param.x = &x; + param.scale = 0.5; + param.bias = 0; + param.output = &out; + + scale.SetParam(param); + scale.Run(); + + LOG(INFO) << "output: "; + for (int i = 0; i < out.dims().production(); i++) { + LOG(INFO) << out_data[i]; + } +} + +} // namespace x86 +} // namespace kernels +} // namespace lite +} // namespace paddle + +USE_LITE_KERNEL(scale, kX86, kFloat, kNCHW, def); diff --git a/paddle/fluid/lite/kernels/x86/softmax_compute.cc b/paddle/fluid/lite/kernels/x86/softmax_compute.cc index fe408aa3c842396388ceb385802e75bcfeea94d5..5bdb58b6887f5700ba79e9717cf8dc9b67fa07e0 100644 --- a/paddle/fluid/lite/kernels/x86/softmax_compute.cc +++ b/paddle/fluid/lite/kernels/x86/softmax_compute.cc @@ -12,76 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "paddle/fluid/operators/math/softmax.h" -#include "paddle/fluid/framework/eigen.h" -#include "paddle/fluid/framework/lod_tensor.h" -#include "paddle/fluid/framework/operator.h" -#include "paddle/fluid/lite/core/kernel.h" -#include "paddle/fluid/lite/core/op_registry.h" -namespace paddle { -namespace lite { -namespace kernels { -namespace x86 { - -static inline int CanonicalAxis(const int axis, const int rank) { - if (axis < 0) { - return axis + rank; - } - return axis; -} - -static inline int SizeToAxis(const int axis, lite::DDim dims) { - int size = 1; - for (int i = 0; i < axis; i++) { - size *= dims[i]; - } - return size; -} - -static inline int SizeFromAxis(const int axis, lite::DDim dims) { - int size = 1; - for (int i = axis; i < dims.size(); i++) { - size *= dims[i]; - } - return size; -} - -template -class SoftmaxCompute : public KernelLite { - public: - using param_t = operators::SoftmaxParam; - - void Run() override { - auto& param = *param_.get_mutable(); - // auto& context = context_->As(); - CHECK(param.output); - CHECK(param.x); - const int rank = param.x->dims().size(); - const int axis = CanonicalAxis(param.axis, rank); - int axis_dim = param.x->dims()[axis]; - const int n = SizeToAxis(axis, param.x->dims()); - const int d = SizeFromAxis(axis, param.x->dims()); - std::vector shape{n, d}; - - lite::Tensor input_2d, out_2d; - input_2d.ShareDataWith(*param.x); - input_2d.Resize(lite::DDim(shape)); - out_2d.ShareDataWith(*param.output); - out_2d.Resize(lite::DDim(shape)); - - paddle::operators::math::SoftmaxFunctor()( - platform::CPUDeviceContext(), axis_dim, &input_2d.raw_tensor(), - &out_2d.raw_tensor()); - } - - virtual ~SoftmaxCompute() = default; -}; - -} // namespace x86 -} // namespace kernels -} // namespace lite -} // namespace paddle +#include "paddle/fluid/lite/kernels/x86/softmax_compute.h" REGISTER_LITE_KERNEL(softmax, kX86, kFloat, kNCHW, paddle::lite::kernels::x86::SoftmaxCompute, def) diff --git a/paddle/fluid/lite/kernels/x86/softmax_compute.h b/paddle/fluid/lite/kernels/x86/softmax_compute.h new file mode 100644 index 0000000000000000000000000000000000000000..984a56965a822cf567e69a2c12523fefbc94a9d2 --- /dev/null +++ b/paddle/fluid/lite/kernels/x86/softmax_compute.h @@ -0,0 +1,86 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+#pragma once
+
+#include <vector>
+#include "paddle/fluid/framework/eigen.h"
+#include "paddle/fluid/framework/lod_tensor.h"
+#include "paddle/fluid/framework/operator.h"
+#include "paddle/fluid/lite/core/kernel.h"
+#include "paddle/fluid/lite/core/op_registry.h"
+#include "paddle/fluid/operators/math/softmax.h"
+namespace paddle {
+namespace lite {
+namespace kernels {
+namespace x86 {
+
+static inline int CanonicalAxis(const int axis, const int rank) {
+  if (axis < 0) {
+    return axis + rank;
+  }
+  return axis;
+}
+
+static inline int SizeToAxis(const int axis, lite::DDim dims) {
+  int size = 1;
+  for (int i = 0; i < axis; i++) {
+    size *= dims[i];
+  }
+  return size;
+}
+
+static inline int SizeFromAxis(const int axis, lite::DDim dims) {
+  int size = 1;
+  for (size_t i = axis; i < dims.size(); i++) {
+    size *= dims[i];
+  }
+  return size;
+}
+
+template <typename T>
+class SoftmaxCompute : public KernelLite<TARGET(kX86), PRECISION(kFloat)> {
+ public:
+  using param_t = operators::SoftmaxParam;
+
+  void Run() override {
+    auto& param = *param_.get_mutable<operators::SoftmaxParam>();
+    // auto& context = context_->As<X86Context>();
+    CHECK(param.output);
+    CHECK(param.x);
+    const int rank = param.x->dims().size();
+    const int axis = CanonicalAxis(param.axis, rank);
+    int axis_dim = param.x->dims()[axis];
+    const int n = SizeToAxis(axis, param.x->dims());
+    const int d = SizeFromAxis(axis, param.x->dims());
+    std::vector<int64_t> shape{n, d};
+
+    lite::Tensor input_2d, out_2d;
+    input_2d.ShareDataWith(*param.x);
+    input_2d.Resize(lite::DDim(shape));
+    out_2d.ShareDataWith(*param.output);
+    out_2d.Resize(lite::DDim(shape));
+
+    paddle::operators::math::SoftmaxFunctor<platform::CPUDeviceContext, T,
+                                            true>()(
+        platform::CPUDeviceContext(), axis_dim, &input_2d.raw_tensor(),
+        &out_2d.raw_tensor());
+  }
+
+  virtual ~SoftmaxCompute() = default;
+};
+
+}  // namespace x86
+}  // namespace kernels
+}  // namespace lite
+}  // namespace paddle
diff --git a/paddle/fluid/lite/kernels/x86/softmax_compute_test.cc b/paddle/fluid/lite/kernels/x86/softmax_compute_test.cc
new file mode 100644
index 0000000000000000000000000000000000000000..daab7e82a5361105f1e40eea8e0418b26e393848
--- /dev/null
+++ b/paddle/fluid/lite/kernels/x86/softmax_compute_test.cc
@@ -0,0 +1,74 @@
+// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/fluid/lite/kernels/x86/softmax_compute.h"
+#include <gtest/gtest.h>
+#include <vector>
+#include "paddle/fluid/lite/core/op_registry.h"
+
+namespace paddle {
+namespace lite {
+namespace kernels {
+namespace x86 {
+
+TEST(softmax_x86, retrive_op) {
+  auto softmax =
+      KernelRegistry::Global().Create<TARGET(kX86), PRECISION(kFloat)>(
+          "softmax");
+  ASSERT_FALSE(softmax.empty());
+  ASSERT_TRUE(softmax.front());
+}
+
+TEST(softmax_x86, init) {
+  SoftmaxCompute<float> softmax;
+  ASSERT_EQ(softmax.precision(), PRECISION(kFloat));
+  ASSERT_EQ(softmax.target(), TARGET(kX86));
+}
+
+TEST(softmax_x86, run_test) {
+  lite::Tensor x, out;
+  constexpr int batch_size = 1;
+  std::vector<int64_t> x_shape{batch_size, 3, 3, 3};
+  x.Resize(lite::DDim(x_shape));
+  std::vector<int64_t> out_shape{batch_size, 3, 3, 3};
+  out.Resize(lite::DDim(out_shape));
+
+  auto x_data = x.mutable_data<float>();
+  auto out_data = out.mutable_data<float>();
+
+  for (int64_t i = 0; i < x.dims().production(); i++) {
+    x_data[i] = static_cast<float>(i);
+  }
+  SoftmaxCompute<float> softmax;
+  operators::SoftmaxParam param;
+
+  param.x = &x;
+  param.output = &out;
+
+  softmax.SetParam(param);
+  softmax.Run();
+
+  LOG(INFO) << "output: ";
+  for (int i = 0; i < out.dims().production(); i++) {
+    LOG(INFO) << out_data[i];
+  }
+}
+
+}  // namespace x86
+}  // namespace kernels
+}  // namespace lite
+}  // namespace paddle
+
+USE_LITE_KERNEL(softmax, kX86, kFloat, kNCHW, def);
diff --git a/paddle/fluid/lite/model_parser/CMakeLists.txt b/paddle/fluid/lite/model_parser/CMakeLists.txt
index d179e0350ac0edd89912377cc668c6b8888c2638..2690fa0206b1f60b506f7d7b6a76d7abff359fec 100644
--- a/paddle/fluid/lite/model_parser/CMakeLists.txt
+++ b/paddle/fluid/lite/model_parser/CMakeLists.txt
@@ -1,7 +1,7 @@
 #cc_library(runtime_lite SRCS runtime.cc)
 
 #TODO(Superjomn) enable it again.
-if(NOT LITE_WITH_LIGHT_WEIGHT_FRAMEWORK)
+if(NOT LITE_ON_MOBILE)
     lite_cc_test(test_model_parser_lite SRCS model_parser_test.cc
         DEPS model_parser_lite framework_proto_lite
         ARGS --model_dir=${LITE_MODEL_DIR}/lite_naive_model)
@@ -11,20 +11,18 @@ endif()
 
 
-cc_library(compatible_pb_lite SRCS compatible_pb.cc DEPS op_desc_lite framework_proto_lite var_desc_lite)
+cc_library(compatible_pb_lite SRCS compatible_pb.cc
+    DEPS op_desc_lite framework_proto_lite var_desc_lite cpp_op_desc_lite)
 
-set(model_parser_deps variable_lite scope_lite ${tensor_lite} scope_lite
-    target_wrapper_host
-    compatible_pb_lite
-    memory_lite
-    )
-if (LITE_WITH_CUDA)
-    set(model_parser_deps ${model_parser_deps} target_wrapper_cuda)
-endif()
-cc_library(model_parser_lite SRCS model_parser.cc DEPS ${model_parser_deps})
+lite_cc_library(model_parser_lite SRCS model_parser.cc DEPS
+    variable_lite scope_lite ${tensor_lite} scope_lite
+    target_wrapper_host
+    compatible_pb_lite
+    memory_lite
+    CUDA_DEPS target_wrapper_cuda)
 
 lite_cc_test(test_op_desc_lite SRCS op_desc_test.cc DEPS cpp_op_desc_lite op_desc_lite
              compatible_pb_lite)
+
 add_subdirectory(pb)
 add_subdirectory(cpp)
-
diff --git a/paddle/fluid/lite/model_parser/cpp/CMakeLists.txt b/paddle/fluid/lite/model_parser/cpp/CMakeLists.txt
index e6e2fc77f00c691176aa5c20c455964bd9bd5e66..71073179991294aadef40d5df6d23662ec41fcfe 100644
--- a/paddle/fluid/lite/model_parser/cpp/CMakeLists.txt
+++ b/paddle/fluid/lite/model_parser/cpp/CMakeLists.txt
@@ -1,2 +1 @@
 cc_library(cpp_op_desc_lite SRCS op_desc.cc DEPS any_lite)
-
diff --git a/paddle/fluid/lite/model_parser/desc_apis.h b/paddle/fluid/lite/model_parser/desc_apis.h
index d28f82a0e730850f6a05b1a1bc749e856fe7afd9..5981b873f7ce9c878f5d2e79b2d4b547f8b00c80 100644
--- a/paddle/fluid/lite/model_parser/desc_apis.h
+++ b/paddle/fluid/lite/model_parser/desc_apis.h
@@ -14,6 +14,7 @@
 
 #pragma once
 #include <map>
+#include <sstream>
 #include <string>
 #include <vector>
@@ -79,6 +80,27 @@ class OpDescAPI {
   /// Get an attribute.
   template <typename T>
   T GetAttr(const std::string& name) const;
+
+  std::string Repr() const {
+    std::stringstream ss;
+    ss << Type();
+    ss << "(";
+    for (auto& arg : InputArgumentNames()) {
+      ss << arg << ":";
+      for (auto val : Input(arg)) {
+        ss << val << " ";
+      }
+    }
+    ss << ") -> (";
+    for (auto& arg : OutputArgumentNames()) {
+      ss << arg << ":";
+      for (auto val : Output(arg)) {
+        ss << val << " ";
+      }
+    }
+    ss << ")";
+    return ss.str();
+  }
 };
 
 }  // namespace lite
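For a concrete sense of the `Repr()` just added: given a hypothetical mul op with input arguments X = {x}, Y = {w} and output Out = {out}, the method returns `mul(X:x Y:w ) -> (Out:out )` — each value carries a trailing space from the inner loop, which is harmless for the debugging output this string is meant for.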
diff --git a/paddle/fluid/lite/model_parser/model_parser.cc b/paddle/fluid/lite/model_parser/model_parser.cc
index 1b30ca772f872de6fec2b427eee1ad2e96d24576..d69fe4d7f7f61208e8c8a4973dcc648d79ed1cac 100644
--- a/paddle/fluid/lite/model_parser/model_parser.cc
+++ b/paddle/fluid/lite/model_parser/model_parser.cc
@@ -209,7 +209,7 @@ void TensorToStream(std::ostream &os, const lite::Tensor &tensor) {
     os.write(out.data(), size);
   }
   {  // the 3rd field, tensor data
-    uint64_t size = tensor.data_size();
+    uint64_t size = tensor.memory_size();
     CHECK_LT(size, std::numeric_limits<std::streamsize>::max())
         << "Index overflow when writing tensor";
diff --git a/paddle/fluid/lite/model_parser/pb/op_desc.h b/paddle/fluid/lite/model_parser/pb/op_desc.h
index e8772e162a5e7229d57afe18c435a6fa635a87ec..b64ba5452d63a3cb6e4670880a6aed9ac603ac94 100644
--- a/paddle/fluid/lite/model_parser/pb/op_desc.h
+++ b/paddle/fluid/lite/model_parser/pb/op_desc.h
@@ -141,6 +141,8 @@ class OpDesc : public OpDescAPI {
   template <typename T>
   T GetAttr(const std::string &name) const;
 
+  std::string DebugString() const { return desc_.DebugString(); }
+
  private:
   std::vector<std::string> GetArguments(
       const google::protobuf::RepeatedPtrField<std::string>
diff --git a/paddle/fluid/lite/operators/dropout_op.cc b/paddle/fluid/lite/operators/dropout_op.cc
index b5b50dc3d1668712cdbe1af6b809485d9689d588..7c9fb2d0b0ce03739d7058d040348df4841a8f04 100644
--- a/paddle/fluid/lite/operators/dropout_op.cc
+++ b/paddle/fluid/lite/operators/dropout_op.cc
@@ -52,7 +52,7 @@ class DropoutOpLite : public OpLite {
     param_.mask = GetMutableVar<lite::Tensor>(scope, Mask);
 
     param_.dropout_prob = op_desc.GetAttr<float>("dropout_prob");
-    if (op_desc.HasAttr("axis")) {
+    if (op_desc.HasAttr("is_test")) {
       param_.is_test = op_desc.GetAttr<bool>("is_test");
     }
     param_.fix_seed = op_desc.GetAttr<bool>("fix_seed");
diff --git a/paddle/fluid/lite/operators/mul_op.h b/paddle/fluid/lite/operators/mul_op.h
index 7aa1581bb2adb6214877b33382d09f32ca5e225c..a01427b1f4c87f0d29d073879c799720ddd987d7 100644
--- a/paddle/fluid/lite/operators/mul_op.h
+++ b/paddle/fluid/lite/operators/mul_op.h
@@ -38,15 +38,19 @@ class MulOpLite : public OpLite {
   void AttachKernel(KernelBase *kernel) override { kernel->SetParam(param_); }
 
   // TODO(Superjomn) replace framework::OpDesc with a lite one.
   bool AttachImpl(const cpp::OpDesc &op_desc, lite::Scope *scope) override {
+    CHECK(!op_desc.Input("X").empty());
+    CHECK(!op_desc.Input("Y").empty());
+    CHECK(!op_desc.Output("Out").empty());
+
     auto input = op_desc.Input("X").front();
     auto W = op_desc.Input("Y").front();
     auto out = op_desc.Output("Out").front();
 
     auto *var = scope->FindVar(input);
     CHECK(var);
-    param_.x = var->GetMutable<lite::Tensor>();
+    param_.x = &var->Get<lite::Tensor>();
     var = scope->FindVar(W);
     CHECK(var) << "no var called " << W;
-    param_.y = var->GetMutable<lite::Tensor>();
+    param_.y = &var->Get<lite::Tensor>();
     var = scope->FindVar(out);
     CHECK(var) << "no var called " << out;
     param_.output = var->GetMutable<lite::Tensor>();
diff --git a/paddle/fluid/lite/operators/op_params.h b/paddle/fluid/lite/operators/op_params.h
index 0cc1e6b78e99902724857af7b13cf2fd84500243..b50e14a485526369777cbf3b44fd6e6f21e4ae33 100644
--- a/paddle/fluid/lite/operators/op_params.h
+++ b/paddle/fluid/lite/operators/op_params.h
@@ -67,8 +67,8 @@ struct ReluParam {
 
 // For Mul Op
 struct MulParam {
-  lite::Tensor* x{};
-  lite::Tensor* y{};
+  const lite::Tensor* x{};
+  const lite::Tensor* y{};
   lite::Tensor* output{};
 
   int x_num_col_dims{1};
diff --git a/paddle/fluid/lite/operators/use_ops.h b/paddle/fluid/lite/operators/use_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..8f7599042b5538a9bff248a84c5f3f3980c9500b
--- /dev/null
+++ b/paddle/fluid/lite/operators/use_ops.h
@@ -0,0 +1,36 @@
+// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+/*
+ * ATTENTION this header file can only be included in .cc files.
+ */
+
+USE_LITE_OP(mul);
+USE_LITE_OP(fc);
+USE_LITE_OP(relu);
+USE_LITE_OP(scale);
+USE_LITE_OP(feed);
+USE_LITE_OP(fetch);
+USE_LITE_OP(io_copy);
+USE_LITE_OP(elementwise_add)
+USE_LITE_OP(elementwise_sub)
+USE_LITE_OP(square)
+USE_LITE_OP(softmax)
+USE_LITE_OP(dropout)
+USE_LITE_OP(concat)
+USE_LITE_OP(conv2d)
+USE_LITE_OP(depthwise_conv2d)
+USE_LITE_OP(pool2d)
+USE_LITE_OP(batch_norm)
diff --git a/paddle/fluid/lite/tools/build.sh b/paddle/fluid/lite/tools/build.sh
index f1c58489ce690b2ad639da48d48170e79835d9e0..70a23ecf691ccb9667509868ea774ddc6b0659a0 100755
--- a/paddle/fluid/lite/tools/build.sh
+++ b/paddle/fluid/lite/tools/build.sh
@@ -54,21 +54,6 @@ function check_style {
     fi
 }
 
-function cmake_arm {
-    # $1: ARM_TARGET_OS in "android" , "armlinux"
-    # $2: ARM_TARGET_ARCH_ABI in "armv8", "armv7" ,"armv7hf"
-    cmake .. \
-        -DWITH_GPU=OFF \
-        -DWITH_MKL=OFF \
-        -DWITH_LITE=ON \
-        -DLITE_WITH_CUDA=OFF \
-        -DLITE_WITH_X86=OFF \
-        -DLITE_WITH_ARM=ON \
-        -DLITE_WITH_LIGHT_WEIGHT_FRAMEWORK=ON \
-        -DWITH_TESTING=ON \
-        -DARM_TARGET_OS=$1 -DARM_TARGET_ARCH_ABI=$2
-}
-
 function build_single {
     #make $1 -j$(expr $(nproc) - 2)
     make $1 -j$NUM_CORES_FOR_COMPILE
@@ -113,47 +98,141 @@ function test_arm_android {
     echo "test name: ${test_name}"
     adb_work_dir="/data/local/tmp"
-    skip_list="test_model_parser_lite" # add more with space
-    [[ $skip_list =~ (^|[[:space:]])$test_name($|[[:space:]]) ]] && continue || echo 'skip $test_name'
+
+    skip_list=("test_model_parser_lite" "test_cxx_api_lite")
+    for skip_name in ${skip_list[@]} ; do
+        [[ $skip_name =~ (^|[[:space:]])$test_name($|[[:space:]]) ]] && echo "skip $test_name" && return
+    done
+
     testpath=$(find ./paddle/fluid -name ${test_name})
     adb -s emulator-${port} push ${testpath} ${adb_work_dir}
     adb -s emulator-${port} shell chmod +x "${adb_work_dir}/${test_name}"
     adb -s emulator-${port} shell "./${adb_work_dir}/${test_name}"
 }
 
-# Build the code and run lite arm tests. This is executed in the CI system.
-function build_test_arm {
-    # 1. Build goes first
+function test_arm_model {
+    local test_name=$1
+    local port=$2
+    local model_dir=$3
+
+    if [[ "${test_name}x" == "x" ]]; then
+        echo "test_name can not be empty"
+        exit 1
+    fi
+    if [[ "${port}x" == "x" ]]; then
+        echo "Port can not be empty"
+        exit 1
+    fi
+    if [[ "${model_dir}x" == "x" ]]; then
+        echo "Model dir can not be empty"
+        exit 1
+    fi
+
+    echo "test name: ${test_name}"
+    adb_work_dir="/data/local/tmp"
+
+    testpath=$(find ./paddle/fluid -name ${test_name})
+    adb -s emulator-${port} push ${model_dir} ${adb_work_dir}
+    adb -s emulator-${port} push ${testpath} ${adb_work_dir}
+    adb -s emulator-${port} shell chmod +x "${adb_work_dir}/${test_name}"
+    local adb_model_path="./${adb_work_dir}/`basename ${model_dir}`"
+    adb -s emulator-${port} shell "./${adb_work_dir}/${test_name} --eval_model_dir=$adb_model_path"
+
+}
+
+function cmake_arm {
+    # $1: ARM_TARGET_OS in "android" , "armlinux"
+    # $2: ARM_TARGET_ARCH_ABI in "armv8", "armv7" ,"armv7hf"
+    # $3: ARM_TARGET_LANG in "gcc" "clang"
+    cmake .. \
+        -DWITH_GPU=OFF \
+        -DWITH_MKL=OFF \
+        -DWITH_LITE=ON \
+        -DLITE_WITH_CUDA=OFF \
+        -DLITE_WITH_X86=OFF \
+        -DLITE_WITH_ARM=ON \
+        -DLITE_WITH_LIGHT_WEIGHT_FRAMEWORK=ON \
+        -DWITH_TESTING=ON \
+        -DARM_TARGET_OS=$1 -DARM_TARGET_ARCH_ABI=$2 -DARM_TARGET_LANG=$3
+}
+
+# $1: ARM_TARGET_OS in "android" , "armlinux"
+# $2: ARM_TARGET_ARCH_ABI in "armv8", "armv7" ,"armv7hf"
+# $3: ARM_TARGET_LANG in "gcc" "clang"
+function build_arm {
+    os=$1
+    abi=$2
+    lang=$3
 
     cur_dir=$(pwd)
-    for os in "android" "armlinux" ; do
-        for abi in "armv8" "armv7" "armv7hf"; do
-            # TODO(hongming): enable compile armv7 and armv7hf on armlinux
-            if [[ ${abi} == "armv7hf" ]]; then
-                echo "armv7hf is not supported on both android and armlinux yet"
-                continue
-            fi
-
-            # TODO(hongming): enable armv7 on armlinux
-            if [[ ${os} == "armlinux" && ${abi} == "armv7" ]]; then
-                echo "armv7 is not supported on armlinux yet"
-                continue
-            fi
-
-            if [[ ${os} == "android" && ${abi} == "armv7hf" ]]; then
-                echo "android do not need armv7hf"
-                continue
-            fi
-
-            build_dir=$cur_dir/build.lite.${os}.${abi}
-            mkdir -p $build_dir
-            cd $build_dir
-
-            cmake_arm ${os} ${abi}
-            build $TESTS_FILE
-        done
-    done
+    if [[ ${os} == "armlinux" ]]; then
+        # TODO(hongming): enable compile armv7 and armv7hf on armlinux, and clang compile
+        if [[ ${lang} == "clang" ]]; then
+            echo "clang is not enabled on armlinux yet"
+            return 0
+        fi
+        if [[ ${abi} == "armv7hf" ]]; then
+            echo "armv7hf is not supported on armlinux yet"
+            return 0
+        fi
+        if [[ ${abi} == "armv7" ]]; then
+            echo "armv7 is not supported on armlinux yet"
+            return 0
+        fi
+    fi
+
+    if [[ ${os} == "android" && ${abi} == "armv7hf" ]]; then
+        echo "android do not need armv7hf"
+        return 0
+    fi
+
+    build_dir=$cur_dir/build.lite.${os}.${abi}.${lang}
+    mkdir -p $build_dir
+    cd $build_dir
+
+    cmake_arm ${os} ${abi} ${lang}
+    build $TESTS_FILE
+}
+
+# $1: ARM_TARGET_OS in "android" , "armlinux"
+# $2: ARM_TARGET_ARCH_ABI in "armv8", "armv7" ,"armv7hf"
+# $3: ARM_TARGET_LANG in "gcc" "clang"
+# $4: android test port
+# Note: test must be in build dir
+function test_arm {
+    os=$1
+    abi=$2
+    lang=$3
+    port=$4
+    if [[ ${os} == "armlinux" ]]; then
+        # TODO(hongming): enable test armlinux on armv8, armv7 and armv7hf
+        echo "Skip test arm linux yet. armlinux must in another docker"
+        return 0
+    fi
+
+    if [[ ${os} == "android" && ${abi} == "armv7hf" ]]; then
+        echo "android do not need armv7hf"
+        return 0
+    fi
+
+    # TODO(yuanshuai): enable armv7 on android
+    if [[ ${abi} == "armv7" ]]; then
+        echo "skip android v7 test yet"
+        return 0
+    fi
+
+    echo "test file: ${TESTS_FILE}"
+    for _test in $(cat $TESTS_FILE); do
+        test_arm_android $_test $port
+    done
+    # TODO(sangoly): refine this
+    test_arm_model "test_cxx_api_lite" $port "./third_party/install/mobilenet_v2_relu"
+}
 
-    # 2. Then test
+# Build the code and run lite arm tests. This is executed in the CI system.
+function build_test_arm {
+    ########################################################################
+    # job 1-4 must be in one runner
     port_armv8=5554
     port_armv7=5556
@@ -167,35 +246,46 @@
     echo -ne '\n' | ${ANDROID_HOME}/emulator/emulator -avd paddle-armv7 -noaudio -no-window -gpu off -verbose -port ${port_armv7} &
     sleep 1m
-    # now can only test android.
- for abi in "armv8" "armv7" ; do - # TODO(yuanshuai): enable armv7 on android - if [[ ${abi} == "armv7" ]]; then - continue - fi - - build_dir=$cur_dir/build.lite.android.${abi} - cd $build_dir + # job 1 + build_arm "android" "armv8" "gcc" + test_arm "android" "armv8" "gcc" ${port_armv8} + cd - - local port= - if [[ ${abi} == "armv7" ]]; then - port=${port_armv7} - fi + # job 2 + build_arm "android" "armv8" "clang" + test_arm "android" "armv8" "clang" ${port_armv8} + cd - - if [[ ${abi} == "armv8" ]]; then - port=${port_armv8} - fi - echo "test file: ${TESTS_FILE}" - for _test in $(cat $TESTS_FILE); do - test_arm_android $_test $port - done - done + # job 3 + build_arm "android" "armv7" "gcc" + test_arm "android" "armv7" "gcc" ${port_armv7} + cd - - # armlinux need in another docker - # TODO(hongming): enable test armlinux on armv8, armv7 and armv7hf + # job 4 + build_arm "android" "armv7" "clang" + test_arm "android" "armv7" "clang" ${port_armv7} + cd - adb devices | grep emulator | cut -f1 | while read line; do adb -s $line emu kill; done echo "Done" + ######################################################################## + + # job 5 + build_arm "armlinux" "armv8" + test_arm "armlinux" "armv8" + cd - + + # job 6 + build_arm "armlinux" "armv7" + test_arm "armlinux" "armv7" + cd - + + # job 7 + build_arm "armlinux" "armv7hf" + test_arm "armlinux" "armv7hf" + cd - + + echo "Done" } ############################# MAIN ################################# @@ -236,6 +326,10 @@ function main { ARM_ABI="${i#*=}" shift ;; + --arm_lang=*) + ARM_LANG="${i#*=}" + shift + ;; --arm_port=*) ARM_PORT="${i#*=}" shift @@ -258,13 +352,21 @@ function main { shift ;; cmake_arm) - cmake_arm $ARM_OS $ARM_ABI + cmake_arm $ARM_OS $ARM_ABI $ARM_LANG + shift + ;; + build_arm) + build_arm $ARM_OS $ARM_ABI $ARM_LANG shift ;; test_server) test_lite $TESTS_FILE shift ;; + test_arm) + build_arm $ARM_OS $ARM_ABI $ARM_LANG $ARM_PORT + shift + ;; test_arm_android) test_arm_android $TEST_NAME $ARM_PORT shift diff --git a/paddle/fluid/lite/tools/mobile_readme.md b/paddle/fluid/lite/tools/mobile_readme.md index 1d866cbb8226bf56944a2f084432cd3a16a30475..08bd7b0f5d6728eb5ac0b5734a60befe66bd876b 100644 --- a/paddle/fluid/lite/tools/mobile_readme.md +++ b/paddle/fluid/lite/tools/mobile_readme.md @@ -26,6 +26,7 @@ $ git checkout incubate/lite - "armv8", 等效于 "arm64"。 default值为这个。 - "armv7hf", 等效于使用`eabihf`且`-march=armv7-a -mfloat-abi=hard -mfpu=neon-vfpv4 `。 - "armv7", 等效于使用`eabi`且`-march=armv7-a -mfloat-abi=softfp -mfpu=neon-vfpv4`。 +- `ARM_TARGET_LANG` 代表目标编译的语言, 默认为gcc,支持 gcc和clang两种。 ### 编译 diff --git a/paddle/fluid/lite/utils/varient.h b/paddle/fluid/lite/utils/varient.h index 2d2a3061108978364cfebfd1c2b4389e008c5115..52bbcffcef980a13591d13d2bfc2bbc17069aaed 100644 --- a/paddle/fluid/lite/utils/varient.h +++ b/paddle/fluid/lite/utils/varient.h @@ -20,6 +20,7 @@ #include #include #include "paddle/fluid/lite/utils/cp_logging.h" +#include "paddle/fluid/lite/utils/string.h" // This is an equivalent implementation of boost::any. We implement this to // avoid including the whole boost library and keep the inference library small. 
@@ -116,9 +117,9 @@ struct variant { if (type_id == typeid(T).hash_code()) return *reinterpret_cast(&data); else - throw std::invalid_argument("unmatched type"); - // LOG(FATAL) << "unmatched type get, should be " << type_id << " but get " - // << typeid(T).name(); + throw std::invalid_argument( + string_format("unmatched type, store as %d, but want to get %s", + type_id, typeid(T).name())); return *reinterpret_cast(&data); } diff --git a/paddle/fluid/lite/x86/CMakeLists.txt b/paddle/fluid/lite/x86/CMakeLists.txt index 515933e2588844f2795ca676269965db9a9770fd..be772b921b4edc989e3ce25143bb88360fbb10b6 100644 --- a/paddle/fluid/lite/x86/CMakeLists.txt +++ b/paddle/fluid/lite/x86/CMakeLists.txt @@ -3,5 +3,3 @@ if (NOT LITE_WITH_X86) endif() cc_library(target_wrapper_x86 SRCS target_wrapper.cc) - -