From 3baaee9aabfacf10d3e1f93a3c6230ea3198a649 Mon Sep 17 00:00:00 2001
From: mozga-intel
Date: Thu, 9 Apr 2020 07:55:11 +0200
Subject: [PATCH] Remove: NGraph engine from PDPD repository (#23545)

* Remove the nGraph engine from the PDPD repository:
  1. Each operator was removed from the operators directory.
  2. Each test was removed from the unittest directory.
  3. Parallel executor support for nGraph was removed from PDPD.
  4. The nGraph CMake file was removed from PDPD.
  5. The nGraph flags were removed from the repository.
  test=develop

* Remove nGraph from:
  1. CMake files
  2. Python files
  test=develop
---
 CMakeLists.txt | 1 -
 cmake/external/ngraph.cmake | 66 --
 cmake/inference_lib.cmake | 7 -
 cmake/third_party.cmake | 19 -
 go/paddle/config.go | 8 -
 paddle/fluid/framework/CMakeLists.txt | 10 +-
 paddle/fluid/framework/details/CMakeLists.txt | 7 -
 .../fluid/framework/details/build_strategy.cc | 23 -
 paddle/fluid/framework/executor.cc | 22 -
 paddle/fluid/framework/ir/CMakeLists.txt | 8 -
 .../framework/ir/ngraph_subgraph_pass.cc | 182 ------
 .../fluid/framework/ir/ngraph_subgraph_pass.h | 42 --
 .../fluid/framework/ir/subgraph_detector.cc | 7 -
 paddle/fluid/framework/parallel_executor.cc | 9 -
 paddle/fluid/framework/unused_var_check.cc | 4 +-
 .../inference/analysis/ir_pass_manager.cc | 4 -
 paddle/fluid/inference/api/CMakeLists.txt | 10 +-
 paddle/fluid/inference/api/analysis_config.cc | 28 -
 .../inference/api/demo_ci/CMakeLists.txt | 12 +-
 .../inference/api/paddle_analysis_config.h | 13 -
 .../inference/api/paddle_pass_builder.cc | 14 -
 .../fluid/inference/api/paddle_pass_builder.h | 9 -
 paddle/fluid/inference/capi/paddle_c_api.h | 5 -
 paddle/fluid/inference/capi/pd_config.cc | 10 -
 .../tests/api/analyzer_bert_tester.cc | 28 +-
 .../tests/api/analyzer_capi_gpu_tester.cc | 3 -
 .../inference/tests/api/config_printer.h | 2 -
 paddle/fluid/operators/CMakeLists.txt | 1 -
 paddle/fluid/operators/ngraph/CMakeLists.txt | 6 -
 .../fluid/operators/ngraph/ngraph_bridge.cc | 71 ---
 paddle/fluid/operators/ngraph/ngraph_bridge.h | 51 --
 .../fluid/operators/ngraph/ngraph_engine.cc | 601 ------------------
 paddle/fluid/operators/ngraph/ngraph_engine.h | 203 ------
 .../operators/ngraph/ngraph_engine_op.cc | 51 --
 .../fluid/operators/ngraph/ngraph_engine_op.h | 56 --
 .../fluid/operators/ngraph/ops/CMakeLists.txt | 8 -
 .../fluid/operators/ngraph/ops/accuracy_op.h | 70 --
 .../operators/ngraph/ops/activation_op.h | 117 ----
 paddle/fluid/operators/ngraph/ops/adam_op.h | 84 ---
 paddle/fluid/operators/ngraph/ops/assign_op.h | 43 --
 .../operators/ngraph/ops/batch_norm_op.h | 163 -----
 .../operators/ngraph/ops/binary_unary_op.h | 61 --
 paddle/fluid/operators/ngraph/ops/cast_op.h | 47 --
 paddle/fluid/operators/ngraph/ops/concat_op.h | 53 --
 paddle/fluid/operators/ngraph/ops/conv2d_op.h | 242 -------
 .../operators/ngraph/ops/cross_entropy_op.h | 244 -------
 .../fluid/operators/ngraph/ops/dropout_op.h | 112 ----
 .../operators/ngraph/ops/elementwise_add_op.h | 93 ---
 .../ops/elementwise_binary_prepare_node.h | 78 ---
 .../operators/ngraph/ops/elementwise_div_op.h | 103 ---
 .../operators/ngraph/ops/elementwise_mul_op.h | 111 ----
 .../operators/ngraph/ops/elementwise_node.h | 75 ---
 .../ngraph/ops/elementwise_scalar_op.h | 59 --
 .../operators/ngraph/ops/fill_constant_op.h | 51 --
 .../operators/ngraph/ops/fill_zeros_like_op.h | 45 --
 paddle/fluid/operators/ngraph/ops/gather_op.h | 77 ---
 .../fluid/operators/ngraph/ops/increment_op.h | 49 --
 .../operators/ngraph/ops/layer_norm_op.h | 195 ------
 .../operators/ngraph/ops/lookup_table_op.h | 109 ----
 paddle/fluid/operators/ngraph/ops/lrn_op.h | 54 --
 paddle/fluid/operators/ngraph/ops/matmul_op.h | 248 --------
 paddle/fluid/operators/ngraph/ops/mean_op.h | 72 ---
 .../fluid/operators/ngraph/ops/momentum_op.h | 106 ---
 paddle/fluid/operators/ngraph/ops/mul_op.h | 143 -----
 paddle/fluid/operators/ngraph/ops/op_bridge.h | 84 ---
 paddle/fluid/operators/ngraph/ops/pool2d_op.h | 191 ------
 .../operators/ngraph/ops/reduce_sum_op.h | 161 -----
 .../fluid/operators/ngraph/ops/reshape_op.h | 112 ----
 paddle/fluid/operators/ngraph/ops/scale_op.h | 44 --
 paddle/fluid/operators/ngraph/ops/slice_op.h | 121 ----
 .../fluid/operators/ngraph/ops/softmax_op.h | 93 ---
 .../ops/softmax_with_cross_entropy_op.h | 90 ---
 paddle/fluid/operators/ngraph/ops/stack_op.h | 56 --
 paddle/fluid/operators/ngraph/ops/sum_op.h | 59 --
 paddle/fluid/operators/ngraph/ops/top_k_op.h | 49 --
 .../fluid/operators/ngraph/ops/transpose_op.h | 101 ---
 paddle/fluid/platform/ngraph_helper.h | 199 ------
 .../pybind/global_value_getter_setter.cc | 2 +-
 paddle/fluid/pybind/inference_api.cc | 3 -
 paddle/fluid/pybind/pybind.cc | 12 -
 paddle/scripts/paddle_build.sh | 2 -
 python/paddle/fluid/__init__.py | 3 -
 .../fluid/tests/unittests/CMakeLists.txt | 4 -
 .../tests/unittests/ngraph/CMakeLists.txt | 6 -
 .../fluid/tests/unittests/ngraph/__init__.py | 13 -
 .../ngraph/test_accuracy_ngraph_op.py | 22 -
 .../ngraph/test_activation_ngraph_op.py | 48 --
 .../unittests/ngraph/test_adam_ngraph_op.py | 22 -
 .../unittests/ngraph/test_assign_ngraph_op.py | 22 -
 .../ngraph/test_batch_norm_ngraph_op.py | 25 -
 .../unittests/ngraph/test_cast_ngraph_op.py | 22 -
 .../ngraph/test_compare_ngraph_op.py | 23 -
 .../unittests/ngraph/test_concat_ngraph_op.py | 22 -
 .../unittests/ngraph/test_conv2d_ngraph_op.py | 68 --
 .../ngraph/test_cross_entropy_ngraph_op.py | 22 -
 .../ngraph/test_dropout_ngraph_op.py | 22 -
 .../ngraph/test_elementwise_add_ngraph_op.py | 22 -
 .../ngraph/test_elementwise_div_ngraph_op.py | 22 -
 .../ngraph/test_elementwise_max_ngraph_op.py | 22 -
 .../ngraph/test_elementwise_min_ngraph_op.py | 22 -
 .../ngraph/test_elementwise_mul_ngraph_op.py | 22 -
 .../ngraph/test_elementwise_pow_ngraph_op.py | 22 -
 .../ngraph/test_elementwise_sub_ngraph_op.py | 22 -
 .../ngraph/test_fill_constant_ngraph_op.py | 48 --
 .../ngraph/test_fill_zeros_like_ngraph_op.py | 21 -
 .../unittests/ngraph/test_gather_ngraph_op.py | 21 -
 .../ngraph/test_increment_ngraph_op.py | 46 --
 .../ngraph/test_layer_norm_ngraph_op.py | 30 -
 .../ngraph/test_logical_ngraph_op.py | 24 -
 .../ngraph/test_lookup_table_ngraph_op.py | 21 -
 .../unittests/ngraph/test_lrn_ngraph_op.py | 30 -
 .../unittests/ngraph/test_matmul_ngraph_op.py | 22 -
 .../unittests/ngraph/test_mean_ngraph_op.py | 21 -
 .../ngraph/test_momentum_ngraph_op.py | 22 -
 .../unittests/ngraph/test_mul_ngraph_op.py | 22 -
 .../ngraph/test_parallel_executor_ngraph.py | 87 ---
 .../unittests/ngraph/test_pool2d_ngraph_op.py | 39 --
 .../unittests/ngraph/test_reduce_ngraph_op.py | 37 --
 .../ngraph/test_reshape_ngraph_op.py | 23 -
 .../unittests/ngraph/test_scale_ngraph_op.py | 21 -
 .../unittests/ngraph/test_slice_ngraph_op.py | 22 -
 .../ngraph/test_softmax_ngraph_op.py | 21 -
 ...st_softmax_with_cross_entropy_ngraph_op.py | 21 -
 .../unittests/ngraph/test_stack_ngraph_op.py | 22 -
 .../unittests/ngraph/test_sum_ngraph_op.py | 21 -
 .../unittests/ngraph/test_top_k_ngraph_op.py | 21 -
 .../ngraph/test_transpose_ngraph_op.py | 22 -
 .../paddle/fluid/tests/unittests/op_test.py | 27 +-
 .../test_eager_deletion_delete_vars.py | 1 -
 .../test_global_var_getter_setter.py | 1 -
 python/setup.py.in | 40 +-
 131 files changed, 23 insertions(+), 7113 deletions(-)
 delete mode 100644 cmake/external/ngraph.cmake
 delete mode 100644 paddle/fluid/framework/ir/ngraph_subgraph_pass.cc
 delete mode 100644 paddle/fluid/framework/ir/ngraph_subgraph_pass.h
 delete mode 100644 paddle/fluid/operators/ngraph/CMakeLists.txt
 delete mode 100644 paddle/fluid/operators/ngraph/ngraph_bridge.cc
 delete mode 100644 paddle/fluid/operators/ngraph/ngraph_bridge.h
 delete mode 100644 paddle/fluid/operators/ngraph/ngraph_engine.cc
 delete mode 100644 paddle/fluid/operators/ngraph/ngraph_engine.h
 delete mode 100644 paddle/fluid/operators/ngraph/ngraph_engine_op.cc
 delete mode 100644 paddle/fluid/operators/ngraph/ngraph_engine_op.h
 delete mode 100644 paddle/fluid/operators/ngraph/ops/CMakeLists.txt
 delete mode 100644 paddle/fluid/operators/ngraph/ops/accuracy_op.h
 delete mode 100644 paddle/fluid/operators/ngraph/ops/activation_op.h
 delete mode 100644 paddle/fluid/operators/ngraph/ops/adam_op.h
 delete mode 100644 paddle/fluid/operators/ngraph/ops/assign_op.h
 delete mode 100644 paddle/fluid/operators/ngraph/ops/batch_norm_op.h
 delete mode 100644 paddle/fluid/operators/ngraph/ops/binary_unary_op.h
 delete mode 100644 paddle/fluid/operators/ngraph/ops/cast_op.h
 delete mode 100644 paddle/fluid/operators/ngraph/ops/concat_op.h
 delete mode 100644 paddle/fluid/operators/ngraph/ops/conv2d_op.h
 delete mode 100644 paddle/fluid/operators/ngraph/ops/cross_entropy_op.h
 delete mode 100644 paddle/fluid/operators/ngraph/ops/dropout_op.h
 delete mode 100644 paddle/fluid/operators/ngraph/ops/elementwise_add_op.h
 delete mode 100644 paddle/fluid/operators/ngraph/ops/elementwise_binary_prepare_node.h
 delete mode 100644 paddle/fluid/operators/ngraph/ops/elementwise_div_op.h
 delete mode 100644 paddle/fluid/operators/ngraph/ops/elementwise_mul_op.h
 delete mode 100644 paddle/fluid/operators/ngraph/ops/elementwise_node.h
 delete mode 100644 paddle/fluid/operators/ngraph/ops/elementwise_scalar_op.h
 delete mode 100644 paddle/fluid/operators/ngraph/ops/fill_constant_op.h
 delete mode 100644 paddle/fluid/operators/ngraph/ops/fill_zeros_like_op.h
 delete mode 100644 paddle/fluid/operators/ngraph/ops/gather_op.h
 delete mode 100644 paddle/fluid/operators/ngraph/ops/increment_op.h
 delete mode 100644 paddle/fluid/operators/ngraph/ops/layer_norm_op.h
 delete mode 100644 paddle/fluid/operators/ngraph/ops/lookup_table_op.h
 delete mode 100644 paddle/fluid/operators/ngraph/ops/lrn_op.h
 delete mode 100644 paddle/fluid/operators/ngraph/ops/matmul_op.h
 delete mode 100644 paddle/fluid/operators/ngraph/ops/mean_op.h
 delete mode 100644 paddle/fluid/operators/ngraph/ops/momentum_op.h
 delete mode 100644 paddle/fluid/operators/ngraph/ops/mul_op.h
 delete mode 100644 paddle/fluid/operators/ngraph/ops/op_bridge.h
 delete mode 100644 paddle/fluid/operators/ngraph/ops/pool2d_op.h
 delete mode 100644 paddle/fluid/operators/ngraph/ops/reduce_sum_op.h
 delete mode 100644 paddle/fluid/operators/ngraph/ops/reshape_op.h
 delete mode 100644 paddle/fluid/operators/ngraph/ops/scale_op.h
 delete mode 100644 paddle/fluid/operators/ngraph/ops/slice_op.h
 delete mode 100644 paddle/fluid/operators/ngraph/ops/softmax_op.h
 delete mode 100644 paddle/fluid/operators/ngraph/ops/softmax_with_cross_entropy_op.h
 delete mode 100644 paddle/fluid/operators/ngraph/ops/stack_op.h
 delete mode 100644 paddle/fluid/operators/ngraph/ops/sum_op.h
 delete mode 100644 paddle/fluid/operators/ngraph/ops/top_k_op.h
 delete mode 100644 paddle/fluid/operators/ngraph/ops/transpose_op.h
 delete mode 100644 paddle/fluid/platform/ngraph_helper.h
 delete mode 100644 python/paddle/fluid/tests/unittests/ngraph/CMakeLists.txt
 delete mode 100644 python/paddle/fluid/tests/unittests/ngraph/__init__.py
 delete mode 100644 python/paddle/fluid/tests/unittests/ngraph/test_accuracy_ngraph_op.py
 delete mode 100644 python/paddle/fluid/tests/unittests/ngraph/test_activation_ngraph_op.py
 delete mode 100644 python/paddle/fluid/tests/unittests/ngraph/test_adam_ngraph_op.py
 delete mode 100644 python/paddle/fluid/tests/unittests/ngraph/test_assign_ngraph_op.py
 delete mode 100644 python/paddle/fluid/tests/unittests/ngraph/test_batch_norm_ngraph_op.py
 delete mode 100644 python/paddle/fluid/tests/unittests/ngraph/test_cast_ngraph_op.py
 delete mode 100644 python/paddle/fluid/tests/unittests/ngraph/test_compare_ngraph_op.py
 delete mode 100644 python/paddle/fluid/tests/unittests/ngraph/test_concat_ngraph_op.py
 delete mode 100644 python/paddle/fluid/tests/unittests/ngraph/test_conv2d_ngraph_op.py
 delete mode 100644 python/paddle/fluid/tests/unittests/ngraph/test_cross_entropy_ngraph_op.py
 delete mode 100644 python/paddle/fluid/tests/unittests/ngraph/test_dropout_ngraph_op.py
 delete mode 100644 python/paddle/fluid/tests/unittests/ngraph/test_elementwise_add_ngraph_op.py
 delete mode 100644 python/paddle/fluid/tests/unittests/ngraph/test_elementwise_div_ngraph_op.py
 delete mode 100644 python/paddle/fluid/tests/unittests/ngraph/test_elementwise_max_ngraph_op.py
 delete mode 100644 python/paddle/fluid/tests/unittests/ngraph/test_elementwise_min_ngraph_op.py
 delete mode 100644 python/paddle/fluid/tests/unittests/ngraph/test_elementwise_mul_ngraph_op.py
 delete mode 100644 python/paddle/fluid/tests/unittests/ngraph/test_elementwise_pow_ngraph_op.py
 delete mode 100644 python/paddle/fluid/tests/unittests/ngraph/test_elementwise_sub_ngraph_op.py
 delete mode 100644 python/paddle/fluid/tests/unittests/ngraph/test_fill_constant_ngraph_op.py
 delete mode 100644 python/paddle/fluid/tests/unittests/ngraph/test_fill_zeros_like_ngraph_op.py
 delete mode 100644 python/paddle/fluid/tests/unittests/ngraph/test_gather_ngraph_op.py
 delete mode 100644 python/paddle/fluid/tests/unittests/ngraph/test_increment_ngraph_op.py
 delete mode 100644 python/paddle/fluid/tests/unittests/ngraph/test_layer_norm_ngraph_op.py
 delete mode 100644 python/paddle/fluid/tests/unittests/ngraph/test_logical_ngraph_op.py
 delete mode 100644 python/paddle/fluid/tests/unittests/ngraph/test_lookup_table_ngraph_op.py
 delete mode 100644 python/paddle/fluid/tests/unittests/ngraph/test_lrn_ngraph_op.py
 delete mode 100644 python/paddle/fluid/tests/unittests/ngraph/test_matmul_ngraph_op.py
 delete mode 100644 python/paddle/fluid/tests/unittests/ngraph/test_mean_ngraph_op.py
 delete mode 100644 python/paddle/fluid/tests/unittests/ngraph/test_momentum_ngraph_op.py
 delete mode 100644 python/paddle/fluid/tests/unittests/ngraph/test_mul_ngraph_op.py
 delete mode 100644 python/paddle/fluid/tests/unittests/ngraph/test_parallel_executor_ngraph.py
 delete mode 100644 python/paddle/fluid/tests/unittests/ngraph/test_pool2d_ngraph_op.py
 delete mode 100644 python/paddle/fluid/tests/unittests/ngraph/test_reduce_ngraph_op.py
 delete mode 100644 python/paddle/fluid/tests/unittests/ngraph/test_reshape_ngraph_op.py
 delete mode 100644 python/paddle/fluid/tests/unittests/ngraph/test_scale_ngraph_op.py
 delete mode 100644 python/paddle/fluid/tests/unittests/ngraph/test_slice_ngraph_op.py
 delete mode 100644 python/paddle/fluid/tests/unittests/ngraph/test_softmax_ngraph_op.py
 delete mode 100644 python/paddle/fluid/tests/unittests/ngraph/test_softmax_with_cross_entropy_ngraph_op.py
 delete mode 100644 python/paddle/fluid/tests/unittests/ngraph/test_stack_ngraph_op.py
 delete mode 100644 python/paddle/fluid/tests/unittests/ngraph/test_sum_ngraph_op.py
 delete mode 100644 python/paddle/fluid/tests/unittests/ngraph/test_top_k_ngraph_op.py
 delete mode 100644 python/paddle/fluid/tests/unittests/ngraph/test_transpose_ngraph_op.py
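Note for reviewers: nGraph was opted into through three layers, all deleted by this patch: a build switch, a runtime flag, and a per-predictor config call. A minimal sketch of the user-facing surface that goes away (the helper function here is hypothetical; the config calls and flag names are the ones removed in the hunks below):

    // Before this patch, an inference build compiled with -DWITH_NGRAPH=ON
    // could turn the engine on per predictor:
    #include "paddle/fluid/inference/api/paddle_analysis_config.h"

    void SetupConfig(paddle::AnalysisConfig* config) {  // hypothetical helper
      config->EnableNgraph();          // removed from AnalysisConfig below
      if (config->ngraph_enabled()) {  // removed as well
        // ngraph_subgraph_pass would be prepended to the CPU pass list
      }
    }

    // Training/executor paths were gated instead by the global flag
    // FLAGS_use_ngraph (DEFINE_bool in executor.cc, also deleted below).

After this patch none of these entry points exist, so callers simply drop them; there is no replacement API.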
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 87fc787b74..03da6ebd36 100755
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -72,7 +72,6 @@ option(ON_INFER "Turn on inference optimization and inference-lib genera
 ################################ Internal Configurations #######################################
 option(WITH_AMD_GPU "Compile PaddlePaddle with AMD GPU" OFF)
 option(WITH_NV_JETSON "Compile PaddlePaddle with NV JETSON" OFF)
-option(WITH_NGRAPH "Compile PaddlePaddle with nGraph support." OFF)
 option(WITH_PROFILER "Compile PaddlePaddle with GPU profiler and gperftools" OFF)
 option(WITH_COVERAGE "Compile PaddlePaddle with code coverage" OFF)
 OPTION(WITH_LIBXSMM "Compile with libxsmm" OFF)
diff --git a/cmake/external/ngraph.cmake b/cmake/external/ngraph.cmake
deleted file mode 100644
index 1ab052254b..0000000000
--- a/cmake/external/ngraph.cmake
+++ /dev/null
@@ -1,66 +0,0 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-INCLUDE(GNUInstallDirs)
-
-INCLUDE(ExternalProject)
-
-SET(NGRAPH_PROJECT "extern_ngraph")
-SET(NGRAPH_GIT_TAG "972dd2f5ecfa18e3819b17c47698fae9795b499f")
-SET(NGRAPH_SOURCES_DIR ${THIRD_PARTY_PATH}/ngraph)
-SET(NGRAPH_INSTALL_DIR ${THIRD_PARTY_PATH}/install/ngraph)
-SET(NGRAPH_INC_DIR ${NGRAPH_INSTALL_DIR}/include)
-SET(NGRAPH_LIB_DIR ${NGRAPH_INSTALL_DIR}/${CMAKE_INSTALL_LIBDIR})
-SET(NGRAPH_SHARED_LIB_NAME libngraph.so)
-SET(NGRAPH_CPU_LIB_NAME libcpu_backend.so)
-if(CMAKE_BUILD_TYPE STREQUAL "Debug")
-  SET(NGRAPH_TBB_LIB_NAME libtbb_debug.so.2)
-else()
-  SET(NGRAPH_TBB_LIB_NAME libtbb.so.2)
-endif()
-SET(NGRAPH_GIT_REPO "https://github.com/NervanaSystems/ngraph.git")
-SET(NGRAPH_SHARED_LIB ${NGRAPH_LIB_DIR}/${NGRAPH_SHARED_LIB_NAME})
-SET(NGRAPH_CPU_LIB ${NGRAPH_LIB_DIR}/${NGRAPH_CPU_LIB_NAME})
-SET(NGRAPH_TBB_LIB ${NGRAPH_LIB_DIR}/${NGRAPH_TBB_LIB_NAME})
-
-ExternalProject_Add(
-  ${NGRAPH_PROJECT}
-  ${EXTERNAL_PROJECT_LOG_ARGS}
-  ${SHALLOW_CLONE}
-  DEPENDS ${MKLDNN_PROJECT} ${MKLML_PROJECT}
-  GIT_REPOSITORY ${NGRAPH_GIT_REPO}
-  GIT_TAG ${NGRAPH_GIT_TAG}
-  PREFIX ${NGRAPH_SOURCES_DIR}
-  UPDATE_COMMAND ""
-  CMAKE_GENERATOR ${CMAKE_GENERATOR}
-  CMAKE_GENERATOR_PLATFORM ${CMAKE_GENERATOR_PLATFORM}
-  CMAKE_GENERATOR_TOOLSET ${CMAKE_GENERATOR_TOOLSET}
-  CMAKE_ARGS -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
-  CMAKE_ARGS -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
-  CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${NGRAPH_INSTALL_DIR}
-  CMAKE_ARGS -DNGRAPH_UNIT_TEST_ENABLE=FALSE
-  CMAKE_ARGS -DNGRAPH_TOOLS_ENABLE=FALSE
-  CMAKE_ARGS -DNGRAPH_INTERPRETER_ENABLE=FALSE
-  CMAKE_ARGS -DNGRAPH_DEX_ONLY=TRUE
-  CMAKE_ARGS -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}
-  CMAKE_ARGS -DMKLDNN_INCLUDE_DIR=${MKLDNN_INC_DIR}
-  CMAKE_ARGS -DMKLDNN_LIB_DIR=${MKLDNN_INSTALL_DIR}/${CMAKE_INSTALL_LIBDIR}
-  CMAKE_ARGS -DMKLML_LIB_DIR=${MKLML_INSTALL_DIR}/lib
-)
-
-add_library(ngraph INTERFACE)
-add_dependencies(ngraph ${NGRAPH_PROJECT})
-target_compile_definitions(ngraph INTERFACE -DPADDLE_WITH_NGRAPH)
-target_include_directories(ngraph INTERFACE ${NGRAPH_INC_DIR})
-target_link_libraries(ngraph INTERFACE ${NGRAPH_SHARED_LIB})
diff --git a/cmake/inference_lib.cmake b/cmake/inference_lib.cmake
index 87667d1cae..30e96b8e5f 100644
--- a/cmake/inference_lib.cmake
+++ b/cmake/inference_lib.cmake
@@ -117,13 +117,6 @@ function(copy_part_of_thrid_party TARGET DST)
                 DSTS ${dst_dir} ${dst_dir}/lib)
     endif ()
 
-    if (WITH_NGRAPH)
-        set(dst_dir "${DST}/third_party/install/ngraph")
-        copy(${TARGET}
-                SRCS ${NGRAPH_INC_DIR} ${NGRAPH_LIB_DIR}
-                DSTS ${dst_dir} ${dst_dir})
-    endif ()
-
     if (LITE_BINARY_DIR)
         set(dst_dir "${DST}/third_party/install/lite")
         copy(${TARGET}
diff --git a/cmake/third_party.cmake b/cmake/third_party.cmake
index 7bfa18d626..3b3a43a69a 100644
--- a/cmake/third_party.cmake
+++ b/cmake/third_party.cmake
@@ -120,13 +120,6 @@ if(WIN32 OR APPLE)
         SET(WITH_LIBXSMM OFF CACHE STRING "Disable LIBXSMM in Windows and MacOS" FORCE)
     endif()
 
-    if(WITH_NGRAPH)
-        MESSAGE(WARNING
-            "Windows or Mac is not supported with nGraph in Paddle yet."
-            "Force WITH_NGRAPH=OFF")
-        SET(WITH_NGRAPH OFF CACHE STRING "Disable nGraph in Windows and MacOS" FORCE)
-    endif()
-
     if(WITH_BOX_PS)
         MESSAGE(WARNING
             "Windows or Mac is not supported with BOX_PS in Paddle yet."
@@ -260,18 +253,6 @@ if(WITH_DISTRIBUTE)
     endif()
 endif()
 
-if(WITH_NGRAPH)
-    if(WITH_MKLDNN)
-        include(external/ngraph)    # download, build, install nGraph
-        list(APPEND third_party_deps extern_ngraph)
-    else()
-        MESSAGE(WARNING
-            "nGraph needs mkl-dnn to be enabled."
-            "Force WITH_NGRAPH=OFF")
-        SET(WITH_NGRAPH OFF CACHE STRING "Disable nGraph if mkl-dnn is disabled" FORCE)
-    endif()
-endif()
-
 if(WITH_XBYAK)
     include(external/xbyak)    # download, build, install xbyak
     list(APPEND third_party_deps extern_xbyak)
diff --git a/go/paddle/config.go b/go/paddle/config.go
index 05e126114b..cea69e716b 100644
--- a/go/paddle/config.go
+++ b/go/paddle/config.go
@@ -138,14 +138,6 @@ func (config *AnalysisConfig) SwitchIrDebug(x bool) {
     C.PD_SwitchIrDebug(config.c, C.bool(x))
 }
 
-func (config *AnalysisConfig) EnableNgraph() {
-    C.PD_EnableNgraph(config.c)
-}
-
-func (config *AnalysisConfig) NgraphEnabled() bool {
-    return ConvertCBooleanToGo(C.PD_NgraphEnabled(config.c))
-}
-
 func (config *AnalysisConfig) EnableMkldnn() {
     C.PD_EnableMKLDNN(config.c)
 }
diff --git a/paddle/fluid/framework/CMakeLists.txt b/paddle/fluid/framework/CMakeLists.txt
index 24e4d7a122..d6cce25401 100644
--- a/paddle/fluid/framework/CMakeLists.txt
+++ b/paddle/fluid/framework/CMakeLists.txt
@@ -181,12 +181,6 @@ cc_library(variable_helper SRCS variable_helper.cc DEPS lod_tensor)
 cc_library(naive_executor SRCS naive_executor.cc DEPS op_registry device_context scope framework_proto glog lod_rank_table feed_fetch_method graph_to_program_pass variable_helper)
 
-if(WITH_NGRAPH)
-  set(NGRAPH_EXE_DEPS ngraph_engine)
-else()
-  set(NGRAPH_EXE_DEPS)
-endif()
-
 cc_library(executor_gc_helper SRCS executor_gc_helper.cc DEPS scope proto_desc operator garbage_collector)
 if(WITH_DISTRIBUTE)
   cc_library(executor SRCS executor.cc multi_trainer.cc pipeline_trainer.cc dataset_factory.cc
@@ -195,7 +189,7 @@ if(WITH_DISTRIBUTE)
             pull_dense_worker.cc section_worker.cc device_worker_factory.cc data_set.cc DEPS op_registry
             device_context scope framework_proto trainer_desc_proto glog fs shell fleet_wrapper box_wrapper lodtensor_printer
             lod_rank_table feed_fetch_method sendrecvop_rpc communicator collective_helper ${GLOB_DISTRIBUTE_DEPS}
-            graph_to_program_pass variable_helper data_feed_proto ${NGRAPH_EXE_DEPS} timer)
+            graph_to_program_pass variable_helper data_feed_proto timer)
   set(DISTRIBUTE_COMPILE_FLAGS "-Wno-non-virtual-dtor -Wno-error=non-virtual-dtor -Wno-error=delete-non-virtual-dtor")
   set_source_files_properties(executor.cc PROPERTIES COMPILE_FLAGS ${DISTRIBUTE_COMPILE_FLAGS})
 else()
@@ -205,7 +199,7 @@ else()
             pull_dense_worker.cc section_worker.cc device_worker_factory.cc data_set.cc DEPS op_registry
             device_context scope framework_proto data_feed_proto trainer_desc_proto glog
             lod_rank_table fs shell fleet_wrapper box_wrapper lodtensor_printer feed_fetch_method
-            graph_to_program_pass variable_helper ${NGRAPH_EXE_DEPS} timer)
+            graph_to_program_pass variable_helper timer)
   cc_test(test_naive_executor SRCS naive_executor_test.cc DEPS naive_executor elementwise_add_op)
 endif()
diff --git a/paddle/fluid/framework/details/CMakeLists.txt b/paddle/fluid/framework/details/CMakeLists.txt
index 6db4b6bccb..223a7da7f6 100644
--- a/paddle/fluid/framework/details/CMakeLists.txt
+++ b/paddle/fluid/framework/details/CMakeLists.txt
@@ -113,15 +113,8 @@ set(IR_PASS_DEPS graph_viz_pass multi_devices_graph_pass
 if(NOT APPLE AND NOT WIN32 AND WITH_GPU)
   set(IR_PASS_DEPS ${IR_PASS_DEPS} fusion_group_pass)
 endif()
-if(WITH_NGRAPH)
-  set(IR_PASS_DEPS ${IR_PASS_DEPS} ngraph)
-endif()
 cc_library(build_strategy SRCS build_strategy.cc DEPS pass_builder ${IR_PASS_DEPS})
 
 if (WITH_MKLDNN)
   target_link_libraries(build_strategy mkldnn_placement_pass)
 endif()
-
-if (WITH_NGRAPH)
-  target_link_libraries(build_strategy ngraph_subgraph_pass)
-endif()
diff --git a/paddle/fluid/framework/details/build_strategy.cc b/paddle/fluid/framework/details/build_strategy.cc
index eb9accaed8..a7fc6a8955 100644
--- a/paddle/fluid/framework/details/build_strategy.cc
+++ b/paddle/fluid/framework/details/build_strategy.cc
@@ -27,7 +27,6 @@ limitations under the License. */
 #include "paddle/fluid/framework/ir/multi_devices_graph_pass/multi_devices_graph_pass.h"
 
 DECLARE_bool(use_mkldnn);
-DECLARE_bool(use_ngraph);
 
 namespace paddle {
 namespace framework {
@@ -60,8 +59,6 @@ class ParallelExecutorPassBuilder : public ir::PassBuilder {
                         "sequential_execution_pass");
     AppendPassWithCheck(strategy_.sync_batch_norm_, "sync_batch_norm_pass");
 
-    AppendPassToUseNgraph("ngraph_subgraph_pass");
-
     AppendOpFusePasses();
     AppendPrintGraphPass("graph_viz_pass", "_fused_graph");
 
@@ -277,23 +274,6 @@ class ParallelExecutorPassBuilder : public ir::PassBuilder {
 #endif
   }
 
-  void AppendPassToUseNgraph(const std::string &pass_name) {
-#ifdef PADDLE_WITH_NGRAPH
-    if (FLAGS_use_ngraph) {
-      if (strategy_.reduce_ != BuildStrategy::ReduceStrategy::kAllReduce) {
-        LOG(WARNING) << "Currently ngraph_subgraph_pass works under AllReduce,"
-                        "please set FLAGS_use_ngraph=false.";
-      } else {
-        AppendPass(pass_name);
-      }
-    }
-#else
-    PADDLE_ENFORCE_NE(FLAGS_use_ngraph, true,
-                      platform::errors::PreconditionNotMet(
-                          "Please compile with NGRAPH first to use NGRAPH"));
-#endif
-  }
-
  private:
   BuildStrategy strategy_;
 };
@@ -451,9 +431,6 @@ USE_PASS(add_reader_dependency_pass);
 #ifdef PADDLE_WITH_MKLDNN
 USE_PASS(mkldnn_placement_pass);
 #endif
-#ifdef PADDLE_WITH_NGRAPH
-USE_PASS(ngraph_subgraph_pass);
-#endif
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32) && !defined(__APPLE__)
 USE_PASS(fusion_group_pass);
 #endif
diff --git a/paddle/fluid/framework/executor.cc b/paddle/fluid/framework/executor.cc
index ebf0aeb354..de15c8be7a 100644
--- a/paddle/fluid/framework/executor.cc
+++ b/paddle/fluid/framework/executor.cc
@@ -37,13 +37,8 @@ limitations under the License. */
 #include "paddle/fluid/platform/place.h"
 #include "paddle/fluid/platform/profiler.h"
 
-#ifdef PADDLE_WITH_NGRAPH
-#include "paddle/fluid/operators/ngraph/ngraph_engine.h"
-#endif
-
 DECLARE_bool(benchmark);
 DEFINE_bool(use_mkldnn, false, "Use MKLDNN to run");
-DEFINE_bool(use_ngraph, false, "Use NGRAPH to run");
 
 namespace paddle {
 namespace framework {
@@ -59,17 +54,6 @@ ExecutorPrepareContext::ExecutorPrepareContext(
 
 void ExecutorPrepareContext::PrepareUnusedVars(
     const std::vector<std::string>& keep_vars, bool force_disable_gc) {
-#ifdef PADDLE_WITH_NGRAPH
-  if (FLAGS_use_ngraph) {
-    // FIXME(zjl): There is difference when ngraph and gc are both enabled
-    // in unittests. I do not know why it happens. Maybe ngraph engine
-    // would cache some variables?
-    LOG_FIRST_N(WARNING, 1)
-        << "FLAGS_use_ngraph=True, garbage collection strategy is "
-           "disabled in Executor";
-    force_disable_gc = true;
-  }
-#endif
   // If gc is enabled and block size > 1
   if (prog_.Size() > 1) {
     operators::PrepareSafeEagerDeletionOnConditionalOpAndConditionalGradOp(
@@ -375,12 +359,6 @@ std::unique_ptr<ExecutorPrepareContext> Executor::Prepare(
   for (auto& op_desc : block.AllOps()) {
     ctx->ops_.push_back(OpRegistry::CreateOp(*op_desc));
   }
-#ifdef PADDLE_WITH_NGRAPH
-  if (FLAGS_use_ngraph && ctx->block_id_ == 0) {
-    paddle::operators::NgraphEngine::FuseNgraphOps(
-        ctx->prog_.Block(ctx->block_id_), &ctx->ops_);
-  }
-#endif
   ctx->PrepareUnusedVars(skip_ref_cnt_vars, force_disable_gc);
   return ctx;
 }
diff --git a/paddle/fluid/framework/ir/CMakeLists.txt b/paddle/fluid/framework/ir/CMakeLists.txt
index de98280f9c..eb1e5aca30 100644
--- a/paddle/fluid/framework/ir/CMakeLists.txt
+++ b/paddle/fluid/framework/ir/CMakeLists.txt
@@ -98,14 +98,6 @@ if(WITH_MKLDNN)
     pass_library(cpu_quantize_squash_pass inference DIR mkldnn)
 endif()
 
-if(WITH_NGRAPH)
-  cc_library(ngraph_subgraph_pass SRCS ngraph_subgraph_pass.cc DEPS ngraph_bridge
-    subgraph_detector fuse_pass_base ${op_library_DEPS})
-  set(pass_file ${PADDLE_BINARY_DIR}/paddle/fluid/inference/api/paddle_inference_pass.h)
-  file(APPEND ${pass_file} "USE_PASS(ngraph_subgraph_pass);\n")
-  set(INFER_IR_PASSES ${INFER_IR_PASSES} ngraph_subgraph_pass CACHE INTERNAL "")
-endif()
-
 cc_library(fuse_bn_act_pass SRCS fuse_bn_act_pass.cc DEPS pass graph_pattern_detector )
 cc_library(fuse_elewise_add_act_pass SRCS fuse_elewise_add_act_pass.cc DEPS pass graph_pattern_detector )
 cc_library(fuse_relu_depthwise_conv_pass SRCS fuse_relu_depthwise_conv_pass.cc DEPS pass graph_pattern_detector )
diff --git a/paddle/fluid/framework/ir/ngraph_subgraph_pass.cc b/paddle/fluid/framework/ir/ngraph_subgraph_pass.cc
deleted file mode 100644
index 9778b6215a..0000000000
--- a/paddle/fluid/framework/ir/ngraph_subgraph_pass.cc
+++ /dev/null
@@ -1,182 +0,0 @@
-// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include
-#include
-#include
-#include
-
-#include "paddle/fluid/framework/ir/graph_helper.h"
-#include "paddle/fluid/framework/ir/graph_pattern_detector.h"
-#include "paddle/fluid/framework/ir/ngraph_subgraph_pass.h"
-#include "paddle/fluid/framework/ir/subgraph_detector.h"
-#include "paddle/fluid/operators/ngraph/ngraph_bridge.h"
-#include "paddle/fluid/platform/enforce.h"
-#include "paddle/fluid/string/pretty_log.h"
-
-namespace paddle {
-namespace framework {
-namespace ir {
-
-std::string GenerateEngineKey(const std::set<std::string> &engine_inputs,
-                              const std::set<std::string> &engine_outputs,
-                              const std::string &size) {
-  std::string engine_hash_key = "";
-  for (auto name : engine_inputs) {
-    engine_hash_key += name;
-  }
-  for (auto name : engine_outputs) {
-    engine_hash_key += name;
-  }
-  engine_hash_key += size;
-  auto engine_key = std::to_string(std::hash<std::string>()(engine_hash_key));
-  return engine_key;
-}
-
-void NgraphSubgraphPass::ApplyImpl(Graph *graph) const {
-  PADDLE_ENFORCE_NOT_NULL(graph);
-  FusePassBase::Init("ngraph_subgraph_pass", graph);
-
-  std::unordered_set<Node *> nodes2delete;
-
-  auto teller = [](const Node *node) {
-    if (!node->IsOp() || !node->Op()) return false;
-    auto op_type = node->Op()->Type();
-    return !paddle::operators::NgraphBridge::isRegister(op_type);
-  };
-
-  SubGraphFuser fuser(graph, teller, 0, "ngraph_engine");
-  fuser();
-
-  for (auto *node : graph->Nodes()) {
-    if (node->IsOp() && !Agent(node).subgraph()->empty()) {
-      OpDesc *op_desc = node->Op();
-      op_desc->SetType("ngraph_engine");
-
-      CreateNgraphEngineOp(node, graph);
-
-      std::unordered_set<const Node *> nodes2remove(
-          Agent(node).subgraph()->begin(), Agent(node).subgraph()->end());
-
-      GraphSafeRemoveNodes(graph, nodes2remove);
-    }
-  }
-
-  std::unordered_set<const Node *> nodes2remove;
-  for (auto *node : graph->Nodes()) {
-    if (node->IsOp() && Agent(node).deleted()) {
-      nodes2remove.insert(node);
-    }
-  }
-
-  framework::ir::GraphSafeRemoveNodes(graph, nodes2remove);
-  // std::vector<Node *> nodes = ir::TopologySortOperations(*graph);
-}
-
-bool IsValid(std::string name) {
-  return name.find(Node::kControlDepVarName) == std::string::npos;
-}
-
-void UpdateNgraphIO(Node *node, Graph *graph,
-                    std::vector<std::string> *input_names,
-                    std::vector<std::string> *output_names) {
-  bool is_test = true, has_fetch = false;
-  for (Node *node : graph->Nodes()) {
-    if (node->IsOp() && node->Name().find("_grad") != std::string::npos) {
-      is_test = false;
-    }
-    if (node->IsVar() && node->Var()) {
-      for (auto out : node->outputs) {
-        if (out->Name() == "fetch") has_fetch = true;
-      }
-    }
-  }
-  if (is_test && has_fetch) {
-    for (auto *x : node->inputs) {
-      (*input_names).emplace_back(x->Name());
-    }
-    for (auto *x : node->outputs) {
-      (*output_names).emplace_back(x->Name());
-    }
-    return;
-  }
-
-  auto &subgraph = *Agent(node).subgraph();
-  std::unordered_set<std::string> inputs;
-  std::unordered_set<std::string> outputs;
-  for (auto *node : subgraph) {
-    for (auto in : node->inputs) {
-      auto name = in->Name();
-      if (!IsValid(name)) continue;
-      if (!outputs.count(name) && !inputs.count(name)) {
-        (*input_names).emplace_back(name);
-        inputs.insert(name);
-      }
-    }
-    for (auto out : node->outputs) {
-      auto name = out->Name();
-      if (!IsValid(name)) continue;
-      outputs.insert(name);
-      (*output_names).emplace_back(name);
-    }
-  }
-}
-
-void NgraphSubgraphPass::CreateNgraphEngineOp(Node *node, Graph *graph) const {
-  auto &subgraph = *Agent(node).subgraph();
-  PADDLE_ENFORCE_NE(subgraph.empty(), true, "subgraph cannot be empty");
-
-  framework::proto::BlockDesc block_proto;
-  framework::BlockDesc block_desc(nullptr, &block_proto);
-  block_desc.Proto()->set_parent_idx(-1);
-  block_desc.Proto()->set_idx(0);
-  for (auto *node : subgraph) {
-    auto *op = block_desc.AppendOp();
-    *op->Proto() = *node->Op()->Proto();
-  }
-  auto *vars = block_desc.Proto()->mutable_vars();
-  for (Node *node : graph->Nodes()) {
-    if (node->IsVar() && node->Var()) {
-      *vars->Add() = *node->Var()->Proto();
-    }
-  }
-  PADDLE_ENFORCE_NE(block_desc.Proto()->vars().empty(), true,
-                    "the block has no var-desc");
-
-  std::vector<std::string> input_names;
-  std::vector<std::string> output_names;
-  UpdateNgraphIO(node, graph, &input_names, &output_names);
-  auto *op_desc = node->Op();
-  op_desc->SetInput(
-      "Xs", std::vector<std::string>(input_names.begin(), input_names.end()));
-  op_desc->SetOutput(
-      "Ys", std::vector<std::string>(output_names.begin(), output_names.end()));
-
-  int sgs = subgraph.size();
-  std::string subgraph_str = block_desc.Proto()->SerializeAsString();
-  std::string engine_key =
-      std::to_string(std::hash<std::string>()(subgraph_str));
-  std::vector<int> interval{0, sgs};
-  op_desc->SetType("ngraph_engine");
-  op_desc->SetAttr("interval", interval);
-  op_desc->SetAttr("graph", subgraph_str);
-  op_desc->SetAttr("engine_key", engine_key);
-  op_desc->SetAttr("op_role", 0);
-}
-
-}  // namespace ir
-}  // namespace framework
-}  // namespace paddle
-
-REGISTER_PASS(ngraph_subgraph_pass, paddle::framework::ir::NgraphSubgraphPass);
diff --git a/paddle/fluid/framework/ir/ngraph_subgraph_pass.h b/paddle/fluid/framework/ir/ngraph_subgraph_pass.h
deleted file mode 100644
index 09f062671c..0000000000
--- a/paddle/fluid/framework/ir/ngraph_subgraph_pass.h
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#pragma once
-
-#include "paddle/fluid/framework/ir/fuse_pass_base.h"
-#include "paddle/fluid/framework/ir/graph.h"
-#include "paddle/fluid/framework/ir/graph_pattern_detector.h"
-#include "paddle/fluid/framework/ir/pass.h"
-
-namespace paddle {
-namespace framework {
-namespace ir {
-
-/*
- * Fuse supported ops to a NgraphEngineOp.
- */
-class NgraphSubgraphPass : public FusePassBase {
- public:
-  void ApplyImpl(ir::Graph *graph) const override;
-
-  virtual ~NgraphSubgraphPass() {}
-
- private:
-  void CreateNgraphEngineOp(framework::ir::Node *x,
-                            framework::ir::Graph *graph) const;
-};
-
-}  // namespace ir
-}  // namespace framework
-}  // namespace paddle
diff --git a/paddle/fluid/framework/ir/subgraph_detector.cc b/paddle/fluid/framework/ir/subgraph_detector.cc
index f705fca4e8..62c91af15d 100644
--- a/paddle/fluid/framework/ir/subgraph_detector.cc
+++ b/paddle/fluid/framework/ir/subgraph_detector.cc
@@ -21,8 +21,6 @@ limitations under the License. */
 #include "paddle/fluid/framework/ir/graph_pattern_detector.h"
 #include "paddle/fluid/framework/ir/node.h"
 
-DECLARE_bool(use_ngraph);
-
 namespace paddle {
 namespace framework {
 namespace ir {
@@ -398,11 +396,6 @@ void RemoveIntermediateOutputInSubgraph(const std::vector<Node *> &subgraph,
     }
   }
 
-  // In use for ngraph subgraph pass for parallel executor,
-  // this will remove all nodes, bypass this and let ngraph
-  // subgraph pass to process outputs
-  if (FLAGS_use_ngraph && valid_output.size() == 0) return;
-
   outputs->assign(valid_output.begin(), valid_output.end());
 }
diff --git a/paddle/fluid/framework/parallel_executor.cc b/paddle/fluid/framework/parallel_executor.cc
index 18049293c3..1c4a694a26 100644
--- a/paddle/fluid/framework/parallel_executor.cc
+++ b/paddle/fluid/framework/parallel_executor.cc
@@ -32,8 +32,6 @@ limitations under the License. */
 #include "paddle/fluid/framework/ir/memory_optimize_pass/reference_count_pass_helper.h"
 #include "paddle/fluid/platform/profiler.h"
 
-DECLARE_bool(use_ngraph);
-
 DECLARE_double(eager_delete_tensor_gb);
 
 #ifdef WITH_GPERFTOOLS
@@ -286,13 +284,6 @@ bool ParallelExecutorPrivate::AllowPartialFeed() const {
 }
 
 ir::Graph *ParallelExecutorPrivate::ApplyMemoryOptimizePass(ir::Graph *graph) {
-  if (FLAGS_use_ngraph) {
-    LOG_FIRST_N(WARNING, 1)
-        << "FLAGS_use_ngraph=True, memory optimization strategy is "
-           "disabled in ParallelExecutor";
-    return graph;
-  }
-
   /**
    * NOTE(zengjinle): If BuildStrategy.memory_optimize = None in Python,
    * set BuildStrategy.memory_optimize according to whether gc is enabled.
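Note for reviewers: the executor.cc hunk above deletes the single DEFINE_bool(use_ngraph, ...) that owned the flag, so every DECLARE_bool(use_ngraph) consumer (build_strategy.cc, subgraph_detector.cc, parallel_executor.cc above) has to go in the same patch; a DECLARE left behind would still compile but fail at link time for lack of the FLAGS_use_ngraph symbol. A minimal sketch of the gflags pairing, with a placeholder flag name:

    // owner.cc -- exactly one translation unit defines the flag:
    #include <gflags/gflags.h>
    DEFINE_bool(use_feature_x, false, "Toggle the optional engine");

    // consumer.cc -- every other file only declares it:
    #include <gflags/gflags.h>
    DECLARE_bool(use_feature_x);  // refers to the symbol FLAGS_use_feature_x

    bool ShouldFuse() { return FLAGS_use_feature_x; }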
diff --git a/paddle/fluid/framework/unused_var_check.cc b/paddle/fluid/framework/unused_var_check.cc
index f9eeaae497..5eb8011385 100644
--- a/paddle/fluid/framework/unused_var_check.cc
+++ b/paddle/fluid/framework/unused_var_check.cc
@@ -33,7 +33,6 @@ DEFINE_bool(enable_unused_var_check, false,
 //     not in cpu kernel;
 // 1: the inputs of which are used to indicate dtype of outputs;
 // 2: the inputs of which are used in fused operators.
-// 3: specical operators, like ngraph_engine.
 // The category number is presented in the comments after each operator.
 
 const std::unordered_set<std::string> op_has_unsed_vars_white_list = {
@@ -54,8 +53,7 @@ const std::unordered_set<std::string> op_has_unsed_vars_white_list = {
     "precision_recall",           // 1
     "fusion_seqpool_cvm_concat",  // 2
     "fused_batch_norm_act",       // 2
-    "fused_batch_norm_act_grad",  // 2
-    "ngraph_engine",              // 3
+    "fused_batch_norm_act_grad"   // 2
 };
 
 namespace paddle {
diff --git a/paddle/fluid/inference/analysis/ir_pass_manager.cc b/paddle/fluid/inference/analysis/ir_pass_manager.cc
index 795f9863db..4a79a3cf30 100644
--- a/paddle/fluid/inference/analysis/ir_pass_manager.cc
+++ b/paddle/fluid/inference/analysis/ir_pass_manager.cc
@@ -136,10 +136,6 @@ void IRPassManager::CreatePasses(Argument *argument,
       pass->Set("disable_trt_plugin_fp16",
                 new bool(argument->disable_trt_plugin_fp16()));
     }
-    if (pass_name == "ngraph_subgraph_pass") {
-      pass->Set("program",
-                new framework::ProgramDesc *(&argument->main_program()));
-    }
     if (pass_name == "lite_subgraph_pass") {
       bool enable_int8 =
           argument->lite_precision_mode() == AnalysisConfig::Precision::kInt8;
diff --git a/paddle/fluid/inference/api/CMakeLists.txt b/paddle/fluid/inference/api/CMakeLists.txt
index 219a6b2548..d375a4443e 100755
--- a/paddle/fluid/inference/api/CMakeLists.txt
+++ b/paddle/fluid/inference/api/CMakeLists.txt
@@ -28,11 +28,7 @@ if(WITH_MKLDNN)
 endif()
 
 cc_library(analysis_config SRCS analysis_config.cc DEPS ${mkldnn_quantizer_cfg} lod_tensor paddle_pass_builder)
-if(WITH_NGRAPH)
-  cc_library(paddle_pass_builder SRCS paddle_pass_builder.cc DEPS ngraph)
-else(WITH_NGRAPH)
-  cc_library(paddle_pass_builder SRCS paddle_pass_builder.cc)
-endif(WITH_NGRAPH)
+cc_library(paddle_pass_builder SRCS paddle_pass_builder.cc)
 cc_library(paddle_inference_api SRCS api.cc api_impl.cc helper.cc DEPS
   lod_tensor scope reset_tensor_array
   analysis_config zero_copy_tensor trainer_desc_proto)
@@ -43,10 +39,6 @@ if(WITH_GPU AND TENSORRT_FOUND)
   set(inference_deps ${inference_deps} tensorrt_engine tensorrt_converter)
 endif()
 
-if(WITH_NGRAPH)
-  set(inference_deps ${inference_deps} ngraph)
-endif()
-
 cc_library(analysis_predictor SRCS analysis_predictor.cc ${mkldnn_quantizer_src} DEPS ${inference_deps}
   zero_copy_tensor ir_pass_manager op_compatible_info)
diff --git a/paddle/fluid/inference/api/analysis_config.cc b/paddle/fluid/inference/api/analysis_config.cc
index 5ee2a54f43..fd7031b01d 100644
--- a/paddle/fluid/inference/api/analysis_config.cc
+++ b/paddle/fluid/inference/api/analysis_config.cc
@@ -116,8 +116,6 @@ AnalysisConfig::AnalysisConfig(const AnalysisConfig &other) {
   CP_MEMBER(tensorrt_precision_mode_);
   CP_MEMBER(trt_use_static_engine_);
   CP_MEMBER(trt_use_calib_mode_);
-  // NGRAPH related.
-  CP_MEMBER(use_ngraph_);
   // MKLDNN related.
   CP_MEMBER(use_mkldnn_);
   CP_MEMBER(mkldnn_enabled_op_types_);
@@ -208,16 +206,6 @@ void AnalysisConfig::EnableMkldnnQuantizer() {
   Update();
 }
 
-void AnalysisConfig::EnableNgraph() {
-#ifdef PADDLE_WITH_NGRAPH
-  pass_builder()->EnableNgraph();
-  use_ngraph_ = true;
-#else
-  LOG(ERROR) << "Please compile with NGRAPH first to use NGRAPH";
-  use_ngraph_ = false;
-#endif
-}
-
 MkldnnQuantizerConfig *AnalysisConfig::mkldnn_quantizer_config() const {
   PADDLE_ENFORCE_NOT_NULL(mkldnn_quantizer_config_,
                           "MkldnnQuantizer was not enabled yet.");
@@ -305,20 +293,6 @@ void AnalysisConfig::Update() {
 #endif
   }
 
-  if (use_ngraph_) {
-    if (!enable_ir_optim_) {
-      LOG(ERROR)
-          << "EnableNgraph() only works when IR optimization is enabled.";
-    }
-#ifdef PADDLE_WITH_NGRAPH
-    pass_builder()->EnableNgraph();
-    use_ngraph_ = true;
-#else
-    LOG(ERROR) << "Please compile with NGRAPH first to use NGRAPH";
-    use_ngraph_ = false;
-#endif
-  }
-
   if (use_mkldnn_) {
 #ifdef PADDLE_WITH_MKLDNN
     if (!enable_ir_optim_) {
@@ -387,8 +361,6 @@ std::string AnalysisConfig::SerializeInfoCache() {
 
   ss << enable_memory_optim_;
 
-  ss << use_ngraph_;
-
   ss << use_mkldnn_;
   ss << mkldnn_cache_capacity_;
   for (auto &item : mkldnn_enabled_op_types_) ss << item;
diff --git a/paddle/fluid/inference/api/demo_ci/CMakeLists.txt b/paddle/fluid/inference/api/demo_ci/CMakeLists.txt
index 255a1bf33e..67519e3f4c 100644
--- a/paddle/fluid/inference/api/demo_ci/CMakeLists.txt
+++ b/paddle/fluid/inference/api/demo_ci/CMakeLists.txt
@@ -80,16 +80,6 @@ if (NOT WIN32)
   endif()
 endif(NOT WIN32)
 
-if (NOT WIN32)
-  set(NGRAPH_PATH "${PADDLE_LIB_THIRD_PARTY_PATH}ngraph")
-  if(EXISTS ${NGRAPH_PATH})
-    include(GNUInstallDirs)
-    include_directories("${NGRAPH_PATH}/include")
-    link_directories("${NGRAPH_PATH}/${CMAKE_INSTALL_LIBDIR}")
-    set(NGRAPH_LIB ${NGRAPH_PATH}/${CMAKE_INSTALL_LIBDIR}/libngraph${CMAKE_SHARED_LIBRARY_SUFFIX})
-  endif()
-endif()
-
 if(WITH_MKL)
   set(MATH_LIB_PATH "${PADDLE_LIB_THIRD_PARTY_PATH}mklml")
   include_directories("${MATH_LIB_PATH}/include")
@@ -132,7 +122,7 @@ endif()
 if (NOT WIN32)
   set(EXTERNAL_LIB "-lrt -ldl -lpthread")
   set(DEPS ${DEPS}
-      ${MATH_LIB} ${MKLDNN_LIB} ${NGRAPH_LIB}
+      ${MATH_LIB} ${MKLDNN_LIB}
       glog gflags protobuf xxhash ${EXTERNAL_LIB})
 else()
diff --git a/paddle/fluid/inference/api/paddle_analysis_config.h b/paddle/fluid/inference/api/paddle_analysis_config.h
index 4d6d1ab36e..2002d1f76a 100644
--- a/paddle/fluid/inference/api/paddle_analysis_config.h
+++ b/paddle/fluid/inference/api/paddle_analysis_config.h
@@ -339,18 +339,6 @@ struct AnalysisConfig {
   ///
   void SwitchIrDebug(int x = true);
 
-  ///
-  /// \brief Turn on NGRAPH.
-  ///
-  ///
-  void EnableNgraph();
-  ///
-  /// \brief A boolean state telling whether to use the NGRAPH.
-  ///
-  /// \return bool Whether to use the NGRAPH.
-  ///
-  bool ngraph_enabled() const { return use_ngraph_; }
-
   ///
   /// \brief Turn on MKLDNN.
   ///
@@ -548,7 +536,6 @@ struct AnalysisConfig {
   // memory reuse related.
   bool enable_memory_optim_{false};
 
-  bool use_ngraph_{false};
   bool use_mkldnn_{false};
   std::unordered_set<std::string> mkldnn_enabled_op_types_;
diff --git a/paddle/fluid/inference/api/paddle_pass_builder.cc b/paddle/fluid/inference/api/paddle_pass_builder.cc
index f02f4688b8..a05abf5a74 100644
--- a/paddle/fluid/inference/api/paddle_pass_builder.cc
+++ b/paddle/fluid/inference/api/paddle_pass_builder.cc
@@ -143,10 +143,6 @@ void GpuPassStrategy::EnableMkldnnQuantizer() {
   LOG(ERROR) << "GPU not support MKL-DNN quantization";
 }
 
-void GpuPassStrategy::EnableNgraph() {
-  LOG(ERROR) << "GPU not support Ngraph yet";
-}
-
 CpuPassStrategy::CpuPassStrategy() : PassStrategy({}) {
   // NOTE the large fusions should be located in the front, so that they will
   // not be damaged by smaller ones.
@@ -224,14 +220,4 @@ void CpuPassStrategy::EnableMkldnnQuantizer() {
 #endif
 }
 
-void CpuPassStrategy::EnableNgraph() {
-#ifdef PADDLE_WITH_NGRAPH
-  if (!use_ngraph_) {
-    passes_.insert(passes_.begin(), "ngraph_subgraph_pass");
-  }
-  use_ngraph_ = true;
-#else
-  use_ngraph_ = false;
-#endif
-}
 }  // namespace paddle
diff --git a/paddle/fluid/inference/api/paddle_pass_builder.h b/paddle/fluid/inference/api/paddle_pass_builder.h
index 1a83fabbdb..546c2ce1a0 100644
--- a/paddle/fluid/inference/api/paddle_pass_builder.h
+++ b/paddle/fluid/inference/api/paddle_pass_builder.h
@@ -94,10 +94,6 @@ class PassStrategy : public PaddlePassBuilder {
    */
   virtual void EnableMKLDNN() {}
 
-  /** Enable NGRAPH optimization
-   */
-  virtual void EnableNgraph() {}
-
   /** Enable MKLDNN quantize optimization
    */
   virtual void EnableMkldnnQuantizer() {}
@@ -107,7 +103,6 @@ class PassStrategy : public PaddlePassBuilder {
   virtual ~PassStrategy() = default;
 
  protected:
-  bool use_ngraph_{false};
   bool use_gpu_{false};
   bool use_mkldnn_{false};
 };
@@ -121,7 +116,6 @@ class CpuPassStrategy : public PassStrategy {
   explicit CpuPassStrategy(const CpuPassStrategy &other)
       : PassStrategy(other.AllPasses()) {
     use_gpu_ = other.use_gpu_;
-    use_ngraph_ = other.use_ngraph_;
     use_mkldnn_ = other.use_mkldnn_;
     use_mkldnn_quantizer_ = other.use_mkldnn_quantizer_;
   }
@@ -129,12 +123,10 @@ class CpuPassStrategy : public PassStrategy {
   virtual ~CpuPassStrategy() = default;
 
   void EnableCUDNN() override;
-  void EnableNgraph() override;
   void EnableMKLDNN() override;
   void EnableMkldnnQuantizer() override;
 
 protected:
-  bool use_ngraph_{false};
   bool use_mkldnn_quantizer_{false};
 };
@@ -151,7 +143,6 @@ class GpuPassStrategy : public PassStrategy {
   }
 
   void EnableCUDNN() override;
-  void EnableNgraph() override;
   void EnableMKLDNN() override;
   void EnableMkldnnQuantizer() override;
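Note for reviewers: the pass-builder hunks above all follow one extension pattern, which is why EnableNgraph can be deleted without touching its siblings: the base PassStrategy exposes a no-op virtual Enable* hook, and a strategy that supports the backend overrides it to prepend the subgraph pass to passes_ (large fusions run first, per the NOTE in paddle_pass_builder.cc). A simplified sketch of the pattern, with placeholder names mirroring the removed CpuPassStrategy::EnableNgraph:

    #include <string>
    #include <vector>

    class PassStrategy {
     public:
      virtual void EnableFeatureX() {}  // base: backend unsupported, no-op
      virtual ~PassStrategy() = default;

     protected:
      std::vector<std::string> passes_;
      bool use_feature_x_{false};
    };

    class CpuPassStrategy : public PassStrategy {
     public:
      void EnableFeatureX() override {
        if (!use_feature_x_) {
          // run the subgraph pass first so it can claim whole regions of
          // the graph before the smaller fusion passes see them
          passes_.insert(passes_.begin(), "feature_x_subgraph_pass");
        }
        use_feature_x_ = true;
      }
    };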
diff --git a/paddle/fluid/inference/capi/paddle_c_api.h b/paddle/fluid/inference/capi/paddle_c_api.h
index bfac4baef0..e26f6b1f2b 100644
--- a/paddle/fluid/inference/capi/paddle_c_api.h
+++ b/paddle/fluid/inference/capi/paddle_c_api.h
@@ -215,11 +215,6 @@ typedef struct PD_MaxInputShape {
 PADDLE_CAPI_EXPORT extern void PD_SwitchIrDebug(PD_AnalysisConfig* config,
                                                 bool x);
 
-PADDLE_CAPI_EXPORT extern void PD_EnableNgraph(PD_AnalysisConfig* config);
-
-PADDLE_CAPI_EXPORT extern bool PD_NgraphEnabled(
-    const PD_AnalysisConfig* config);
-
 PADDLE_CAPI_EXPORT extern void PD_EnableMKLDNN(PD_AnalysisConfig* config);
 
 PADDLE_CAPI_EXPORT extern void PD_SetMkldnnCacheCapacity(
diff --git a/paddle/fluid/inference/capi/pd_config.cc b/paddle/fluid/inference/capi/pd_config.cc
index 4586373773..f5445dd5a3 100644
--- a/paddle/fluid/inference/capi/pd_config.cc
+++ b/paddle/fluid/inference/capi/pd_config.cc
@@ -171,16 +171,6 @@ void PD_SwitchIrDebug(PD_AnalysisConfig* config, bool x) {
   config->config.SwitchIrDebug(x);
 }
 
-void PD_EnableNgraph(PD_AnalysisConfig* config) {
-  PADDLE_ENFORCE_NOT_NULL(config);
-  config->config.EnableNgraph();
-}
-
-bool PD_NgraphEnabled(const PD_AnalysisConfig* config) {
-  PADDLE_ENFORCE_NOT_NULL(config);
-  return config->config.ngraph_enabled();
-}
-
 void PD_EnableMKLDNN(PD_AnalysisConfig* config) {
   PADDLE_ENFORCE_NOT_NULL(config);
   config->config.EnableMKLDNN();
diff --git a/paddle/fluid/inference/tests/api/analyzer_bert_tester.cc b/paddle/fluid/inference/tests/api/analyzer_bert_tester.cc
index 7232dbbe57..f956c34f23 100644
--- a/paddle/fluid/inference/tests/api/analyzer_bert_tester.cc
+++ b/paddle/fluid/inference/tests/api/analyzer_bert_tester.cc
@@ -150,7 +150,7 @@ void SetConfig(AnalysisConfig *config) {
   config->DisableFCPadding();
 }
 
-void profile(bool use_mkldnn = false, bool use_ngraph = false) {
+void profile(bool use_mkldnn = false) {
   AnalysisConfig config;
   SetConfig(&config);
 
@@ -158,10 +158,6 @@ void profile(bool use_mkldnn = false, bool use_ngraph = false) {
     config.EnableMKLDNN();
   }
 
-  if (use_ngraph) {
-    config.EnableNgraph();
-  }
-
   std::vector<std::vector<PaddleTensor>> outputs;
   std::vector<std::vector<PaddleTensor>> inputs;
   LoadInputData(&inputs);
@@ -171,11 +167,7 @@ void profile(bool use_mkldnn = false, bool use_ngraph = false) {
 TEST(Analyzer_bert, profile) { profile(); }
 
 #ifdef PADDLE_WITH_MKLDNN
-TEST(Analyzer_bert, profile_mkldnn) { profile(true, false); }
-#endif
-
-#ifdef PADDLE_WITH_NGRAPH
-TEST(Analyzer_bert, profile_ngraph) { profile(false, true); }
+TEST(Analyzer_bert, profile_mkldnn) { profile(true); }
 #endif
 
 // Check the fuse status
@@ -190,17 +182,13 @@ TEST(Analyzer_bert, fuse_statis) {
 }
 
 // Compare result of NativeConfig and AnalysisConfig
-void compare(bool use_mkldnn = false, bool use_ngraph = false) {
+void compare(bool use_mkldnn = false) {
   AnalysisConfig cfg;
   SetConfig(&cfg);
   if (use_mkldnn) {
     cfg.EnableMKLDNN();
   }
 
-  if (use_ngraph) {
-    cfg.EnableNgraph();
-  }
-
   std::vector<std::vector<PaddleTensor>> inputs;
   LoadInputData(&inputs);
   CompareNativeAndAnalysis(
@@ -209,15 +197,7 @@ void compare(bool use_mkldnn = false, bool use_ngraph = false) {
 TEST(Analyzer_bert, compare) { compare(); }
 
 #ifdef PADDLE_WITH_MKLDNN
-TEST(Analyzer_bert, compare_mkldnn) {
-  compare(true, false /* use_mkldnn, no use_ngraph */);
-}
-#endif
-
-#ifdef PADDLE_WITH_NGRAPH
-TEST(Analyzer_bert, compare_ngraph) {
-  compare(false, true /* no use_mkldnn, use_ngraph */);
-}
+TEST(Analyzer_bert, compare_mkldnn) { compare(true /* use_mkldnn */); }
 #endif
 
 // Compare Deterministic result
diff --git a/paddle/fluid/inference/tests/api/analyzer_capi_gpu_tester.cc b/paddle/fluid/inference/tests/api/analyzer_capi_gpu_tester.cc
index 85bd5bafd9..c60e0a25f2 100644
--- a/paddle/fluid/inference/tests/api/analyzer_capi_gpu_tester.cc
+++ b/paddle/fluid/inference/tests/api/analyzer_capi_gpu_tester.cc
@@ -58,9 +58,6 @@ TEST(PD_AnalysisConfig, use_gpu) {
                              false);
   bool trt_enable = PD_TensorrtEngineEnabled(config);
   CHECK(trt_enable) << "NO";
-  PD_EnableNgraph(config);
-  bool ngraph_enable = PD_NgraphEnabled(config);
-  LOG(INFO) << ngraph_enable << " Ngraph";
   PD_EnableMemoryOptim(config);
   bool memory_optim_enable = PD_MemoryOptimEnabled(config);
   CHECK(memory_optim_enable) << "NO";
diff --git a/paddle/fluid/inference/tests/api/config_printer.h b/paddle/fluid/inference/tests/api/config_printer.h
index de938669c0..b952b62f13 100644
--- a/paddle/fluid/inference/tests/api/config_printer.h
+++ b/paddle/fluid/inference/tests/api/config_printer.h
@@ -78,8 +78,6 @@ std::ostream &operator<<(std::ostream &os, const AnalysisConfig &config) {
      << "use_tensorrt: " << config.tensorrt_engine_enabled() << "\n";
   os << GenSpaces(num_spaces) << "use_mkldnn: " << config.mkldnn_enabled()
      << "\n";
-  os << GenSpaces(num_spaces) << "use_ngraph: " << config.ngraph_enabled()
-     << "\n";
   num_spaces--;
   os << GenSpaces(num_spaces) << "}\n";
   return os;
diff --git a/paddle/fluid/operators/CMakeLists.txt b/paddle/fluid/operators/CMakeLists.txt
index f59e73d142..596eb99e81 100644
--- a/paddle/fluid/operators/CMakeLists.txt
+++ b/paddle/fluid/operators/CMakeLists.txt
@@ -12,7 +12,6 @@ add_subdirectory(detection)
 add_subdirectory(elementwise)
 add_subdirectory(fused)
 add_subdirectory(metrics)
-add_subdirectory(ngraph)
 add_subdirectory(optimizers)
 add_subdirectory(reduce_ops)
 add_subdirectory(sequence_ops)
diff --git a/paddle/fluid/operators/ngraph/CMakeLists.txt b/paddle/fluid/operators/ngraph/CMakeLists.txt
deleted file mode 100644
index 7559d29ce2..0000000000
--- a/paddle/fluid/operators/ngraph/CMakeLists.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-if(WITH_NGRAPH)
-  cc_library(ngraph_bridge SRCS ngraph_bridge.cc DEPS operator framework_proto ngraph)
-  cc_library(ngraph_engine SRCS ngraph_engine.cc DEPS ngraph_bridge framework_proto)
-  op_library(ngraph_engine_op DEPS ngraph_engine op_registry op_info device_context)
-  add_subdirectory(ops)
-endif()
diff --git a/paddle/fluid/operators/ngraph/ngraph_bridge.cc b/paddle/fluid/operators/ngraph/ngraph_bridge.cc
deleted file mode 100644
index 9ea7db2a67..0000000000
--- a/paddle/fluid/operators/ngraph/ngraph_bridge.cc
+++ /dev/null
@@ -1,71 +0,0 @@
-/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License. */
-
-#include
-#include
-#include
-#include
-#include
-
-#include "ngraph/ngraph.hpp"
-#include "paddle/fluid/operators/ngraph/ngraph_bridge.h"
-#include "paddle/fluid/operators/ngraph/ngraph_ops.h"
-#include "paddle/fluid/operators/ngraph/ops/op_bridge.h"
-#include "paddle/fluid/platform/enforce.h"
-#include "paddle/fluid/platform/ngraph_helper.h"
-
-constexpr int64_t kNoPadding = -1;
-
-namespace paddle {
-namespace operators {
-
-bool NgraphBridge::isRegister(const std::string& str) {
-  return ops::NgraphSingleton::Lookup(str);
-}
-
-bool NgraphBridge::isSupported(
-    const std::unique_ptr<framework::OperatorBase>& op) {
-  static std::unordered_set<std::string> skip_op_list{
-      "reshape", "reshape2", "lookup_table", "lookup_table_grad"};
-  bool result = true;
-  auto& op_type = op->Type();
-  auto op_attrs = paddle::framework::AttrReader(op->Attrs());
-  if (!isRegister(op_type)) {
-    if (skip_op_list.count(op_type)) {
-      if (op_type == "lookup_table" || op_type == "lookup_table_grad") {
-        if (op_attrs.Get<bool>("is_sparse")) {
-          result = false;
-        }
-      } else if ((op_type == "reshape") || (op_type == "reshape2")) {
-        if (op->Input("Shape") != paddle::framework::kEmptyVarName) {
-          result = false;
-        }
-      } else {
-        result = false;
-      }
-    }
-  } else {
-    result = false;
-  }
-  return result;
-}
-
-void NgraphBridge::BuildNgNode(
-    const std::shared_ptr<framework::OperatorBase>& op) {
-  auto& op_type = op->Type();
-  ops::NgraphSingleton::BuildNode(ngb_node_map_, op, op_type);
-}
-
-}  // namespace operators
-}  // namespace paddle
diff --git a/paddle/fluid/operators/ngraph/ngraph_bridge.h b/paddle/fluid/operators/ngraph/ngraph_bridge.h
deleted file mode 100644
index 0b43ec5387..0000000000
--- a/paddle/fluid/operators/ngraph/ngraph_bridge.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License. */
-
-#pragma once
-
-#include
-#include
-#include
-#include
-#include
-
-#include "ngraph/node.hpp"
-
-#include "paddle/fluid/framework/operator.h"
-
-namespace paddle {
-namespace operators {
-
-class NgraphBridge {
- public:
-  explicit NgraphBridge(
-      std::shared_ptr<
-          std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
-          var_node_map)
-      : ngb_node_map_(var_node_map) {}
-
-  void BuildNgNode(const std::shared_ptr<framework::OperatorBase>& op);
-
-  static bool isRegister(const std::string& str);
-
-  static bool isSupported(const std::unique_ptr<framework::OperatorBase>& op);
-
- private:
-  std::shared_ptr<
-      std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
-      ngb_node_map_;
-};
-
-}  // namespace operators
-}  // namespace paddle
diff --git a/paddle/fluid/operators/ngraph/ngraph_engine.cc b/paddle/fluid/operators/ngraph/ngraph_engine.cc
deleted file mode 100644
index 41f845e26e..0000000000
--- a/paddle/fluid/operators/ngraph/ngraph_engine.cc
+++ /dev/null
@@ -1,601 +0,0 @@
-/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License. */
-
-#include
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-#include "paddle/fluid/framework/block_desc.h"
-#include "paddle/fluid/framework/ddim.h"
-#include "paddle/fluid/framework/feed_fetch_type.h"
-#include "paddle/fluid/framework/framework.pb.h"
-#include "paddle/fluid/framework/lod_tensor.h"
-#include "paddle/fluid/framework/op_desc.h"
-#include "paddle/fluid/framework/op_registry.h"
-#include "paddle/fluid/framework/var_type.h"
-#include "paddle/fluid/operators/ngraph/ngraph_bridge.h"
-#include "paddle/fluid/operators/ngraph/ngraph_engine.h"
-
-namespace paddle {
-namespace operators {
-
-static ngraph::Shape Ddim2Shape(const framework::DDim& dims) {
-  ngraph::Shape sp;
-  for (int i = 0; i < dims.size(); ++i) {
-    sp.emplace_back(dims[i]);
-  }
-  return sp;
-}
-
-static framework::DDim Shape2Ddim(const ngraph::Shape& shape) {
-  std::vector<int64_t> dims;
-  for (size_t i = 0; i < shape.size(); ++i) {
-    int64_t k = shape[i];
-    dims.emplace_back(k);
-  }
-  return framework::make_ddim(dims);
-}
-
-static std::map<framework::proto::VarType::Type, ngraph::element::Type>
-    pd2ng_type_map = {
-        {framework::proto::VarType::FP32, ngraph::element::f32},
-        {framework::proto::VarType::FP64, ngraph::element::f64},
-        {framework::proto::VarType::INT32, ngraph::element::i32},
-        {framework::proto::VarType::INT64, ngraph::element::i64},
-        {framework::proto::VarType::UINT8, ngraph::element::u8},
-        {framework::proto::VarType::BOOL, ngraph::element::boolean}};
-
-static std::map<ngraph::element::Type, framework::proto::VarType::Type>
-    ng2pd_type_map = {
-        {ngraph::element::f32, framework::proto::VarType::FP32},
-        {ngraph::element::f64, framework::proto::VarType::FP64},
-        {ngraph::element::i32, framework::proto::VarType::INT32},
-        {ngraph::element::i64, framework::proto::VarType::INT64},
-        {ngraph::element::u8, framework::proto::VarType::UINT8},
-        {ngraph::element::boolean, framework::proto::VarType::BOOL}};
-
-std::vector<std::string> NgraphEngine::feed_vars = {};
-
-std::weak_ptr<ngraph::runtime::Backend> NgraphEngine::wp_backend_;
-
-std::mutex NgraphEngine::ng_mutex_;
-
-static std::vector<std::vector<int>> NgraphOpIntervals(
-    std::vector<std::unique_ptr<framework::OperatorBase>>* ops) {
-  NgraphEngine::feed_vars.clear();
-  std::vector<std::vector<int>> intervals;
-
-  int size = ops->size();
-  int left = 0, feed_idx = -1;
-  while (left < size && ops->at(left)->Type() != framework::kFeedOpType &&
-         ops->at(left)->Type() != "read" &&
-         ops->at(left)->Type() != framework::kFetchOpType) {
-    ++left;
-  }
-
-  if (left < size) {
-    auto op_type = ops->at(left)->Type();
-    if (op_type == framework::kFeedOpType || op_type == "read") {
-      feed_idx = left;
-    }
-  }
-
-  while (left < size && (ops->at(left)->Type() == framework::kFeedOpType ||
-                         ops->at(left)->Type() == "read")) {
-    for (auto& var_name_item : ops->at(left)->Outputs()) {
-      for (auto& var_name : var_name_item.second) {
-        NgraphEngine::feed_vars.emplace_back(var_name);
-      }
-    }
-    ++left;
-  }
-
-  int right = left;
-  while (right < size && ops->at(right)->Type() != framework::kFetchOpType) {
-    ++right;
-  }
-
-  int index = right;
-  while (index < size && ops->at(index)->Type() == framework::kFetchOpType) {
-    ++index;
-  }
-
-  if (left == size || ops->at(left)->Type() == framework::kFetchOpType) {
-    left = 0;
-  }
-
-  // (left, right - 1) represents indices between feed and fetch
-  int pivot = left;
-  while (pivot < right) {
-    auto op_type = ops->at(pivot)->Type();
-    if (!NgraphBridge::isSupported(ops->at(pivot))) {
-      ++pivot;
-    } else {
-      int start = pivot, end = start;
-      while (pivot < right && (NgraphBridge::isSupported(ops->at(pivot)))) {
-        ++pivot;
-        ++end;
-      }
-      std::vector<int> interval = {start, end};
-      if (feed_idx != -1 && start > feed_idx) {
-        intervals.emplace_back(interval);
-      }
-    }
-  }  // end while
-  return intervals;
-}
-
-static void SubstituteNgraphOp(
-    std::vector<std::unique_ptr<framework::OperatorBase>>* ops,
-    std::string engine_key, std::string block_str, std::vector<int> interval) {
-  framework::OpDesc ng_op_desc(nullptr);
-  ng_op_desc.SetType("ngraph_engine");
-  ng_op_desc.SetAttr("interval", interval);
-  ng_op_desc.SetAttr("engine_key", engine_key);
-  ng_op_desc.SetAttr("graph", block_str);
-  ng_op_desc.SetInput("Xs", std::vector<std::string>(0));
-  ng_op_desc.SetOutput("Ys", std::vector<std::string>(0));
-
-  ops->erase(ops->begin() + interval[0], ops->begin() + interval[1]);
-  ops->insert(ops->begin() + interval[0],
-              framework::OpRegistry::CreateOp(ng_op_desc));
-}
-
-std::string SerializedBlock(const framework::BlockDesc& bdesc) {
-  framework::proto::BlockDesc block_proto;
-  framework::BlockDesc block_desc(nullptr, &block_proto);
-  block_desc.Proto()->set_parent_idx(-1);
-  block_desc.Proto()->set_idx(0);
-
-  for (auto& op_desc : bdesc.AllOps()) {
-    auto* op = block_desc.AppendOp();
-    *op->Proto() = *op_desc->Proto();
-  }
-
-  auto* vars = block_desc.Proto()->mutable_vars();
-  for (auto& var_desc : bdesc.AllVars()) {
-    *vars->Add() = *var_desc->Proto();
-  }
-
-  return block_desc.Proto()->SerializeAsString();
-}
-
-void NgraphEngine::FuseNgraphOps(
-    const framework::BlockDesc& block_desc,
-    std::vector<std::unique_ptr<framework::OperatorBase>>* ops) {
-  auto intervals = NgraphOpIntervals(ops);
-  std::string serialized_block = SerializedBlock(block_desc);
-  std::string engine_key =
-      std::to_string(std::hash<std::string>()(serialized_block));
-  for (auto it = intervals.rbegin(); it != intervals.rend(); ++it) {
-    SubstituteNgraphOp(ops, engine_key, serialized_block, *it);
-  }
-}
-
-NgraphEngine::NgraphEngine(const framework::Scope& scope,
-                           const platform::Place& place,
-                           const framework::ExecutionContext& ctx)
-    : scope_(scope), place_(place) {
-  var_in_node_map_ = std::make_shared<
-      std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>();
-
-  var_node_map_ = std::make_shared<
-      std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>();
-
-  std::lock_guard<std::mutex> lock(ng_mutex_);
-
-  if (!wp_backend_.lock()) {
-    try {
-      VLOG(3) << "ngraph creating CPU backend.";
-      backend_ = ngraph::runtime::Backend::create("CPU");
-    } catch (...) {
-      PADDLE_THROW("Unsupported nGraph backend");
-    }
-    wp_backend_ = backend_;
-  } else {
-    backend_ = wp_backend_.lock();
-  }
-
-  GetNgFunction(ctx);
-}
-
-void NgraphEngine::Prepare(const framework::ExecutionContext& ctx) {
-  auto interval = ctx.Attr<std::vector<int>>("interval");
-  std::string serialized_graph = ctx.Attr<std::string>("graph");
-
-  framework::proto::BlockDesc block_proto;
-  if (!serialized_graph.empty()) block_proto.ParseFromString(serialized_graph);
-  framework::BlockDesc block_desc(nullptr, &block_proto);
-
-  for (auto& var : block_desc.AllVars()) {
-    if (!(var->GetType() == framework::proto::VarType::SELECTED_ROWS ||
-          var->GetType() == framework::proto::VarType::LOD_TENSOR ||
-          var->GetType() == framework::proto::VarType::LOD_TENSOR_ARRAY)) {
-      continue;
-    }
-
-    auto var_name = var->Name();
-    if (var->Name() == framework::kEmptyVarName) {
-      continue;
-    }
-
-    if (var_name != framework::kFeedOpType &&
-        var_name != framework::kFetchOpType) {
-      auto pd_type = var->GetDataType();
-      if (pd2ng_type_map.find(pd_type) == pd2ng_type_map.end()) {
-        PADDLE_THROW("Data type of var %s not found in pd2ng_type_map",
-                     var_name);
-      }
-      var_type_map_[var_name] = pd2ng_type_map[pd_type];
-    }
-
-    if (var->Persistable()) {
-      persistables_.insert(var->Name());
-    }
-  }
-
-  std::vector<framework::OpDesc*> ops_desc;
-  for (auto op_desc : block_desc.AllOps()) {
-    ops_desc.emplace_back(op_desc);
-    if (op_desc->Type().find("_grad") != std::string::npos) {
-      this->is_test_ = false;
-    }
-  }
-
-  int idx = interval[0];
-  while (idx < interval[1]) {
-    this->fused_ops_.emplace_back(
-        framework::OpRegistry::CreateOp(*(ops_desc[idx])));
-    ++idx;
-  }
-  while (idx < static_cast<int>(ops_desc.size())) {
-    auto op_desc = ops_desc.at(idx);
-    for (auto& var_name_item : op_desc->Inputs()) {
-      for (auto& var_name : var_name_item.second) {
-        this->post_op_inputs_.insert(var_name);
-      }
-    }
-    ++idx;
-  }
-
-  auto input_vars = ctx.InputNames("Xs");
-  if (!input_vars.empty()) {
-    feed_vars = input_vars;
-    var_in_ = input_vars;
-  }
-
-  auto output_vars = ctx.OutputNames("Ys");
-  if (!output_vars.empty()) {
-    var_out_ = output_vars;
-  }
-
-  if (var_in_.empty() && var_out_.empty()) {
-    BuildNgIO(ops_desc, interval);
-  }
-
-  for (size_t i = 0; i < var_in_.size(); ++i) {
-    auto var_name = var_in_[i];
-    if (persistables_.find(var_name) == persistables_.end()) {
-      var_in_updates_.emplace_back(i);
-    }
-  }
-}
-
-void NgraphEngine::BuildNgIO(const std::vector<framework::OpDesc*>& ops_desc,
-                             const std::vector<int>& interval) {
-  std::unordered_set<std::string> inputs;
-  std::unordered_set<std::string> outputs;
-
-  for (int i = interval[0]; i < interval[1]; ++i) {
-    auto op = ops_desc[i];
-    for (auto& var_name_item : op->Inputs()) {
-      for (auto& var_name : var_name_item.second) {
-        inputs.insert(var_name);
-        const bool is_output = outputs.find(var_name) != outputs.end();
-        if (!is_output &&
-            std::find(var_in_.begin(), var_in_.end(), var_name) ==
-                var_in_.end() &&
-            scope_.FindVar(var_name)) {
-          // fill var_in here to keep lhs and rhs order
-          this->var_in_.emplace_back(var_name);
-        }
-      }
-    }
-
-    for (auto& var_name_item : op->Outputs()) {
-      PADDLE_ENFORCE_LE(var_name_item.second.size(), 1,
-                        "op %s has more than 1 output - Not handling yet",
-                        op->Type());
-      for (auto& var_name : var_name_item.second) {
-        outputs.insert(var_name);
-      }
-    }
-  }
-
-  // var_out.clear();
-  for (int i = interval[0]; i < interval[1]; ++i) {
-    auto op = ops_desc[i];
-    for (auto& var_name_item : op->Outputs()) {
-      PADDLE_ENFORCE_LE(var_name_item.second.size(), 1,
-                        "op %s has more than 1 output - Not handling yet",
-                        op->Type());
-      for (auto& var_name :
var_name_item.second) { - if (this->is_test_) { - if (post_op_inputs_.find(var_name) != post_op_inputs_.end()) { - this->var_out_.emplace_back(var_name); - } - } else { - if (post_op_inputs_.find(var_name) != post_op_inputs_.end() || - persistables_.find(var_name) != persistables_.end()) { - this->var_out_.emplace_back(var_name); - } - } - } - } - } - // remove output duplicates - std::unordered_set var_out_set; - for (int i = static_cast(var_out_.size()) - 1; i >= 0; --i) { - std::string var_name = var_out_.at(i); - if (var_out_set.count(var_name)) { - var_out_.erase(var_out_.begin() + i); - } - var_out_set.insert(var_name); - } -} - -void NgraphEngine::GetNgInputShape() { - for (auto& var_name : var_in_) { - auto* var = scope_.FindVar(var_name); - if (var && var->IsType()) { - auto* tensor_pd = GetLoDTensorOrSelectedRowsValueFromVar(*var); - auto sp = Ddim2Shape(tensor_pd->dims()); - auto ng_type = var_type_map_[var_name]; - auto prm = std::make_shared(ng_type, sp, true); - (*var_node_map_)[var_name] = prm; - (*var_in_node_map_)[var_name] = prm; - } - } -} - -void NgraphEngine::BuildNgNodes() { - for (auto& op : fused_ops_) { - for (auto& var_name_item : op->Outputs()) { - for (auto& var_name : var_name_item.second) { - if (var_node_map_->find(var_name) == var_node_map_->end()) { - auto* var = scope_.FindVar(var_name); - if (var && var->IsType()) { - auto* tensor_pd = GetLoDTensorOrSelectedRowsValueFromVar(*var); - auto& ddim = tensor_pd->dims(); - auto ng_shape = Ddim2Shape(ddim); - auto ng_type = var_type_map_[var_name]; - auto prm = std::make_shared(ng_type, - ng_shape, true); - (*var_node_map_)[var_name] = prm; - } - } - } - } - } - NgraphBridge ngb(var_node_map_); - for (auto& op : fused_ops_) { - ngb.BuildNgNode(op); - } -} - -std::shared_ptr NgraphEngine::BuildNgFunction( - const framework::ExecutionContext& ctx) { - Prepare(ctx); - GetNgInputShape(); - BuildNgNodes(); - ngraph::NodeVector func_outputs; - ngraph::ParameterVector func_inputs; - - for (auto& vo : var_out_) { - PADDLE_ENFORCE_GT(var_node_map_->count(vo), 0, - "Cannot find vo %s in var_node_map_", vo); - func_outputs.emplace_back(var_node_map_->at(vo)); - } - - for (auto& vi : var_in_) { - PADDLE_ENFORCE_GT(var_node_map_->count(vi), 0, - "Cannot find vi %s in var_node_map_", vi); - std::shared_ptr prm = - std::dynamic_pointer_cast( - var_in_node_map_->at(vi)); - func_inputs.emplace_back(prm); - } - - return std::make_shared(func_outputs, func_inputs); -} - -void NgraphEngine::ClearNgCache() { - auto& engine_cache = main_engine_cache::fetch(); - auto& t_in_cache_ = main_t_in_cache::fetch(); - - auto it = engine_cache.begin(); - while (it != engine_cache.end()) { - auto ng_engine = it->second; - ng_engine.ngraph_backend->remove_compiled_function(ng_engine.ngraph_handle); - ng_engine.ngraph_backend.reset(); - ++it; - } - engine_cache.clear(); - auto it_tensor = t_in_cache_.begin(); - while (it_tensor != t_in_cache_.end()) { - auto t_vec = it_tensor->second; - for (auto t_in : t_vec) { - t_in.reset(); - } - ++it_tensor; - } - t_in_cache_.clear(); -} - -void NgraphEngine::GetNgFunction(const framework::ExecutionContext& ctx) { - auto interval = ctx.Attr>("interval"); - std::string engine_key = ctx.Attr("engine_key"); - - // set to flase, to debug cache or recompile everytime. 
- bool use_cache = true; - if (!use_cache) ClearNgCache(); - - this->func_cache_key_ = ""; - for (int i = 0; i < static_cast(feed_vars.size()); ++i) { - auto* var = scope_.FindVar(feed_vars[i]); - if (var && var->IsType()) { - auto* tensor_pd = GetLoDTensorOrSelectedRowsValueFromVar(*var); - auto dims = tensor_pd->dims(); - for (int j = 0; j < dims.size(); ++j) { - func_cache_key_ += std::to_string(dims[j]); - } - } - } - func_cache_key_ += std::to_string(interval[0]) + "_" + - std::to_string(interval[1]) + engine_key; - func_cache_key_ = std::to_string(std::hash()(func_cache_key_)); - - auto& engine_cache = main_engine_cache::fetch(); - - if (engine_cache.find(func_cache_key_) != engine_cache.end()) { - if (engine_cache[func_cache_key_].persistables.size() == 0) { - ClearNgCache(); - } - } - - if (engine_cache.find(func_cache_key_) == engine_cache.end()) { - if (engine_cache.size() > 5) ClearNgCache(); - auto func = BuildNgFunction(ctx); - // Due to optimization backend may produce results in other layouts, - // make sure we get default layout for results. - for (auto& r : func->get_results()) { - r->set_needs_default_layout(true); - } - engine_cache[func_cache_key_].ngraph_backend = backend_; - engine_cache[func_cache_key_].ngraph_handle = backend_->compile(func); - engine_cache[func_cache_key_].persistables = this->persistables_; - engine_cache[func_cache_key_].var_in_updates = this->var_in_updates_; - engine_cache[func_cache_key_].var_in = this->var_in_; - engine_cache[func_cache_key_].var_out = this->var_out_; - engine_cache[func_cache_key_].is_test = this->is_test_; - } -} - -void NgraphEngine::Run(const framework::Scope& scope, - const platform::Place& place) const { - VLOG(3) << "NgraphEngine Run ..."; - std::shared_ptr ng_handle; - std::shared_ptr ng_backend; - const std::set* p_persistables; - const std::vector* p_var_in_updates; - const std::vector* p_var_in; - const std::vector* p_var_out; - - auto& engine_cache = main_engine_cache::fetch(); - auto& t_in_cache_ = main_t_in_cache::fetch(); - - PADDLE_ENFORCE_GT(engine_cache.count(func_cache_key_), 0, - "Cannot find cached data to run ngraph function"); - ng_handle = engine_cache[func_cache_key_].ngraph_handle; - ng_backend = engine_cache[func_cache_key_].ngraph_backend; - p_persistables = &(engine_cache[func_cache_key_].persistables); - p_var_in_updates = &(engine_cache[func_cache_key_].var_in_updates); - p_var_in = &(engine_cache[func_cache_key_].var_in); - p_var_out = &(engine_cache[func_cache_key_].var_out); - - std::vector>* p_t_in; - std::vector> t_in = {}; - - auto m_parameters = ng_handle->get_parameters(); - auto m_results = ng_handle->get_results(); - if (is_inference_ && t_in_cache_.find(func_cache_key_) != t_in_cache_.end()) { - p_t_in = &(t_in_cache_[func_cache_key_]); - for (size_t i = 0; i < p_var_in_updates->size(); ++i) { - int index = p_var_in_updates->at(i); - auto vi = p_var_in->at(index); - auto sp = m_parameters[index]->get_shape(); - auto ng_type = m_parameters[index]->get_element_type(); - std::shared_ptr ti; - auto* var = scope.FindVar(vi); - if (var && var->IsType()) { - auto* tensor_pd = GetMutableLoDTensorOrSelectedRowsValueFromVar(var); - void* pd_arr = tensor_pd->mutable_data(place, ng2pd_type_map[ng_type]); - ti = ng_backend->create_tensor(ng_type, sp, pd_arr); - (*p_t_in)[index] = ti; - } else { - PADDLE_THROW("Cannot find var or tensor with var name %s", vi); - } - } - } else { - if (is_inference_) { - p_t_in = &(t_in_cache_[func_cache_key_]); - } else { - p_t_in = &t_in; - } - - for (size_t i = 
0; i < p_var_in->size(); ++i) { - auto vi = p_var_in->at(i); - auto sp = m_parameters[i]->get_shape(); - auto ng_type = m_parameters[i]->get_element_type(); - std::shared_ptr ti; - auto* var = scope.FindVar(vi); - if (var && var->IsType()) { - auto* tensor_pd = GetMutableLoDTensorOrSelectedRowsValueFromVar(var); - void* pd_arr = tensor_pd->mutable_data(place, ng2pd_type_map[ng_type]); - ti = ng_backend->create_tensor(ng_type, sp, pd_arr); - } else { - PADDLE_THROW("Cannot find var or tensor with var name %s", vi); - } - bool is_persistable = - (p_persistables->find(vi) != p_persistables->end()) ? true : false; - if (is_inference_ && is_persistable) { - ti->set_stale(false); - } - (*p_t_in).emplace_back(ti); - } - } - - for (auto& op : fused_ops_) { - framework::RuntimeContext ctx(op->Inputs(), op->Outputs(), scope_); - op->RuntimeInferShape(scope_, place_, ctx); - } - - std::vector> t_out = {}; - for (size_t i = 0; i < p_var_out->size(); ++i) { - auto vo = p_var_out->at(i); - auto* var = scope.FindVar(vo); - if (var && var->IsType()) { - auto sp = m_results[i]->get_shape(); - var->GetMutable()->Resize(Shape2Ddim(sp)); - auto* tensor_pd = GetMutableLoDTensorOrSelectedRowsValueFromVar(var); - auto ng_type = m_results[i]->get_element_type(); - void* pd_arr = tensor_pd->mutable_data(place, ng2pd_type_map[ng_type]); - std::shared_ptr to = - ng_backend->create_tensor(ng_type, sp, pd_arr); - t_out.emplace_back(to); - } else { - PADDLE_THROW("Cannot find var or tensor with var name %s", vo); - } - } - - ng_handle->call(t_out, *p_t_in); -} // NgraphEngine::Run -} // namespace operators -} // namespace paddle diff --git a/paddle/fluid/operators/ngraph/ngraph_engine.h b/paddle/fluid/operators/ngraph/ngraph_engine.h deleted file mode 100644 index 0fb2d16749..0000000000 --- a/paddle/fluid/operators/ngraph/ngraph_engine.h +++ /dev/null @@ -1,203 +0,0 @@ -/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
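GetNgFunction and Run, just above, implement a shape-specialized compilation cache: the key mixes every feed tensor's dimensions with the fused op interval and a hash of the serialized block, so each distinct feed shape gets (and caches) its own compiled ngraph::Function. A minimal standalone sketch of that keying scheme follows; MakeCacheKey, CompiledFunc and GetOrCompile are illustrative names, not the removed API.

// Sketch of the shape-keyed memoization used by NgraphEngine::GetNgFunction.
#include <cstdint>
#include <functional>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>

struct CompiledFunc {};  // stand-in for a backend-compiled ngraph::Function

std::string MakeCacheKey(const std::vector<std::vector<int64_t>>& feed_dims,
                         int interval_begin, int interval_end,
                         const std::string& engine_key) {
  std::string key;
  for (const auto& dims : feed_dims)
    for (int64_t d : dims) key += std::to_string(d);  // shape digits
  key += std::to_string(interval_begin) + "_" + std::to_string(interval_end) +
         engine_key;
  // Hash once more so the key length is independent of rank and arity.
  return std::to_string(std::hash<std::string>()(key));
}

std::shared_ptr<CompiledFunc> GetOrCompile(
    std::unordered_map<std::string, std::shared_ptr<CompiledFunc>>* cache,
    const std::string& key) {
  auto it = cache->find(key);
  if (it != cache->end()) return it->second;     // hit: reuse compiled func
  auto func = std::make_shared<CompiledFunc>();  // miss: compile and insert
  (*cache)[key] = func;
  return func;
}

The removed implementation additionally wipes the whole cache once it holds more than five entries, and whenever a cached entry carries no persistables, trading recompilation for bounded memory.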
*/ - -#pragma once - -#include -#include -#include //NOLINT -#include -#include -#include -#include -#include -#include - -#include "paddle/fluid/framework/operator.h" -#include "paddle/fluid/framework/program_desc.h" -#include "paddle/fluid/framework/var_desc.h" - -#include "ngraph/ngraph.hpp" - -namespace paddle { -namespace operators { - -// cache engine repetitives -struct EngineCache { - std::shared_ptr ngraph_handle = nullptr; - std::shared_ptr ngraph_backend = nullptr; - std::set persistables; - std::vector var_in; - std::vector var_out; - std::vector var_in_updates; - bool is_test = true; -}; - -template -class NgraphThreadCache { - public: - typedef decltype(Engine::getMutex()) mutex_type; - typedef std::lock_guard guard_type; - typedef T& ref_type; - enum class type_of_thread { unknown, forward, backward }; - - template - struct MetaInfo { - std::thread::id owner_tid; // owner of the cache, future use; - type_of_thread worker_type; // future use - S real_content; - MetaInfo() - : owner_tid{std::this_thread::get_id()}, - worker_type{type_of_thread::unknown} {} - }; - - typedef std::unique_ptr> content_type; - typedef std::list storage_type; - - protected: - static storage_type l; - static mutex_type getMutex() { return Engine::getMutex(); } - static void remove_from_list(const T* raw_ptr) { - guard_type guard(getMutex()); - l.remove_if([raw_ptr](const content_type& sh) { - return &(sh->real_content) == raw_ptr; - }); - } - - template - struct TLSDescriptor { - TRaw* raw_ptr; - TLSDescriptor() : raw_ptr{nullptr} {} - ~TLSDescriptor() { - // if thread die - NgraphThreadCache::remove_from_list(raw_ptr); - - /* TODO : Parallel executor swap */ - // FastMultiThreadCache::keep_alive_for_backward_thread(raw_ptr); - } - }; - - public: - NgraphThreadCache() = delete; - NgraphThreadCache(const NgraphThreadCache& copy) = delete; - - static T& fetch() { - thread_local TLSDescriptor tls; - if (!tls.raw_ptr) { - using elem_type = typename content_type::element_type; - content_type _p(new elem_type()); - if (!_p) PADDLE_THROW("Cannot alloc memory for thread-cache "); - guard_type guard(getMutex()); - l.push_back(std::move(_p)); - tls.raw_ptr = &l.back()->real_content; - } - return *(tls.raw_ptr); - } - auto getSize() -> decltype(l.size()) { - guard_type guard(getMutex()); - return l.size(); - } - - template - void for_each_cache(F f) { - guard_type guard(getMutex()); - std::for_each(l.begin(), l.end(), f); - } -}; - -template -typename NgraphThreadCache::storage_type - NgraphThreadCache::l; - -// perform graph build through bridge and execute computation -class NgraphEngine { - public: - explicit NgraphEngine(const framework::Scope& scope, - const platform::Place& place, - const framework::ExecutionContext& ctx); - - void Run(const framework::Scope& scope, const platform::Place& place) const; - - static std::vector feed_vars; - - static void FuseNgraphOps( - const framework::BlockDesc& prog, - std::vector>* ops); - - static std::recursive_mutex& getMutex() { - static std::recursive_mutex mx; - return mx; - } - - private: - template - using ThCache = - NgraphThreadCache, NgraphEngine>; - - using main_engine_cache = ThCache; - using main_t_in_cache = - ThCache>>; - - const framework::Scope& scope_; - const platform::Place& place_; - std::vector> fused_ops_; - std::unordered_map var_type_map_; - std::set persistables_; - std::unordered_set post_op_inputs_; - // it is test for a single run, it can be a validation during training - bool is_test_{true}; - // inference only. eg. 
CAPI inference - bool is_inference_{false}; - std::string func_cache_key_; - // use a weak pointer to keep backend_ alive - // to avoid it being destroyed too early - static std::weak_ptr<ngraph::runtime::Backend> wp_backend_; - // use mutex to keep it thread safe - static std::mutex ng_mutex_; - // ngraph backend, e.g. CPU - std::shared_ptr<ngraph::runtime::Backend> backend_; - // var_name of inputs - std::vector<std::string> var_in_; - // var_name of outputs from fetch in order - std::vector<std::string> var_out_; - // non-persistable var_in - std::vector<size_t> var_in_updates_; - // map input vars to nodes - std::shared_ptr< - std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>> - var_in_node_map_; - // map each var name to an ngraph node - std::shared_ptr< - std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>> - var_node_map_; - // prepare the info the ngraph engine needs - void Prepare(const framework::ExecutionContext& ctx); - // get ngraph engine input and output list - void BuildNgIO(const std::vector<framework::OpDesc*>& op_descs, - const std::vector<int>& interval); - // get ngraph input and define ngraph input parameters - void GetNgInputShape(); - // Call ngraph bridge to map ops - void BuildNgNodes(); - // build ngraph function call - std::shared_ptr<ngraph::Function> BuildNgFunction( - const framework::ExecutionContext& ctx); - // clear ngraph engine cache and t_in cache - void ClearNgCache(); - // Check cache for ngraph function or otherwise build the function - void GetNgFunction(const framework::ExecutionContext& ctx); -}; - -} // namespace operators -} // namespace paddle diff --git a/paddle/fluid/operators/ngraph/ngraph_engine_op.cc b/paddle/fluid/operators/ngraph/ngraph_engine_op.cc deleted file mode 100644 index 621f1a3d8c..0000000000 --- a/paddle/fluid/operators/ngraph/ngraph_engine_op.cc +++ /dev/null @@ -1,51 +0,0 @@ -/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License.
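NgraphThreadCache, declared above, gives each thread its own EngineCache map: fetch() allocates the calling thread's slot on first use, registers it in a global mutex-guarded list, and a thread_local descriptor unlinks it again when the thread exits. A condensed sketch of the same pattern, assuming a plain std::mutex where the original used the engine's recursive mutex, and omitting the MetaInfo bookkeeping:

#include <list>
#include <memory>
#include <mutex>

template <typename T>
class PerThreadSlot {
 public:
  // Returns this thread's T, creating and registering it on first use.
  static T& fetch() {
    thread_local Descriptor tls;
    if (!tls.ptr) {
      std::lock_guard<std::mutex> g(mu());
      storage().push_back(std::make_unique<T>());
      tls.ptr = storage().back().get();
    }
    return *tls.ptr;
  }

 private:
  struct Descriptor {
    T* ptr = nullptr;
    ~Descriptor() {  // runs at thread exit: drop this thread's entry
      if (!ptr) return;
      std::lock_guard<std::mutex> g(mu());
      storage().remove_if(
          [p = ptr](const std::unique_ptr<T>& e) { return e.get() == p; });
    }
  };
  static std::mutex& mu() {
    static std::mutex m;
    return m;
  }
  static std::list<std::unique_ptr<T>>& storage() {
    static std::list<std::unique_ptr<T>> l;
    return l;
  }
};

Keeping the caches per-thread is what lets several executor threads run fused subgraphs concurrently without the compiled-function and tensor caches racing.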
*/ - -#include <string> - -#include "paddle/fluid/framework/block_desc.h" -#include "paddle/fluid/framework/op_desc.h" -#include "paddle/fluid/framework/op_info.h" -#include "paddle/fluid/framework/op_registry.h" -#include "paddle/fluid/operators/ngraph/ngraph_engine_op.h" - -namespace paddle { -namespace operators { - -class NgraphEngineOpMaker : public framework::OpProtoAndCheckerMaker { - public: - void Make() override { - AddInput("Xs", "A list of inputs.").AsDispensable(); - AddOutput("Ys", "A list of outputs").AsDispensable(); - AddAttr<std::string>("graph", "the graph."); - AddAttr<std::string>("engine_key", "the engine hash key."); - AddAttr<std::vector<int>>("interval", "op interval supported by ngraph"); - AddComment("ngraph engine operator."); - } -}; - -class NgraphEngineInferVarType : public framework::VarTypeInference { - public: - void operator()(framework::InferVarTypeContext *ctx) const override {} -}; - -} // namespace operators -} // namespace paddle - -namespace ops = paddle::operators; - -REGISTER_OPERATOR(ngraph_engine, ops::NgraphEngineOp, ops::NgraphEngineOpMaker); -REGISTER_OP_CPU_KERNEL( - ngraph_engine, - ops::NgraphEngineKernel<paddle::platform::CPUDeviceContext, float>); diff --git a/paddle/fluid/operators/ngraph/ngraph_engine_op.h b/paddle/fluid/operators/ngraph/ngraph_engine_op.h deleted file mode 100644 index c9b2a3970e..0000000000 --- a/paddle/fluid/operators/ngraph/ngraph_engine_op.h +++ /dev/null @@ -1,56 +0,0 @@ -/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License.
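The maker above declares only three attributes, which is everything the fusion pass needs to hand a subgraph to the engine. SubstituteNgraphOp, earlier in this patch, fills them in essentially as follows (a simplified restatement of code removed above, not a new API):

#include <string>
#include <vector>

#include "paddle/fluid/framework/op_desc.h"

void FillEngineOpDesc(paddle::framework::OpDesc* desc,
                      const std::string& engine_key,
                      const std::string& block_str,
                      const std::vector<int>& interval) {
  desc->SetType("ngraph_engine");
  desc->SetAttr("interval", interval);      // [begin, end) of the fused ops
  desc->SetAttr("engine_key", engine_key);  // hash of the serialized block
  desc->SetAttr("graph", block_str);        // serialized BlockDesc proto
  desc->SetInput("Xs", std::vector<std::string>(0));
  desc->SetOutput("Ys", std::vector<std::string>(0));
}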
*/ - -#pragma once - -#include -#include - -#include "paddle/fluid/framework/scope.h" -#include "paddle/fluid/operators/ngraph/ngraph_engine.h" -#include "paddle/fluid/platform/device_context.h" -#include "paddle/fluid/platform/place.h" - -namespace paddle { -namespace operators { - -class NgraphEngineOp : public framework::OperatorWithKernel { - public: - using framework::OperatorWithKernel::OperatorWithKernel; - - protected: - void InferShape(framework::InferShapeContext* ctx) const override {} - - framework::OpKernelType GetExpectedKernelType( - const framework::ExecutionContext& ctx) const override { - framework::OpKernelType kt = framework::OpKernelType( - framework::proto::VarType::FP32, platform::CPUPlace()); - return kt; - } -}; - -template -class NgraphEngineKernel : public framework::OpKernel { - public: - void Compute(const framework::ExecutionContext& ctx) const override { - auto& scope = ctx.scope(); - auto place = ctx.GetPlace(); - - NgraphEngine ngraph_engine(scope, place, ctx); - ngraph_engine.Run(scope, place); - } -}; - -} // namespace operators -} // namespace paddle diff --git a/paddle/fluid/operators/ngraph/ops/CMakeLists.txt b/paddle/fluid/operators/ngraph/ops/CMakeLists.txt deleted file mode 100644 index 7dee3308b7..0000000000 --- a/paddle/fluid/operators/ngraph/ops/CMakeLists.txt +++ /dev/null @@ -1,8 +0,0 @@ -file(GLOB LIST_OPS RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "*.h") -set(pass_file ${PADDLE_BINARY_DIR}/paddle/fluid/operators/ngraph/ngraph_ops.h) -file(APPEND ${pass_file} "\#pragma once\n") -file(WRITE ${pass_file} "// Generated by the /paddle/fluid/operators/ngraph/ops/CMakeLists.txt. DO NOT EDIT!\n\n") - -foreach(OPS_NAME ${LIST_OPS}) - file(APPEND ${pass_file} "\#include \"paddle/fluid/operators/ngraph/ops/${OPS_NAME}\"\n") -endforeach(OPS_NAME) diff --git a/paddle/fluid/operators/ngraph/ops/accuracy_op.h b/paddle/fluid/operators/ngraph/ops/accuracy_op.h deleted file mode 100644 index 0da57517a7..0000000000 --- a/paddle/fluid/operators/ngraph/ops/accuracy_op.h +++ /dev/null @@ -1,70 +0,0 @@ -/*Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
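For reference, the accuracy builder in the next file is a graph-side encoding of top-k accuracy: broadcast the label across the k candidate indices produced by top_k, count the rows with a match, and divide by the batch size. The same computation in plain C++ (illustrative only, no nGraph):

#include <cstdint>
#include <vector>

// topk_indices[i] holds the k predicted class ids for sample i.
float TopKAccuracy(const std::vector<std::vector<int64_t>>& topk_indices,
                   const std::vector<int64_t>& labels) {
  int64_t correct = 0;
  for (size_t i = 0; i < labels.size(); ++i) {
    for (int64_t idx : topk_indices[i]) {
      if (idx == labels[i]) {  // label can match at most one of the k slots
        ++correct;
        break;
      }
    }
  }
  return static_cast<float>(correct) / static_cast<float>(labels.size());
}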
*/ - -#pragma once - -#include -#include -#include -#include -#include "ngraph/ngraph.hpp" -#include "paddle/fluid/operators/ngraph/ops/op_bridge.h" -#include "paddle/fluid/platform/ngraph_helper.h" - -namespace paddle { -namespace operators { -namespace ngraphs { - -void BuildAccuracyNode( - const std::shared_ptr& op, - std::shared_ptr< - std::unordered_map>> - ngb_node_map) { - auto indices = platform::GetInputNode(op, "Indices", ngb_node_map); - auto label = platform::GetInputNode(op, "Label", ngb_node_map); - auto inference = platform::GetInputNode(op, "Out", ngb_node_map); - auto inference_shape = inference->get_shape(); - size_t num_samples = inference_shape.at(0); - size_t k = inference_shape.at(1); - - std::shared_ptr label_k = label; - if (k > 1) { - auto label_1d = std::make_shared( - label, ngraph::AxisVector{0, 1}, ngraph::Shape{num_samples}); - label_k = std::make_shared(label_1d, inference_shape, - ngraph::AxisSet{1}); - } - - auto node_equal = std::make_shared(indices, label_k); - auto node_eq_int = - std::make_shared(node_equal, ngraph::element::i64); - auto num_correct_0d = - std::make_shared(node_eq_int, ngraph::AxisSet{0, 1}); - std::shared_ptr num_correct = - platform::NgReshaper(num_correct_0d, ngraph::Shape{1}); - std::shared_ptr n_samples = ngraph::op::Constant::create( - ngraph::element::i64, ngraph::Shape{1}, {num_samples}); - std::shared_ptr accuracy = std::make_shared( - std::make_shared(num_correct, ngraph::element::f32), - std::make_shared(n_samples, ngraph::element::f32)); - - platform::SetOutputNode(op, "Accuracy", accuracy, ngb_node_map); - platform::SetOutputNode(op, "Correct", num_correct, ngb_node_map); - platform::SetOutputNode(op, "Total", n_samples, ngb_node_map); -} -} // namespace ngraphs -} // namespace operators -} // namespace paddle - -REGISTER_NG_OP(accuracy, BuildAccuracyNode); diff --git a/paddle/fluid/operators/ngraph/ops/activation_op.h b/paddle/fluid/operators/ngraph/ops/activation_op.h deleted file mode 100644 index 884ec65926..0000000000 --- a/paddle/fluid/operators/ngraph/ops/activation_op.h +++ /dev/null @@ -1,117 +0,0 @@ -/*Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
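The gelu builders in the next file implement the exact, erf-based GELU rather than the tanh approximation. In the code's notation (x the input, dout the incoming gradient):

\mathrm{gelu}(x) = \tfrac{x}{2}\left(1 + \operatorname{erf}\!\left(x/\sqrt{2}\right)\right)

\mathrm{gelu}'(x) = \tfrac{1}{2}\left(1 + \operatorname{erf}\!\left(x/\sqrt{2}\right)\right) + \frac{x}{\sqrt{2\pi}}\, e^{-x^{2}/2}

Likewise, BuildTanhGradNode emits dx = dout · (1 - out²), the usual tanh backward expressed on the already-computed forward output.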
*/ - -#pragma once - -#include -#include -#include - -#include "ngraph/ngraph.hpp" -#include "paddle/fluid/operators/ngraph/ops/op_bridge.h" -#include "paddle/fluid/platform/ngraph_helper.h" - -namespace paddle { -namespace operators { -namespace ngraphs { - -void BuildGeluNode( - const std::shared_ptr& op, - std::shared_ptr< - std::unordered_map>> - ngb_node_map) { - auto input = platform::GetInputNode(op, "X", ngb_node_map); - auto half = paddle::platform::CreateConstant(input->get_element_type(), - input->get_shape(), {0.5}); - auto one = paddle::platform::CreateConstant(input->get_element_type(), - input->get_shape(), {1}); - auto sqrt_two = - std::make_shared(paddle::platform::CreateConstant( - input->get_element_type(), input->get_shape(), {2})); - auto out = half * input * - (one + std::make_shared(input / sqrt_two)); - platform::SetOutputNode(op, "Out", out, ngb_node_map); -} - -void BuildGeluGradNode( - const std::shared_ptr& op, - std::shared_ptr< - std::unordered_map>> - ngb_node_map) { - auto input = platform::GetInputNode(op, "X", ngb_node_map); - auto dout = platform::GetInputNode(op, "Out@GRAD", ngb_node_map); - auto half = paddle::platform::CreateConstant(input->get_element_type(), - input->get_shape(), {0.5}); - auto minus_half = paddle::platform::CreateConstant( - input->get_element_type(), input->get_shape(), {-0.5}); - auto one = paddle::platform::CreateConstant(input->get_element_type(), - input->get_shape(), {1}); - auto two = paddle::platform::CreateConstant(input->get_element_type(), - input->get_shape(), {2}); - auto pi = paddle::platform::CreateConstant( - input->get_element_type(), input->get_shape(), {3.14159265359}); - auto sqrt_two = std::make_shared(two); - auto sqrt_pi = std::make_shared(pi); - - auto first = - half * (one + std::make_shared(input * one / sqrt_two)); - auto second = half * (two / sqrt_pi) * (one / sqrt_two) * input * - std::make_shared(minus_half * input * input); - auto gelu_grad = dout * (first + second); - platform::SetOutputNode(op, "X@GRAD", gelu_grad, ngb_node_map); -} - -void BuildReluGradNode( - const std::shared_ptr& op, - std::shared_ptr< - std::unordered_map>> - ngb_node_map) { - auto out = platform::GetInputNode(op, "Out", ngb_node_map); - auto dout = platform::GetInputNode(op, "Out@GRAD", ngb_node_map); - auto relu_grad = std::make_shared(out, dout); - platform::SetOutputNode(op, "X@GRAD", relu_grad, ngb_node_map); -} - -void BuildSquareNode( - const std::shared_ptr& op, - std::shared_ptr< - std::unordered_map>> - ngb_node_map) { - auto input = platform::GetInputNode(op, "X", ngb_node_map); - auto out = input * input; - platform::SetOutputNode(op, "Out", out, ngb_node_map); -} - -void BuildTanhGradNode( - const std::shared_ptr& op, - std::shared_ptr< - std::unordered_map>> - ngb_node_map) { - auto out = platform::GetInputNode(op, "Out", ngb_node_map); - auto dout = platform::GetInputNode(op, "Out@GRAD", ngb_node_map); - auto shape = out->get_shape(); - auto node_const = - ngraph::op::Constant::create(ngraph::element::f32, shape, {1}); - auto result = dout * (node_const - out * out); - platform::SetOutputNode(op, "X@GRAD", result, ngb_node_map); -} -} // namespace ngraphs -} // namespace operators -} // namespace paddle - -REGISTER_NG_OP(gelu, BuildGeluNode); -REGISTER_NG_OP(gelu_grad, BuildGeluGradNode); -REGISTER_NG_OP(relu_grad, BuildReluGradNode); -REGISTER_NG_OP(square, BuildSquareNode); -REGISTER_NG_OP(tanh_grad, BuildTanhGradNode); diff --git a/paddle/fluid/operators/ngraph/ops/adam_op.h 
b/paddle/fluid/operators/ngraph/ops/adam_op.h deleted file mode 100644 index 93383a83a2..0000000000 --- a/paddle/fluid/operators/ngraph/ops/adam_op.h +++ /dev/null @@ -1,84 +0,0 @@ -/*Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#pragma once - -#include -#include -#include - -#include "ngraph/ngraph.hpp" -#include "paddle/fluid/operators/ngraph/ops/elementwise_scalar_op.h" -#include "paddle/fluid/operators/ngraph/ops/op_bridge.h" -#include "paddle/fluid/platform/ngraph_helper.h" - -namespace paddle { -namespace operators { -namespace ngraphs { - -void BuildAdamNode( - const std::shared_ptr& op, - std::shared_ptr< - std::unordered_map>> - ngb_node_map) { - auto op_attrs = framework::AttrReader(op->Attrs()); - auto beta1pow = platform::GetInputNode(op, "Beta1Pow", ngb_node_map); - auto beta2pow = platform::GetInputNode(op, "Beta2Pow", ngb_node_map); - auto grad = platform::GetInputNode(op, "Grad", ngb_node_map); - auto learning_rate = platform::GetInputNode(op, "LearningRate", ngb_node_map); - auto moment1 = platform::GetInputNode(op, "Moment1", ngb_node_map); - auto moment2 = platform::GetInputNode(op, "Moment2", ngb_node_map); - auto param = platform::GetInputNode(op, "Param", ngb_node_map); - - auto epsilon = op_attrs.Get("epsilon"); - auto beta2 = op_attrs.Get("beta2"); - auto beta1 = op_attrs.Get("beta1"); - - auto moment1_shape = moment1->get_shape(); - auto grad_shape = grad->get_shape(); - - auto moment1out = std::make_shared( - ElementwiseScalar(beta1, moment1), - ElementwiseScalar(1. - beta1, grad)); - - auto grad_square = std::make_shared(grad, grad); - auto moment2out = std::make_shared( - ElementwiseScalar(beta2, moment2), - ElementwiseScalar(1. 
- beta2, grad_square)); - auto node_sqrt = std::make_shared( - ElementwiseScalar(1., beta2pow)); - auto lr = std::make_shared( - node_sqrt, ElementwiseScalar(1., beta1pow)); - auto updated_lr = std::make_shared(learning_rate, lr); - - auto moment2_sqrt = std::make_shared(moment2out); - auto param_grad = std::make_shared( - moment1out, ElementwiseScalar(epsilon, moment2_sqrt)); - auto delta = ElementwiseScalar(updated_lr, param_grad); - auto param_out = std::make_shared(param, delta); - - auto beta1_pow_out = ElementwiseScalar(beta1, beta1pow); - auto beta2_pow_out = ElementwiseScalar(beta2, beta2pow); - - platform::SetOutputNode(op, "Moment1Out", moment1out, ngb_node_map); - platform::SetOutputNode(op, "Moment2Out", moment2out, ngb_node_map); - platform::SetOutputNode(op, "ParamOut", param_out, ngb_node_map); - platform::SetOutputNode(op, "Beta1PowOut", beta1_pow_out, ngb_node_map); - platform::SetOutputNode(op, "Beta2PowOut", beta2_pow_out, ngb_node_map); -} -} // namespace ngraphs -} // namespace operators -} // namespace paddle - -REGISTER_NG_OP(adam, BuildAdamNode); diff --git a/paddle/fluid/operators/ngraph/ops/assign_op.h b/paddle/fluid/operators/ngraph/ops/assign_op.h deleted file mode 100644 index 1815c2ee2d..0000000000 --- a/paddle/fluid/operators/ngraph/ops/assign_op.h +++ /dev/null @@ -1,43 +0,0 @@ -/*Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#pragma once - -#include -#include -#include -#include - -#include "ngraph/ngraph.hpp" -#include "paddle/fluid/operators/ngraph/ops/op_bridge.h" -#include "paddle/fluid/platform/ngraph_helper.h" - -namespace paddle { -namespace operators { -namespace ngraphs { - -static void BuildAssignNode( - const std::shared_ptr& op, - std::shared_ptr< - std::unordered_map>> - ngb_node_map) { - auto input = platform::GetInputNode(op, "X", ngb_node_map); - auto out = input; - paddle::platform::SetOutputNode(op, "Out", out, ngb_node_map); -} -} // namespace ngraphs -} // namespace operators -} // namespace paddle - -REGISTER_NG_OP(assign, BuildAssignNode); diff --git a/paddle/fluid/operators/ngraph/ops/batch_norm_op.h b/paddle/fluid/operators/ngraph/ops/batch_norm_op.h deleted file mode 100644 index 01fe78cdb2..0000000000 --- a/paddle/fluid/operators/ngraph/ops/batch_norm_op.h +++ /dev/null @@ -1,163 +0,0 @@ -/*Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
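BuildAdamNode above is a graph-side transcription of the standard Adam update. With g the gradient, m and v the first and second moments, and β₁ᵗ, β₂ᵗ the incoming Beta1Pow/Beta2Pow accumulators:

m' = \beta_1 m + (1 - \beta_1)\, g \qquad v' = \beta_2 v + (1 - \beta_2)\, g^2

\widehat{lr} = lr \cdot \frac{\sqrt{1 - \beta_2^{t}}}{1 - \beta_1^{t}} \qquad \theta' = \theta - \widehat{lr} \cdot \frac{m'}{\varepsilon + \sqrt{v'}}

The power accumulators are then advanced one step, Beta1PowOut = β₁ · β₁ᵗ and Beta2PowOut = β₂ · β₂ᵗ, matching the two ElementwiseScalar multiplies at the end of the builder.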
*/ - -#pragma once - -#include -#include -#include -#include - -#include "ngraph/ngraph.hpp" -#include "paddle/fluid/operators/ngraph/ops/elementwise_node.h" -#include "paddle/fluid/operators/ngraph/ops/elementwise_scalar_op.h" -#include "paddle/fluid/operators/ngraph/ops/op_bridge.h" -#include "paddle/fluid/platform/ngraph_helper.h" - -namespace paddle { -namespace operators { -namespace ngraphs { - -void BuildBatchNormNode( - const std::shared_ptr& op, - std::shared_ptr< - std::unordered_map>> - ngb_node_map) { - auto op_attrs = paddle::framework::AttrReader(op->Attrs()); - auto& data_layout = op_attrs.Get("data_layout"); - - auto bias = paddle::platform::GetInputNode(op, "Bias", ngb_node_map); - auto mean = paddle::platform::GetInputNode(op, "Mean", ngb_node_map); - auto variance = paddle::platform::GetInputNode(op, "Variance", ngb_node_map); - auto scale = paddle::platform::GetInputNode(op, "Scale", ngb_node_map); - auto x = paddle::platform::GetInputNode(op, "X", ngb_node_map); - - const bool is_test = op_attrs.Get("is_test"); - const float epsilon = op_attrs.Get("epsilon"); - const float momentum = op_attrs.Get("momentum"); - - PADDLE_ENFORCE( - data_layout == "NHWC" || data_layout == "NCHW" || data_layout == "NC", - "The BatchNorm operator only supports NHWC/NCHW/NC data format"); - - if (data_layout == "NHWC") { - x = paddle::platform::Nhwc2Nchw(x); - } - - std::shared_ptr mean_out, saved_mean, saved_variance, - variance_out, y; - - if (!is_test) { - auto BN = std::make_shared(epsilon, scale, - bias, x); - y = std::make_shared(BN, 0); - saved_mean = std::make_shared(BN, 1); - saved_variance = std::make_shared(BN, 2); - - mean_out = std::make_shared( - paddle::operators::ngraphs::ElementwiseScalar( - momentum, mean), - paddle::operators::ngraphs::ElementwiseScalar( - 1. - momentum, saved_mean)); - variance_out = std::make_shared( - paddle::operators::ngraphs::ElementwiseScalar( - momentum, variance), - paddle::operators::ngraphs::ElementwiseScalar( - 1. 
- momentum, saved_variance)); - - if (data_layout == "NHWC") { - y = paddle::platform::Nchw2Nhwc(y); - } - - paddle::platform::SetOutputNode(op, "MeanOut", mean_out, ngb_node_map); - paddle::platform::SetOutputNode(op, "VarianceOut", variance_out, - ngb_node_map); - paddle::platform::SetOutputNode(op, "SavedMean", saved_mean, ngb_node_map); - paddle::platform::SetOutputNode(op, "SavedVariance", saved_variance, - ngb_node_map); - paddle::platform::SetOutputNode(op, "Y", y, ngb_node_map); - } else { - y = std::make_shared(epsilon, scale, bias, - x, mean, variance); - paddle::platform::SetOutputNode(op, "Y", y, ngb_node_map); - } -} - -void BuildBatchNormGradNode( - const std::shared_ptr& op, - std::shared_ptr< - std::unordered_map>> - ngb_node_map) { - auto op_attrs = paddle::framework::AttrReader(op->Attrs()); - auto& data_layout = op_attrs.Get("data_layout"); - - auto bias = paddle::platform::GetInputNode(op, "Bias", ngb_node_map); - auto saved_mean = - paddle::platform::GetInputNode(op, "SavedMean", ngb_node_map); - auto saved_variance = - paddle::platform::GetInputNode(op, "SavedVariance", ngb_node_map); - auto scale = paddle::platform::GetInputNode(op, "Scale", ngb_node_map); - auto x = paddle::platform::GetInputNode(op, "X", ngb_node_map); - auto dy = paddle::platform::GetInputNode(op, "Y@GRAD", ngb_node_map); - auto x_shape = x->get_shape(); - auto dy_shape = dy->get_shape(); - - PADDLE_ENFORCE(x_shape.size() == 2 || x_shape.size() == 4, - "BN grap input size needs to be 2 or 4"); - PADDLE_ENFORCE_EQ(x_shape.size(), dy_shape.size(), - "BN grap input and delta size needs to be equal"); - PADDLE_ENFORCE( - data_layout == "NHWC" || data_layout == "NCHW" || data_layout == "NC", - "The BatchNorm operator only supports NHWC/NCHW/NC data format"); - - if (x_shape.size() == 2) { - x = std::make_shared( - x, ngraph::AxisVector{0, 1}, - ngraph::Shape{x_shape.at(0), x_shape.at(1), 1, 1}); - dy = std::make_shared( - dy, ngraph::AxisVector{0, 1}, - ngraph::Shape{dy_shape.at(0), dy_shape.at(1), 1, 1}); - } - - if (data_layout == "NHWC") { - x = paddle::platform::Nhwc2Nchw(dy); - dy = paddle::platform::Nhwc2Nchw(dy); - } - const float epsilon = op_attrs.Get("epsilon"); - - auto bn_bprop = std::make_shared( - epsilon, scale, bias, x, saved_mean, saved_variance, dy); - - std::shared_ptr dx = - std::make_shared(bn_bprop, 0); - auto dscale = std::make_shared(bn_bprop, 1); - auto dbias = std::make_shared(bn_bprop, 2); - paddle::platform::SetOutputNode(op, "Bias@GRAD", dbias, ngb_node_map); - paddle::platform::SetOutputNode(op, "Scale@GRAD", dscale, ngb_node_map); - if (x_shape.size() == 2) { - paddle::platform::SetOutputNode( - op, "X@GRAD", paddle::platform::NgReshaper(dx, x_shape), ngb_node_map); - } else { - if (data_layout == "NHWC") { - dx = paddle::platform::Nchw2Nhwc(dx); - } - paddle::platform::SetOutputNode(op, "X@GRAD", dx, ngb_node_map); - } -} -} // namespace ngraphs -} // namespace operators -} // namespace paddle - -REGISTER_NG_OP(batch_norm, BuildBatchNormNode); -REGISTER_NG_OP(batch_norm_grad, BuildBatchNormGradNode); diff --git a/paddle/fluid/operators/ngraph/ops/binary_unary_op.h b/paddle/fluid/operators/ngraph/ops/binary_unary_op.h deleted file mode 100644 index b8e9f3d858..0000000000 --- a/paddle/fluid/operators/ngraph/ops/binary_unary_op.h +++ /dev/null @@ -1,61 +0,0 @@ -/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#pragma once - -#include -#include -#include -#include "ngraph/ngraph.hpp" -#include "paddle/fluid/operators/ngraph/ops/op_bridge.h" -#include "paddle/fluid/platform/ngraph_helper.h" - -namespace paddle { -namespace operators { -namespace ngraphs { - -template -static void BuildBinaryNode( - const std::shared_ptr& op, - std::shared_ptr< - std::unordered_map>> - ngb_node_map) { - auto x = paddle::platform::GetInputNode(op, "X", ngb_node_map); - auto y = paddle::platform::GetInputNode(op, "Y", ngb_node_map); - auto out = std::make_shared(x, y); - paddle::platform::SetOutputNode(op, "Out", out, ngb_node_map); -} - -template -static void BuildUnaryNode( - const std::shared_ptr& op, - std::shared_ptr< - std::unordered_map>> - ngb_node_map) { - auto input = paddle::platform::GetInputNode(op, "X", ngb_node_map); - auto out = std::make_shared(input); - paddle::platform::SetOutputNode(op, "Out", out, ngb_node_map); -} -} // namespace ngraphs -} // namespace operators -} // namespace paddle - -REGISTER_NG_OP(abs, BuildUnaryNode); -REGISTER_NG_OP(relu, BuildUnaryNode); -REGISTER_NG_OP(tanh, BuildUnaryNode); -REGISTER_NG_OP(sigmoid, BuildUnaryNode); - -REGISTER_NG_OP(logical_and, BuildBinaryNode); -REGISTER_NG_OP(logical_or, BuildBinaryNode); -REGISTER_NG_OP(logical_not, BuildUnaryNode); diff --git a/paddle/fluid/operators/ngraph/ops/cast_op.h b/paddle/fluid/operators/ngraph/ops/cast_op.h deleted file mode 100644 index ae26e7fde6..0000000000 --- a/paddle/fluid/operators/ngraph/ops/cast_op.h +++ /dev/null @@ -1,47 +0,0 @@ -/*Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
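For the batch_norm builders above: training normalizes with per-batch statistics and folds them into the running averages using Paddle's momentum convention, where the new statistics get weight 1 - momentum:

y = \gamma\, \frac{x - \mu_B}{\sqrt{\sigma_B^2 + \varepsilon}} + \beta

\mathrm{MeanOut} = m \cdot \mathrm{Mean} + (1 - m)\, \mu_B \qquad \mathrm{VarianceOut} = m \cdot \mathrm{Variance} + (1 - m)\, \sigma_B^2

Here μ_B and σ²_B are the SavedMean/SavedVariance outputs of the BatchNormTraining node, and the is_test branch instead emits a single BatchNormInference node fed with the stored Mean and Variance.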
*/ - -#pragma once - -#include -#include -#include -#include - -#include "ngraph/ngraph.hpp" -#include "paddle/fluid/operators/ngraph/ops/op_bridge.h" -#include "paddle/fluid/platform/ngraph_helper.h" - -namespace paddle { -namespace operators { -namespace ngraphs { - -static void BuildCastNode( - const std::shared_ptr& op, - std::shared_ptr< - std::unordered_map>> - ngb_node_map) { - auto input = platform::GetInputNode(op, "X", ngb_node_map); - auto op_attrs = framework::AttrReader(op->Attrs()); - auto ng_dtype = - platform::GetNgType(static_cast( - op_attrs.Get("out_dtype"))); - auto out = std::make_shared(input, ng_dtype); - paddle::platform::SetOutputNode(op, "Out", out, ngb_node_map); -} -} // namespace ngraphs -} // namespace operators -} // namespace paddle - -REGISTER_NG_OP(cast, BuildCastNode); diff --git a/paddle/fluid/operators/ngraph/ops/concat_op.h b/paddle/fluid/operators/ngraph/ops/concat_op.h deleted file mode 100644 index f34e161177..0000000000 --- a/paddle/fluid/operators/ngraph/ops/concat_op.h +++ /dev/null @@ -1,53 +0,0 @@ -/*Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#pragma once - -#include -#include -#include -#include -#include "ngraph/ngraph.hpp" -#include "paddle/fluid/operators/ngraph/ops/op_bridge.h" -#include "paddle/fluid/platform/ngraph_helper.h" - -namespace paddle { -namespace operators { -namespace ngraphs { - -void BuildConcatNode( - const std::shared_ptr& op, - std::shared_ptr< - std::unordered_map>> - ngb_node_map) { - std::vector> args; - for (auto& var_name_item : op->Inputs()) { - for (auto& var_name : var_name_item.second) { - auto& node0 = ngb_node_map->at(var_name); - args.push_back(node0); - } - } - auto op_attrs = framework::AttrReader(op->Attrs()); - int axis = op_attrs.Get("axis"); - if (axis < 0) { - axis = axis + args[0]->get_shape().size(); - } - auto out = std::make_shared(args, axis); - platform::SetOutputNode(op, "Out", out, ngb_node_map); -} -} // namespace ngraphs -} // namespace operators -} // namespace paddle - -REGISTER_NG_OP(concat, BuildConcatNode); diff --git a/paddle/fluid/operators/ngraph/ops/conv2d_op.h b/paddle/fluid/operators/ngraph/ops/conv2d_op.h deleted file mode 100644 index ab88d870c4..0000000000 --- a/paddle/fluid/operators/ngraph/ops/conv2d_op.h +++ /dev/null @@ -1,242 +0,0 @@ -/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
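The conv2d file that follows emulates grouped convolution by hand: the Grouped* helpers slice the data and filters per group, convolve each pair, and concatenate the results along the channel axis. The slice arithmetic, restated standalone (NCHW data, OIHW filters; the names here are illustrative):

#include <cstddef>
#include <vector>

struct SliceBounds {
  std::vector<size_t> lower, upper;
};

// Input slice for group g: channels [g * ic_per_group, (g + 1) * ic_per_group).
SliceBounds DataSlice(const std::vector<size_t>& data_shape,
                      size_t ic_per_group, size_t g) {
  return {{0, g * ic_per_group, 0, 0},
          {data_shape[0], (g + 1) * ic_per_group, data_shape[2],
           data_shape[3]}};
}

// Filter slice for group g: output channels [g * oc, (g + 1) * oc).
SliceBounds FilterSlice(const std::vector<size_t>& filter_shape,
                        size_t groups, size_t g) {
  size_t oc = filter_shape[0] / groups;
  return {{g * oc, 0, 0, 0},
          {(g + 1) * oc, filter_shape[1], filter_shape[2], filter_shape[3]}};
}

The forward and data-gradient paths concatenate the per-group outputs on axis 1 (channels), while the filter-gradient path concatenates on axis 0, matching the OIHW filter layout.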
*/ - -#pragma once - -#include -#include -#include -#include -#include "ngraph/ngraph.hpp" -#include "paddle/fluid/operators/ngraph/ops/op_bridge.h" -#include "paddle/fluid/platform/ngraph_helper.h" - -namespace paddle { -namespace operators { -namespace ngraphs { - -std::shared_ptr GroupedConvolution( - const std::shared_ptr& data_batch, - const std::shared_ptr& filters, const ngraph::Strides strides, - const ngraph::Strides dilations, const ngraph::CoordinateDiff& paddings, - size_t groups) { - auto& data_shape = data_batch->get_shape(); - auto& filter_shape = filters->get_shape(); - ngraph::NodeVector ng_slices; - - for (size_t i = 0; i < groups; ++i) { - size_t channel_step = filter_shape.at(1); - const std::vector lower_bound{0, i * channel_step, 0, 0}; - const std::vector upper_bound{data_shape.at(0), - (i + 1) * channel_step, - data_shape.at(2), data_shape.at(3)}; - auto data_slice = std::make_shared( - data_batch, lower_bound, upper_bound); - - size_t filter_step = filter_shape.at(0) / groups; - const std::vector filter_lower_bound{i * filter_step, 0, 0, 0}; - const std::vector filter_upper_bound{ - (i + 1) * filter_step, filter_shape.at(1), filter_shape.at(2), - filter_shape.at(3)}; - auto filter_slice = std::make_shared( - filters, filter_lower_bound, filter_upper_bound); - auto ng_conv = std::make_shared( - data_slice, filter_slice, strides, dilations, paddings, paddings); - ng_slices.push_back(ng_conv); - } - - size_t concat_axis = 1; - return std::make_shared(ng_slices, concat_axis); -} - -std::shared_ptr GroupedGradConvolutionFilter( - const std::shared_ptr& data_batch, - const std::shared_ptr& filters, - const std::shared_ptr& doutput, const ngraph::Strides strides, - const ngraph::Strides dilations, const ngraph::CoordinateDiff& paddings, - size_t groups) { - auto& data_shape = data_batch->get_shape(); - auto& filter_shape = filters->get_shape(); - auto& out_shape = doutput->get_shape(); - ngraph::NodeVector ng_slices; - - for (size_t i = 0; i < groups; ++i) { - size_t channel_step = filter_shape.at(1); - const std::vector lower_bound{0, i * channel_step, 0, 0}; - const std::vector upper_bound{data_shape.at(0), - (i + 1) * channel_step, - data_shape.at(2), data_shape.at(3)}; - auto data_slice = std::make_shared( - data_batch, lower_bound, upper_bound); - - size_t filter_step = filter_shape.at(0) / groups; - - const std::vector filter_lower_bound{i * filter_step, 0, 0, 0}; - const std::vector filter_upper_bound{ - (i + 1) * filter_step, filter_shape.at(1), filter_shape.at(2), - filter_shape.at(3)}; - auto filter_slice = std::make_shared( - filters, filter_lower_bound, filter_upper_bound); - - const std::vector olower_bound{0, i * filter_step, 0, 0}; - const std::vector oupper_bound{out_shape.at(0), - (i + 1) * filter_step, - out_shape.at(2), out_shape.at(3)}; - auto out_slice = std::make_shared(doutput, olower_bound, - oupper_bound); - - auto ng_conv = std::make_shared( - data_slice, filter_slice->get_shape(), out_slice, strides, dilations, - paddings, paddings, ngraph::Strides{1, 1}); - - ng_slices.push_back(ng_conv); - } - - size_t concat_axis = 0; - return std::make_shared(ng_slices, concat_axis); -} - -std::shared_ptr GroupedGradConvolutionData( - const std::shared_ptr& data_batch, - const std::shared_ptr& filters, - const std::shared_ptr& doutput, const ngraph::Strides strides, - const ngraph::Strides dilations, const ngraph::CoordinateDiff& paddings, - size_t groups) { - auto& data_shape = data_batch->get_shape(); - auto& filter_shape = filters->get_shape(); - auto& 
out_shape = doutput->get_shape(); - ngraph::NodeVector ng_slices; - - for (size_t i = 0; i < groups; ++i) { - size_t channel_step = filter_shape.at(1); - const std::vector lower_bound{0, i * channel_step, 0, 0}; - const std::vector upper_bound{data_shape.at(0), - (i + 1) * channel_step, - data_shape.at(2), data_shape.at(3)}; - auto data_slice = std::make_shared( - data_batch, lower_bound, upper_bound); - - size_t filter_step = filter_shape.at(0) / groups; - - const std::vector filter_lower_bound{i * filter_step, 0, 0, 0}; - const std::vector filter_upper_bound{ - (i + 1) * filter_step, filter_shape.at(1), filter_shape.at(2), - filter_shape.at(3)}; - auto filter_slice = std::make_shared( - filters, filter_lower_bound, filter_upper_bound); - - const std::vector olower_bound{0, i * filter_step, 0, 0}; - const std::vector oupper_bound{out_shape.at(0), - (i + 1) * filter_step, - out_shape.at(2), out_shape.at(3)}; - auto out_slice = std::make_shared(doutput, olower_bound, - oupper_bound); - - auto ng_conv = std::make_shared( - data_slice->get_shape(), filter_slice, out_slice, strides, dilations, - paddings, paddings, ngraph::Strides{1, 1}); - ng_slices.push_back(ng_conv); - } - - size_t concat_axis = 1; - return std::make_shared(ng_slices, concat_axis); -} - -void BuildConv2dNode( - const std::shared_ptr& op, - std::shared_ptr< - std::unordered_map>> - ngb_node_map) { - auto op_attrs = paddle::framework::AttrReader(op->Attrs()); - auto filters = paddle::platform::GetInputNode(op, "Filter", ngb_node_map); - auto input = paddle::platform::GetInputNode(op, "Input", ngb_node_map); - - std::vector strides = op_attrs.Get>("strides"); - std::vector paddings = op_attrs.Get>("paddings"); - std::vector dilations = op_attrs.Get>("dilations"); - - const ngraph::Strides ng_strides{static_cast(strides.at(0)), - static_cast(strides.at(1))}; - const ngraph::Strides ng_dilations{static_cast(dilations.at(0)), - static_cast(dilations.at(1))}; - const ngraph::CoordinateDiff ng_paddings{ - static_cast(paddings.at(0)), - static_cast(paddings.at(1))}; - - int groups = static_cast(op_attrs.Get("groups")); - PADDLE_ENFORCE_GE(groups, 1, "conv groups needs be no less than 1"); - - std::shared_ptr result; - if (groups == 1) { - result = std::make_shared( - input, filters, ng_strides, ng_dilations, ng_paddings, ng_paddings); - } else { - result = GroupedConvolution(input, filters, ng_strides, ng_dilations, - ng_paddings, groups); - } - paddle::platform::SetOutputNode(op, "Output", result, ngb_node_map); -} - -void BuildConv2dGradNode( - const std::shared_ptr& op, - std::shared_ptr< - std::unordered_map>> - ngb_node_map) { - auto op_attrs = paddle::framework::AttrReader(op->Attrs()); - auto filter = paddle::platform::GetInputNode(op, "Filter", ngb_node_map); - auto input = paddle::platform::GetInputNode(op, "Input", ngb_node_map); - auto doutput = - paddle::platform::GetInputNode(op, "Output@GRAD", ngb_node_map); - - int groups = op_attrs.Get("groups"); - std::vector strides = op_attrs.Get>("strides"); - std::vector paddings = op_attrs.Get>("paddings"); - std::vector dilations = op_attrs.Get>("dilations"); - - const ngraph::Strides ng_strides{static_cast(strides.at(0)), - static_cast(strides.at(1))}; - const ngraph::Strides ng_dilations{static_cast(dilations.at(0)), - static_cast(dilations.at(1))}; - const ngraph::CoordinateDiff ng_paddings{ - static_cast(paddings.at(0)), - static_cast(paddings.at(1))}; - - std::shared_ptr dfilter; - std::shared_ptr dinput; - if (groups == 1) { - dfilter = std::make_shared( - input, 
filter->get_shape(), doutput, ng_strides, ng_dilations, - ng_paddings, ng_paddings, ngraph::Strides{1, 1}); - - dinput = std::make_shared( - input->get_shape(), filter, doutput, ng_strides, ng_dilations, - ng_paddings, ng_paddings, ngraph::Strides{1, 1}); - - } else { - dfilter = GroupedGradConvolutionFilter(input, filter, doutput, ng_strides, - ng_dilations, ng_paddings, groups); - dinput = GroupedGradConvolutionData(input, filter, doutput, ng_strides, - ng_dilations, ng_paddings, groups); - } - - paddle::platform::SetOutputNode(op, "Filter@GRAD", dfilter, ngb_node_map); - paddle::platform::SetOutputNode(op, "Input@GRAD", dinput, ngb_node_map); -} -} // namespace ngraphs -} // namespace operators -} // namespace paddle - -REGISTER_NG_OP(conv2d, BuildConv2dNode); -REGISTER_NG_OP(conv2d_grad, BuildConv2dGradNode); -REGISTER_NG_OP(depthwise_conv2d, BuildConv2dNode); diff --git a/paddle/fluid/operators/ngraph/ops/cross_entropy_op.h b/paddle/fluid/operators/ngraph/ops/cross_entropy_op.h deleted file mode 100644 index e06446aca9..0000000000 --- a/paddle/fluid/operators/ngraph/ops/cross_entropy_op.h +++ /dev/null @@ -1,244 +0,0 @@ -/*Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#pragma once - -#include -#include -#include -#include - -#include "ngraph/ngraph.hpp" -#include "paddle/fluid/operators/ngraph/ops/op_bridge.h" -#include "paddle/fluid/platform/ngraph_helper.h" - -namespace paddle { -namespace operators { -namespace ngraphs { -std::shared_ptr remove_trailing_one( - const std::shared_ptr& input) { - auto shape = input->get_shape(); - if (shape.back() == 1 && shape.size() > 1) { - shape.pop_back(); - return platform::NgReshaper(input, shape); - } else { - return input; - } -} - -std::shared_ptr flatten_node( - const std::shared_ptr& input) { - auto shape = input->get_shape(); - auto rank = shape.size(); - auto output = input; - if (rank > 2) { - auto shape_2d = paddle::platform::FlattenTo2d(shape, rank - 1); - output = paddle::platform::NgReshaper(input, shape_2d); - } - return output; -} - -std::shared_ptr convert_to_node_type( - const std::shared_ptr& input, - const std::shared_ptr& ref) { - auto output = input; - if (input->get_element_type() != ref->get_element_type()) { - output = - std::make_shared(input, ref->get_element_type()); - } - return output; -} - -std::shared_ptr create_xe( - const std::shared_ptr& one_hot, - const std::shared_ptr& x) { - auto node_log = std::make_shared(x); - - auto node_mul = one_hot * node_log; - auto node_sum = std::make_shared( - node_mul, ngraph::AxisSet{x->get_shape().size() - 1}); - - auto shape = x->get_shape(); - shape.back() = 1; - return platform::NgReshaper(-node_sum, shape); -} - -std::shared_ptr create_mask( - const std::shared_ptr& label, int ignore_index) { - auto ignore_node = paddle::platform::CreateConstant( - label->get_element_type(), label->get_shape(), {ignore_index}); - auto not_equal_node = - std::make_shared(label, ignore_node); - return not_equal_node; -} - -std::shared_ptr 
create_one_hot( - const std::shared_ptr& label, - const std::shared_ptr& x) { - auto label_shape = label->get_shape(); - return std::make_shared( - remove_trailing_one(label), x->get_shape(), x->get_shape().size() - 1); -} - -std::shared_ptr GetCrossEntropy( - std::shared_ptr x, std::shared_ptr label, - const bool is_soft_label, int ignore_index) { - std::shared_ptr node_1_hot = label; - if (!is_soft_label) { - node_1_hot = create_one_hot(label, x); - } - node_1_hot = convert_to_node_type(node_1_hot, x); - - auto xe = create_xe(node_1_hot, x); - if (!is_soft_label) { - auto mask = convert_to_node_type(create_mask(label, ignore_index), xe); - xe = xe * mask; - } - return xe; -} - -std::shared_ptr GetCrossEntropyGrad( - std::shared_ptr x, std::shared_ptr label, - std::shared_ptr dy, const bool is_soft_label, - int ignore_index) { - auto x_shape = x->get_shape(); - auto rank = x_shape.size(); - - std::shared_ptr mask; - if (!is_soft_label) { - mask = convert_to_node_type(create_mask(label, ignore_index), x); - mask = std::make_shared( - remove_trailing_one(mask), x_shape, ngraph::AxisSet{rank - 1}); - label = create_one_hot(label, x); - } - - auto dy_reshape = remove_trailing_one(dy); - auto dy_bcast = std::make_shared( - dy_reshape, x_shape, ngraph::AxisSet{rank - 1}); - - label = convert_to_node_type(label, x); - - auto xe_grad = -label * dy_bcast / x; - - if (!is_soft_label) { - xe_grad = xe_grad * mask; - } - return xe_grad; -} - -void BuildCrossEntropyNode( - const std::shared_ptr& op, - std::shared_ptr< - std::unordered_map>> - ngb_node_map) { - auto x = paddle::platform::GetInputNode(op, "X", ngb_node_map); - auto label = paddle::platform::GetInputNode(op, "Label", ngb_node_map); - auto op_attrs = paddle::framework::AttrReader(op->Attrs()); - const bool is_soft_label = op_attrs.Get("soft_label"); - int ignore_index = op_attrs.Get("ignore_index"); - auto xe = GetCrossEntropy(x, label, is_soft_label, ignore_index); - paddle::platform::SetOutputNode(op, "Y", xe, ngb_node_map); -} - -void BuildCrossEntropyGradNode( - const std::shared_ptr& op, - std::shared_ptr< - std::unordered_map>> - ngb_node_map) { - auto op_attrs = paddle::framework::AttrReader(op->Attrs()); - const bool is_soft_label = op_attrs.Get("soft_label"); - int ignore_index = op_attrs.Get("ignore_index"); - auto x = paddle::platform::GetInputNode(op, "X", ngb_node_map); - auto label = paddle::platform::GetInputNode(op, "Label", ngb_node_map); - auto dy = paddle::platform::GetInputNode(op, "Y@GRAD", ngb_node_map); - auto xe_grad = GetCrossEntropyGrad(x, label, dy, is_soft_label, ignore_index); - paddle::platform::SetOutputNode(op, "X@GRAD", xe_grad, ngb_node_map); -} - -void BuildCrossEntropy2Node( - const std::shared_ptr& op, - std::shared_ptr< - std::unordered_map>> - ngb_node_map) { - auto x = paddle::platform::GetInputNode(op, "X", ngb_node_map); - auto label = paddle::platform::GetInputNode(op, "Label", ngb_node_map); - auto op_attrs = paddle::framework::AttrReader(op->Attrs()); - int ignore_index = op_attrs.Get("ignore_index"); - - auto rank = x->get_shape().size(); - - auto one_hot = convert_to_node_type(create_one_hot(label, x), x); - auto xe = create_xe(one_hot, x); - auto mask = convert_to_node_type(create_mask(label, ignore_index), xe); - - xe = xe * mask; - - std::shared_ptr node_sum = - std::make_shared(one_hot * x, ngraph::AxisSet{rank - 1}); - node_sum = paddle::platform::NgReshaper(node_sum, mask->get_shape()); - auto matchx = mask * node_sum; - - paddle::platform::SetOutputNode(op, "MatchX", matchx, 
ngb_node_map); - platform::SetOutputNode(op, "XShape", x, ngb_node_map); - paddle::platform::SetOutputNode(op, "Y", xe, ngb_node_map); -} - -void BuildCrossEntropyGrad2Node( - const std::shared_ptr& op, - std::shared_ptr< - std::unordered_map>> - ngb_node_map) { - auto op_attrs = paddle::framework::AttrReader(op->Attrs()); - int ignore_index = op_attrs.Get("ignore_index"); - auto matchx = paddle::platform::GetInputNode(op, "MatchX", ngb_node_map); - auto label = paddle::platform::GetInputNode(op, "Label", ngb_node_map); - auto x = paddle::platform::GetInputNode(op, "XShape", ngb_node_map); - auto dy = paddle::platform::GetInputNode(op, framework::GradVarName("Y"), - ngb_node_map); - - matchx = remove_trailing_one(matchx); - label = remove_trailing_one(label); - x = remove_trailing_one(x); - dy = remove_trailing_one(dy); - - auto x_shape = x->get_shape(); - auto rank = x_shape.size(); - - auto one_hot = convert_to_node_type(create_one_hot(label, x), x); - auto mask = convert_to_node_type(create_mask(label, ignore_index), x); - - auto zero = paddle::platform::CreateConstant(matchx->get_element_type(), - matchx->get_shape(), {0}); - auto one = paddle::platform::CreateConstant(matchx->get_element_type(), - matchx->get_shape(), {1}); - auto is_zero = std::make_shared(matchx, zero); - matchx = std::make_shared(is_zero, one, matchx); - - auto dy_bcast = std::make_shared( - mask * dy, x_shape, ngraph::AxisSet{rank - 1}); - auto matchx_bcast = std::make_shared( - matchx, x_shape, ngraph::AxisSet{rank - 1}); - - auto xe_grad = -dy_bcast * one_hot / matchx_bcast; - paddle::platform::SetOutputNode(op, framework::GradVarName("X"), xe_grad, - ngb_node_map); -} -} // namespace ngraphs -} // namespace operators -} // namespace paddle - -REGISTER_NG_OP(cross_entropy, BuildCrossEntropyNode); -REGISTER_NG_OP(cross_entropy_grad, BuildCrossEntropyGradNode); -REGISTER_NG_OP(cross_entropy2, BuildCrossEntropy2Node); -REGISTER_NG_OP(cross_entropy_grad2, BuildCrossEntropyGrad2Node); diff --git a/paddle/fluid/operators/ngraph/ops/dropout_op.h b/paddle/fluid/operators/ngraph/ops/dropout_op.h deleted file mode 100644 index 3fb55980d7..0000000000 --- a/paddle/fluid/operators/ngraph/ops/dropout_op.h +++ /dev/null @@ -1,112 +0,0 @@ -/*Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
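For reference, the cross-entropy builders deleted above all reduce to the same arithmetic: expand the hard label to a one-hot tensor, multiply elementwise with log(x), negate the sum over the last axis, and zero any row whose label equals ignore_index. A minimal per-row sketch of that computation in plain standalone C++ rather than nGraph graph nodes (function name illustrative, not part of the removed API):

    #include <cmath>
    #include <cstdio>
    #include <vector>

    // One row: xe = -log(x[label]), masked to 0 when label == ignore_index.
    float CrossEntropyRow(const std::vector<float>& x, int label,
                          int ignore_index) {
      if (label == ignore_index) return 0.0f;  // the create_mask branch
      return -std::log(x[label]);              // -sum(one_hot * log(x))
    }

    int main() {
      std::vector<float> probs = {0.1f, 0.7f, 0.2f};
      std::printf("%.4f\n", CrossEntropyRow(probs, 1, /*ignore_index=*/-100));
    }

The cross_entropy2 variant above additionally records MatchX, the masked probability selected by the label, for reuse in its backward pass.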
*/ - -#pragma once - -#include -#include -#include -#include - -#include "ngraph/ngraph.hpp" -#include "ngraph/op/experimental/generate_mask.hpp" -#include "paddle/fluid/operators/ngraph/ops/elementwise_scalar_op.h" -#include "paddle/fluid/operators/ngraph/ops/op_bridge.h" -#include "paddle/fluid/platform/ngraph_helper.h" - -namespace paddle { -namespace operators { -namespace ngraphs { - -static void BuildDropoutNode( - const std::shared_ptr& op, - std::shared_ptr< - std::unordered_map>> - ngb_node_map) { - auto input = platform::GetInputNode(op, "X", ngb_node_map); - auto op_attrs = framework::AttrReader(op->Attrs()); - auto dropout_prob = op_attrs.Get("dropout_prob"); - auto dropout_implementation = - op_attrs.Get("dropout_implementation"); - auto is_test = op_attrs.Get("is_test"); - auto seed = op_attrs.Get("seed"); - auto fix_seed = op_attrs.Get("fix_seed"); - float value = 1.0f - dropout_prob; - bool upscale_in_train = (dropout_implementation == "upscale_in_train"); - - if (is_test) { - if (upscale_in_train) { - platform::SetOutputNode(op, "Out", input, ngb_node_map); - } else { - auto mask_val = paddle::platform::CreateConstant( - input->get_element_type(), input->get_shape(), {value}); - auto out = input * mask_val; - platform::SetOutputNode(op, "Out", out, ngb_node_map); - } - } else { - auto one = paddle::platform::CreateConstant(input->get_element_type(), - ngraph::Shape{}, {1}); - - auto gen_mask = std::make_shared( - one, input->get_shape(), input->get_element_type(), seed, value, - fix_seed); - - if (upscale_in_train) { - auto mask_val = paddle::platform::CreateConstant( - input->get_element_type(), input->get_shape(), {value}); - - auto out = value ? input * gen_mask / mask_val : input * gen_mask; - platform::SetOutputNode(op, "Mask", gen_mask, ngb_node_map); - platform::SetOutputNode(op, "Out", out, ngb_node_map); - } else { - auto out = input * gen_mask; - platform::SetOutputNode(op, "Mask", gen_mask, ngb_node_map); - platform::SetOutputNode(op, "Out", out, ngb_node_map); - } - } -} - -static void BuildDropoutGradNode( - const std::shared_ptr& op, - std::shared_ptr< - std::unordered_map>> - ngb_node_map) { - auto dy = platform::GetInputNode(op, "Out@GRAD", ngb_node_map); - auto mask = platform::GetInputNode(op, "Mask", ngb_node_map); - if (dy->get_element_type() != mask->get_element_type()) { - mask = std::make_shared(mask, dy->get_element_type()); - } - - auto op_attrs = framework::AttrReader(op->Attrs()); - auto dropout_prob = op_attrs.Get("dropout_prob"); - auto dropout_implementation = - op_attrs.Get("dropout_implementation"); - auto dx = dy * mask; - - if (dropout_implementation == "upscale_in_train") { - if (dropout_prob == 1.0f) { - dx = ElementwiseScalar(0., dy); - } else { - dx = - ElementwiseScalar(1. / (1. - dropout_prob), dx); - } - } - platform::SetOutputNode(op, "X@GRAD", dx, ngb_node_map); -} -} // namespace ngraphs -} // namespace operators -} // namespace paddle - -REGISTER_NG_OP(dropout, BuildDropoutNode); -REGISTER_NG_OP(dropout_grad, BuildDropoutGradNode); diff --git a/paddle/fluid/operators/ngraph/ops/elementwise_add_op.h b/paddle/fluid/operators/ngraph/ops/elementwise_add_op.h deleted file mode 100644 index d7485a706a..0000000000 --- a/paddle/fluid/operators/ngraph/ops/elementwise_add_op.h +++ /dev/null @@ -1,93 +0,0 @@ -/*Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
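The removed dropout builder distinguishes the two dropout_implementation modes: with upscale_in_train the kept activations are divided by the keep probability during training so that inference is the identity, otherwise inference multiplies by (1 - dropout_prob). A standalone sketch of the training-time arithmetic (plain C++ with a Bernoulli mask in place of ngraph's GenerateMask; assumes dropout_prob < 1, since the p == 1 case is special-cased only in the gradient above):

    #include <cstddef>
    #include <random>
    #include <vector>

    std::vector<float> DropoutTrain(const std::vector<float>& x, float p,
                                    bool upscale_in_train, unsigned seed) {
      std::mt19937 gen(seed);
      std::bernoulli_distribution keep(1.0 - p);  // GenerateMask analogue
      std::vector<float> out(x.size());
      for (std::size_t i = 0; i < x.size(); ++i) {
        float m = keep(gen) ? 1.0f : 0.0f;
        out[i] = upscale_in_train
                     ? x[i] * m / (1.0f - p)  // inference is then identity
                     : x[i] * m;              // inference scales by (1 - p)
      }
      return out;
    }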
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#pragma once - -#include -#include -#include -#include - -#include "ngraph/ngraph.hpp" -#include "paddle/fluid/operators/ngraph/ops/elementwise_node.h" -#include "paddle/fluid/operators/ngraph/ops/op_bridge.h" -#include "paddle/fluid/platform/ngraph_helper.h" - -namespace paddle { -namespace operators { -namespace ngraphs { - -void BuildElementwiseAddNode( - const std::shared_ptr& op, - std::shared_ptr< - std::unordered_map>> - ngb_node_map) { - BuildElementwiseBinaryNode(op, ngb_node_map); -} - -void BuildElementwiseAddGradNode( - const std::shared_ptr& op, - std::shared_ptr< - std::unordered_map>> - ngb_node_map) { - auto op_attrs = paddle::framework::AttrReader(op->Attrs()); - int axis = op_attrs.Get("axis"); - - auto dout = paddle::platform::GetInputNode(op, "Out@GRAD", ngb_node_map); - auto y = paddle::platform::GetInputNode(op, "Y", ngb_node_map); - auto dout_shape = dout->get_shape(); - auto y_shape = y->get_shape(); - - if (dout_shape == y_shape) { - paddle::platform::SetOutputNode(op, "X@GRAD", dout, ngb_node_map); - paddle::platform::SetOutputNode(op, "Y@GRAD", dout, ngb_node_map); - } else { - axis = (axis == -1 ? dout_shape.size() - y_shape.size() : axis); - paddle::platform::TrimTrailingSingularDims(&y_shape); - axis = (y_shape.size() == 0 ? dout_shape.size() : axis); - - int pre, n, post; - paddle::platform::GetMidDims(dout_shape, y_shape, axis, &pre, &n, &post); - - ngraph::Shape lhs_shape{}; - lhs_shape.push_back(pre); - lhs_shape.push_back(n); - if (post != 1) { - lhs_shape.push_back(post); - } - - std::vector lhs_order(dout_shape.size()); - std::iota(std::begin(lhs_order), std::end(lhs_order), 0); - auto dout_reshape = std::make_shared( - dout, ngraph::AxisVector(lhs_order), lhs_shape); - - ngraph::AxisSet axis_set{0}; - if (post != 1) { - axis_set.insert(2); - } - - auto dout_sum = std::make_shared(dout_reshape, axis_set); - auto dy = std::make_shared( - dout_sum, ngraph::AxisVector{0}, y->get_shape()); - - paddle::platform::SetOutputNode(op, "X@GRAD", dout, ngb_node_map); - paddle::platform::SetOutputNode(op, "Y@GRAD", dy, ngb_node_map); - } -} -} // namespace ngraphs -} // namespace operators -} // namespace paddle - -REGISTER_NG_OP(elementwise_add, BuildElementwiseAddNode); -REGISTER_NG_OP(elementwise_add_grad, BuildElementwiseAddGradNode); diff --git a/paddle/fluid/operators/ngraph/ops/elementwise_binary_prepare_node.h b/paddle/fluid/operators/ngraph/ops/elementwise_binary_prepare_node.h deleted file mode 100644 index e4e17f5bb2..0000000000 --- a/paddle/fluid/operators/ngraph/ops/elementwise_binary_prepare_node.h +++ /dev/null @@ -1,78 +0,0 @@ -/*Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
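The elementwise_add gradient above passes dout straight through to X@GRAD and, when Y was broadcast, folds dout back to Y's shape by reshaping to (pre, n, post) and summing axes {0, 2}. The same reduction written as explicit loops (plain C++ sketch, row-major layout assumed):

    #include <vector>

    // dout has logical shape (pre, n, post); dy gets shape {n}.
    std::vector<float> ReduceToY(const std::vector<float>& dout, int pre,
                                 int n, int post) {
      std::vector<float> dy(n, 0.0f);
      for (int i = 0; i < pre; ++i)
        for (int j = 0; j < n; ++j)
          for (int k = 0; k < post; ++k)
            dy[j] += dout[(i * n + j) * post + k];  // Sum over axes {0, 2}
      return dy;
    }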
-See the License for the specific language governing permissions and -limitations under the License. */ - -#pragma once - -#include -#include -#include -#include - -#include "ngraph/ngraph.hpp" -#include "paddle/fluid/platform/ngraph_helper.h" - -namespace paddle { -namespace operators { -namespace ngraphs { - -ngraph::NodeVector ElementwiseBinaryNodePrepare( - const std::shared_ptr& op, - std::shared_ptr< - std::unordered_map>> - ngb_node_map) { - auto op_attrs = paddle::framework::AttrReader(op->Attrs()); - int axis = op_attrs.Get("axis"); - auto lhs = paddle::platform::GetInputNode(op, "X", ngb_node_map); - auto rhs = paddle::platform::GetInputNode(op, "Y", ngb_node_map); - - auto lhs_shape = lhs->get_shape(); - auto rhs_shape = rhs->get_shape(); - - PADDLE_ENFORCE_GE(lhs_shape.size(), rhs_shape.size(), - "Rank of first input must >= rank of second input."); - if (lhs_shape == rhs_shape) { - return ngraph::NodeVector{lhs, rhs}; - } - axis = (rhs_shape.size() == 0) ? lhs_shape.size() - 1 : axis; - axis = (axis == -1 ? lhs_shape.size() - rhs_shape.size() : axis); - PADDLE_ENFORCE(axis >= 0 && axis < (int)(lhs_shape.size()), - "Axis should be in range [0, lhs_shape)"); - paddle::platform::TrimTrailingSingularDims(&rhs_shape); - - int pre, n, post; - paddle::platform::GetMidDims(lhs_shape, rhs_shape, axis, &pre, &n, &post); - - ngraph::Shape l_shape{}; - l_shape.push_back(pre); - l_shape.push_back(n); - l_shape.push_back(post); - - std::vector rhs_order(rhs->get_shape().size()); - std::iota(std::begin(rhs_order), std::end(rhs_order), 0); - ngraph::Shape r_shape{}; - r_shape.push_back(n); - auto rhs_reshape = std::make_shared( - rhs, ngraph::AxisVector(rhs_order), r_shape); - auto rhs_bcast = std::make_shared( - rhs_reshape, l_shape, ngraph::AxisSet{0, 2}); - std::vector bcast_order(rhs_bcast->get_shape().size()); - std::iota(std::begin(bcast_order), std::end(bcast_order), 0); - std::shared_ptr rhs_bcast_reshape = - std::make_shared( - rhs_bcast, ngraph::AxisVector(bcast_order), lhs_shape); - return ngraph::NodeVector{lhs, rhs_bcast_reshape}; -} -} // namespace ngraphs -} // namespace operators -} // namespace paddle diff --git a/paddle/fluid/operators/ngraph/ops/elementwise_div_op.h b/paddle/fluid/operators/ngraph/ops/elementwise_div_op.h deleted file mode 100644 index b4cc2f862b..0000000000 --- a/paddle/fluid/operators/ngraph/ops/elementwise_div_op.h +++ /dev/null @@ -1,103 +0,0 @@ -/*Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
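ElementwiseBinaryNodePrepare above implements Paddle's axis-based broadcasting: after trailing size-1 dimensions of Y are trimmed, X's shape is split into (pre, n, post) around the span Y occupies starting at `axis`, then Y is reshaped to {n}, broadcast to {pre, n, post}, and reshaped back to X's full shape. A sketch of how those three factors are derived, reflecting a plausible reading of the GetMidDims helper (plain C++):

    #include <cstddef>
    #include <vector>

    // pre = dims before axis, n = the span rhs occupies, post = dims after;
    // trailing 1-dims of rhs are assumed already trimmed.
    void MidDims(const std::vector<std::size_t>& lhs,
                 const std::vector<std::size_t>& rhs, int axis, int* pre,
                 int* n, int* post) {
      *pre = *n = *post = 1;
      for (int i = 0; i < axis; ++i) *pre *= static_cast<int>(lhs[i]);
      for (std::size_t i = 0; i < rhs.size(); ++i)
        *n *= static_cast<int>(rhs[i]);  // == lhs[axis + i]
      for (std::size_t i = axis + rhs.size(); i < lhs.size(); ++i)
        *post *= static_cast<int>(lhs[i]);
    }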
*/ - -#pragma once - -#include -#include -#include -#include - -#include "ngraph/ngraph.hpp" -#include "paddle/fluid/operators/ngraph/ops/elementwise_node.h" -#include "paddle/fluid/operators/ngraph/ops/op_bridge.h" -#include "paddle/fluid/platform/ngraph_helper.h" - -namespace paddle { -namespace operators { -namespace ngraphs { - -void BuildElementwiseDivGradNode( - const std::shared_ptr& op, - std::shared_ptr< - std::unordered_map>> - ngb_node_map) { - auto op_attrs = paddle::framework::AttrReader(op->Attrs()); - int axis = op_attrs.Get("axis"); - - auto dout = paddle::platform::GetInputNode(op, "Out@GRAD", ngb_node_map); - auto y = paddle::platform::GetInputNode(op, "Y", ngb_node_map); - auto out = paddle::platform::GetInputNode(op, "Out", ngb_node_map); - auto dout_shape = dout->get_shape(); - auto y_shape = y->get_shape(); - if (dout->get_element_type() != y->get_element_type()) { - y = std::make_shared(y, dout->get_element_type()); - } - auto dy_hd = std::make_shared(out, dout); - if (dout_shape == y_shape) { - auto dx = std::make_shared(dout, y); - auto dy = std::make_shared(dy_hd, -y); - paddle::platform::SetOutputNode(op, "X@GRAD", dx, ngb_node_map); - paddle::platform::SetOutputNode(op, "Y@GRAD", dy, ngb_node_map); - } else { - auto dy_hd_shape = dy_hd->get_shape(); - axis = (axis == -1 ? dy_hd_shape.size() - y_shape.size() : axis); - paddle::platform::TrimTrailingSingularDims(&y_shape); - axis = (y_shape.size() == 0 ? dy_hd_shape.size() : axis); - int pre, n, post; - paddle::platform::GetMidDims(dy_hd_shape, y_shape, axis, &pre, &n, &post); - ngraph::Shape lhs_shape{}; - lhs_shape.push_back(pre); - lhs_shape.push_back(n); - if (post != 1) { - lhs_shape.push_back(post); - } - - std::vector dy_order(dout_shape.size()); - std::iota(std::begin(dy_order), std::end(dy_order), 0); - auto dy_hd_reshape = std::make_shared( - dy_hd, ngraph::AxisVector(dy_order), lhs_shape); - - ngraph::AxisSet axis_set{0}; - if (post != 1) { - axis_set.insert(2); - } - - auto dy_sum = std::make_shared(dy_hd_reshape, axis_set); - auto dy_sum_yshape = std::make_shared( - dy_sum, ngraph::AxisVector{0}, y->get_shape()); - auto dy_ = std::make_shared(dy_sum_yshape, -y); - paddle::platform::SetOutputNode(op, "Y@GRAD", dy_, ngb_node_map); - - y_shape = y->get_shape(); - std::vector y_order(y_shape.size() == 0 ? 1 : y_shape.size()); - std::iota(std::begin(y_order), std::end(y_order), 0); - auto y_reshape = std::make_shared( - y, ngraph::AxisVector(y_order), ngraph::Shape{(size_t)n}); - auto y_broadcast = - std::make_shared(y_reshape, lhs_shape, axis_set); - std::vector lhs_order(lhs_shape.size()); - std::iota(std::begin(lhs_order), std::end(lhs_order), 0); - auto y_broadcast_reshape = std::make_shared( - y_broadcast, ngraph::AxisVector(lhs_order), dout_shape); - auto dx = std::make_shared(dout, y_broadcast_reshape); - - paddle::platform::SetOutputNode(op, "X@GRAD", dx, ngb_node_map); - } -} -} // namespace ngraphs -} // namespace operators -} // namespace paddle - -REGISTER_NG_OP(elementwise_div_grad, BuildElementwiseDivGradNode); diff --git a/paddle/fluid/operators/ngraph/ops/elementwise_mul_op.h b/paddle/fluid/operators/ngraph/ops/elementwise_mul_op.h deleted file mode 100644 index c74b103ebf..0000000000 --- a/paddle/fluid/operators/ngraph/ops/elementwise_mul_op.h +++ /dev/null @@ -1,111 +0,0 @@ -/*Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#pragma once - -#include -#include -#include -#include - -#include "ngraph/ngraph.hpp" -#include "paddle/fluid/operators/ngraph/ops/elementwise_node.h" -#include "paddle/fluid/operators/ngraph/ops/op_bridge.h" -#include "paddle/fluid/platform/ngraph_helper.h" - -namespace paddle { -namespace operators { -namespace ngraphs { - -void BuildElementwiseMulNode( - const std::shared_ptr& op, - std::shared_ptr< - std::unordered_map>> - ngb_node_map) { - BuildElementwiseBinaryNode(op, ngb_node_map); -} - -void BuildElementwiseMulGradNode( - const std::shared_ptr& op, - std::shared_ptr< - std::unordered_map>> - ngb_node_map) { - auto op_attrs = paddle::framework::AttrReader(op->Attrs()); - int axis = op_attrs.Get("axis"); - - auto dout = paddle::platform::GetInputNode(op, "Out@GRAD", ngb_node_map); - auto y = paddle::platform::GetInputNode(op, "Y", ngb_node_map); - auto x = paddle::platform::GetInputNode(op, "X", ngb_node_map); - auto dout_shape = dout->get_shape(); - auto y_shape = y->get_shape(); - auto x_shape = x->get_shape(); - if (dout->get_element_type() != y->get_element_type()) { - y = std::make_shared(y, dout->get_element_type()); - } - if (dout_shape == y_shape) { - auto dx = std::make_shared(dout, y); - auto dy = std::make_shared(dout, x); - paddle::platform::SetOutputNode(op, "X@GRAD", dx, ngb_node_map); - paddle::platform::SetOutputNode(op, "Y@GRAD", dy, ngb_node_map); - } else { - auto dy_hd = std::make_shared(dout, x); - auto dy_hd_shape = dy_hd->get_shape(); - axis = (axis == -1 ? dy_hd_shape.size() - y_shape.size() : axis); - paddle::platform::TrimTrailingSingularDims(&y_shape); - axis = (y_shape.size() == 0 ? dy_hd_shape.size() : axis); - int pre, n, post; - paddle::platform::GetMidDims(dy_hd_shape, y_shape, axis, &pre, &n, &post); - ngraph::Shape lhs_shape{}; - lhs_shape.push_back(pre); - lhs_shape.push_back(n); - if (post != 1) { - lhs_shape.push_back(post); - } - - std::vector dy_order(dout_shape.size()); - std::iota(std::begin(dy_order), std::end(dy_order), 0); - auto dy_hd_reshape = std::make_shared( - dy_hd, ngraph::AxisVector(dy_order), lhs_shape); - - ngraph::AxisSet axis_set{0}; - if (post != 1) { - axis_set.insert(2); - } - - auto dy_sum = std::make_shared(dy_hd_reshape, axis_set); - auto dy_sum_yshape = std::make_shared( - dy_sum, ngraph::AxisVector{0}, y->get_shape()); - paddle::platform::SetOutputNode(op, "Y@GRAD", dy_sum_yshape, ngb_node_map); - - y_shape = y->get_shape(); - std::vector y_order(y_shape.size() == 0 ? 
1 : y_shape.size()); - std::iota(std::begin(y_order), std::end(y_order), 0); - auto y_reshape = std::make_shared( - y, ngraph::AxisVector(y_order), ngraph::Shape{(size_t)n}); - auto y_broadcast = - std::make_shared(y_reshape, lhs_shape, axis_set); - std::vector lhs_order(lhs_shape.size()); - std::iota(std::begin(lhs_order), std::end(lhs_order), 0); - auto y_broadcast_reshape = std::make_shared( - y_broadcast, ngraph::AxisVector(lhs_order), dout_shape); - auto dx = std::make_shared(y_broadcast_reshape, dout); - paddle::platform::SetOutputNode(op, "X@GRAD", dx, ngb_node_map); - } -} -} // namespace ngraphs -} // namespace operators -} // namespace paddle - -REGISTER_NG_OP(elementwise_mul, BuildElementwiseMulNode); -REGISTER_NG_OP(elementwise_mul_grad, BuildElementwiseMulGradNode); diff --git a/paddle/fluid/operators/ngraph/ops/elementwise_node.h b/paddle/fluid/operators/ngraph/ops/elementwise_node.h deleted file mode 100644 index a555f57f99..0000000000 --- a/paddle/fluid/operators/ngraph/ops/elementwise_node.h +++ /dev/null @@ -1,75 +0,0 @@ -/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#pragma once - -#include -#include -#include - -#include "ngraph/ngraph.hpp" -#include "paddle/fluid/operators/ngraph/ops/elementwise_binary_prepare_node.h" -#include "paddle/fluid/operators/ngraph/ops/op_bridge.h" -#include "paddle/fluid/platform/ngraph_helper.h" - -namespace paddle { -namespace operators { -namespace ngraphs { - -template -void BuildElementwiseBinaryNode( - const std::shared_ptr& op, - std::shared_ptr< - std::unordered_map>> - ngb_node_map) { - auto nodes = ElementwiseBinaryNodePrepare(op, ngb_node_map); - std::shared_ptr& x = nodes.at(0); - std::shared_ptr& y = nodes.at(1); - - y = std::make_shared(y, x->get_element_type()); - auto out = std::make_shared(x, y); - paddle::platform::SetOutputNode(op, "Out", out, ngb_node_map); -} - -template -void BuildElementwiseCompareNode( - const std::shared_ptr& op, - std::shared_ptr< - std::unordered_map>> - ngb_node_map) { - auto nodes = ElementwiseBinaryNodePrepare(op, ngb_node_map); - std::shared_ptr& x = nodes.at(0); - std::shared_ptr& y = nodes.at(1); - - if (x->get_element_type() != y->get_element_type()) { - x = std::make_shared(x, ngraph::element::f64); - y = std::make_shared(y, ngraph::element::f64); - } - auto out = std::make_shared(x, y); - paddle::platform::SetOutputNode(op, "Out", out, ngb_node_map); -} - -} // namespace ngraphs -} // namespace operators -} // namespace paddle - -REGISTER_NG_OP(elementwise_max, - BuildElementwiseBinaryNode); -REGISTER_NG_OP(elementwise_pow, BuildElementwiseBinaryNode); -REGISTER_NG_OP(elementwise_sub, - BuildElementwiseBinaryNode); -REGISTER_NG_OP(elementwise_min, - BuildElementwiseBinaryNode); -REGISTER_NG_OP(less_than, BuildElementwiseCompareNode); -REGISTER_NG_OP(elementwise_div, BuildElementwiseBinaryNode); diff --git a/paddle/fluid/operators/ngraph/ops/elementwise_scalar_op.h b/paddle/fluid/operators/ngraph/ops/elementwise_scalar_op.h deleted file mode 
100644 index 8f5092963c..0000000000 --- a/paddle/fluid/operators/ngraph/ops/elementwise_scalar_op.h +++ /dev/null @@ -1,59 +0,0 @@ -/*Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#pragma once - -#include -#include "ngraph/ngraph.hpp" -#include "paddle/fluid/platform/ngraph_helper.h" - -namespace paddle { -namespace operators { -namespace ngraphs { - -template -std::shared_ptr ElementwiseScalar( - float scale, std::shared_ptr node) { - auto node_shape = node->get_shape(); - auto scale_const = ngraph::op::Constant::create(node->get_element_type(), - node_shape, {scale}); - return std::make_shared(scale_const, node); -} - -template -std::shared_ptr ElementwiseScalar( - std::shared_ptr scale_1d, - std::shared_ptr node) { - auto scale_shape = scale_1d->get_shape(); - PADDLE_ENFORCE_EQ(scale_shape.size(), 1, "Supporting 1d scale node"); - PADDLE_ENFORCE_EQ(scale_shape.at(0), 1, "scale 1d in in shape {1}"); - - auto node_shape = node->get_shape(); - ngraph::AxisSet axis_set; - for (size_t i = 0; i < node_shape.size(); ++i) { - axis_set.insert(i); - } - node_shape.push_back(1); - - auto scale_bcast = - std::make_shared(scale_1d, node_shape, axis_set); - - auto scale_reshape = - paddle::platform::NgReshaper(scale_bcast, node->get_shape()); - - return std::make_shared(scale_reshape, node); -} -} // namespace ngraphs -} // namespace operators -} // namespace paddle diff --git a/paddle/fluid/operators/ngraph/ops/fill_constant_op.h b/paddle/fluid/operators/ngraph/ops/fill_constant_op.h deleted file mode 100644 index fee5f57e48..0000000000 --- a/paddle/fluid/operators/ngraph/ops/fill_constant_op.h +++ /dev/null @@ -1,51 +0,0 @@ -/*Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
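ElementwiseScalar<T> above materializes the scalar as a constant shaped like the node (or broadcasts a one-element node) and applies T. Stripped of the graph machinery it amounts to the following (sketch; functor-based name illustrative):

    #include <functional>
    #include <vector>

    template <typename BinOp>
    std::vector<float> ScalarBroadcast(float s, std::vector<float> v,
                                       BinOp op) {
      for (float& e : v) e = op(s, e);  // the scalar meets every element
      return v;
    }
    // e.g. ScalarBroadcast(2.0f, x, std::multiplies<float>());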
*/ - -#pragma once - -#include -#include -#include -#include -#include "ngraph/ngraph.hpp" -#include "paddle/fluid/operators/ngraph/ops/op_bridge.h" -#include "paddle/fluid/platform/ngraph_helper.h" - -namespace paddle { -namespace operators { -namespace ngraphs { - -void BuildFillConstantNode( - const std::shared_ptr& op, - std::shared_ptr< - std::unordered_map>> - ngb_node_map) { - auto op_attrs = paddle::framework::AttrReader(op->Attrs()); - auto vsp = op_attrs.Get>("shape"); - ngraph::Shape shape; - for (auto& sp : vsp) { - shape.push_back(sp); - } - float value = op_attrs.Get("value"); - auto ng_dtype = - platform::GetNgType(static_cast( - op_attrs.Get("dtype"))); - auto out = ngraph::op::Constant::create(ng_dtype, shape, {value}); - paddle::platform::SetOutputNode(op, "Out", out, ngb_node_map); -} -} // namespace ngraphs -} // namespace operators -} // namespace paddle - -REGISTER_NG_OP(fill_constant, BuildFillConstantNode); diff --git a/paddle/fluid/operators/ngraph/ops/fill_zeros_like_op.h b/paddle/fluid/operators/ngraph/ops/fill_zeros_like_op.h deleted file mode 100644 index 163734be87..0000000000 --- a/paddle/fluid/operators/ngraph/ops/fill_zeros_like_op.h +++ /dev/null @@ -1,45 +0,0 @@ -/*Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#pragma once - -#include -#include -#include -#include - -#include "ngraph/ngraph.hpp" -#include "paddle/fluid/operators/ngraph/ops/op_bridge.h" -#include "paddle/fluid/platform/ngraph_helper.h" - -namespace paddle { -namespace operators { -namespace ngraphs { - -static void BuildFillZerosLikeNode( - const std::shared_ptr& op, - std::shared_ptr< - std::unordered_map>> - ngb_node_map) { - auto x = platform::GetInputNode(op, "X", ngb_node_map); - auto out = paddle::platform::CreateConstant(x->get_element_type(), - x->get_shape(), {0}); - platform::SetOutputNode(op, "Out", out, ngb_node_map); -} - -} // namespace ngraphs -} // namespace operators -} // namespace paddle - -REGISTER_NG_OP(fill_zeros_like, BuildFillZerosLikeNode); diff --git a/paddle/fluid/operators/ngraph/ops/gather_op.h b/paddle/fluid/operators/ngraph/ops/gather_op.h deleted file mode 100644 index 7d369b27d3..0000000000 --- a/paddle/fluid/operators/ngraph/ops/gather_op.h +++ /dev/null @@ -1,77 +0,0 @@ -/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
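fill_constant and fill_zeros_like above both lower to a single Constant node: a dense buffer of prod(shape) copies of the attribute value. A sketch of that lowering (dtype selection via GetNgType is elided here):

    #include <cstddef>
    #include <vector>

    std::vector<float> FillConstant(const std::vector<std::size_t>& shape,
                                    float value) {
      std::size_t numel = 1;
      for (std::size_t d : shape) numel *= d;   // prod(shape)
      return std::vector<float>(numel, value);  // fill_zeros_like: value == 0
    }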
*/ - -#pragma once - -#include -#include -#include -#include -#include -#include "ngraph/ngraph.hpp" -#include "paddle/fluid/operators/ngraph/ops/op_bridge.h" -#include "paddle/fluid/platform/ngraph_helper.h" - -namespace paddle { -namespace operators { -namespace ngraphs { - -void BuildGatherNode( - const std::shared_ptr& op, - std::shared_ptr< - std::unordered_map>> - ngb_node_map) { - auto x = platform::GetInputNode(op, "X", ngb_node_map); - PADDLE_ENFORCE_NOT_NULL(x); - - auto index = platform::GetInputNode(op, "Index", ngb_node_map); - auto& index_shape = index->get_shape(); - PADDLE_ENFORCE(index_shape.size() == 1 || - (index_shape.size() == 2 && index_shape[1] == 1)); - if (index_shape.size() == 2) { - index = platform::NgReshaper(index, ngraph::Shape{index_shape[0]}); - } - - auto out = std::make_shared(x, index); - - paddle::platform::SetOutputNode(op, "Out", out, ngb_node_map); -} -void BuildGatherGradNode( - const std::shared_ptr& op, - std::shared_ptr< - std::unordered_map>> - ngb_node_map) { - auto dout = platform::GetInputNode(op, "Out@GRAD", ngb_node_map); - PADDLE_ENFORCE_NOT_NULL(dout); - auto x = platform::GetInputNode(op, "X", ngb_node_map); - - auto index = platform::GetInputNode(op, "Index", ngb_node_map); - auto& index_shape = index->get_shape(); - PADDLE_ENFORCE(index_shape.size() == 1 || - (index_shape.size() == 2 && index_shape[1] == 1)); - if (index_shape.size() == 2) { - index = platform::NgReshaper(index, ngraph::Shape{index_shape[0]}); - } - - std::shared_ptr x0 = paddle::platform::CreateConstant( - dout->get_element_type(), x->get_shape(), {0}); - auto dx = std::make_shared(x0, index, dout); - paddle::platform::SetOutputNode(op, "X@GRAD", dx, ngb_node_map); -} -} // namespace ngraphs -} // namespace operators -} // namespace paddle - -REGISTER_NG_OP(gather, BuildGatherNode); -REGISTER_NG_OP(gather_grad, BuildGatherGradNode); diff --git a/paddle/fluid/operators/ngraph/ops/increment_op.h b/paddle/fluid/operators/ngraph/ops/increment_op.h deleted file mode 100644 index 4c4287e274..0000000000 --- a/paddle/fluid/operators/ngraph/ops/increment_op.h +++ /dev/null @@ -1,49 +0,0 @@ -/*Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
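The gather gradient above is a scatter-add onto a zero tensor shaped like X: each row of dout is accumulated at the position named by Index, and accumulation matters because an index may select the same row more than once. Loop form (plain C++, 2-D row-major sketch):

    #include <cstddef>
    #include <vector>

    std::vector<float> GatherGrad(const std::vector<float>& dout,
                                  const std::vector<int>& index, int x_rows,
                                  int row_width) {
      std::vector<float> dx(x_rows * row_width, 0.0f);  // zeros_like(x)
      for (std::size_t i = 0; i < index.size(); ++i)
        for (int j = 0; j < row_width; ++j)
          dx[index[i] * row_width + j] += dout[i * row_width + j];
      return dx;
    }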
*/ - -#pragma once - -#include -#include -#include -#include - -#include "ngraph/ngraph.hpp" -#include "paddle/fluid/operators/ngraph/ops/elementwise_node.h" -#include "paddle/fluid/platform/ngraph_helper.h" - -namespace paddle { -namespace operators { -namespace ngraphs { - -void BuildIncrementNode( - const std::shared_ptr& op, - std::shared_ptr< - std::unordered_map>> - ngb_node_map) { - auto x = paddle::platform::GetInputNode(op, "X", ngb_node_map); - auto op_attrs = paddle::framework::AttrReader(op->Attrs()); - float step = op_attrs.Get("step"); - auto step_op = std::make_shared( - x->get_element_type(), x->get_shape(), std::vector{step}); - std::shared_ptr out = - std::make_shared(x, step_op); - paddle::platform::SetOutputNode(op, "Out", out, ngb_node_map); -} - -} // namespace ngraphs -} // namespace operators -} // namespace paddle - -REGISTER_NG_OP(increment, BuildIncrementNode); diff --git a/paddle/fluid/operators/ngraph/ops/layer_norm_op.h b/paddle/fluid/operators/ngraph/ops/layer_norm_op.h deleted file mode 100644 index f56110f969..0000000000 --- a/paddle/fluid/operators/ngraph/ops/layer_norm_op.h +++ /dev/null @@ -1,195 +0,0 @@ -/*Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#pragma once - -#include -#include -#include -#include -#include "ngraph/ngraph.hpp" -#include "paddle/fluid/operators/ngraph/ops/op_bridge.h" -#include "paddle/fluid/platform/ngraph_helper.h" - -namespace paddle { -namespace operators { -namespace ngraphs { - -std::shared_ptr reshape_reduction( - std::shared_ptr node, const ngraph::Shape shape, - int begin_norm_axis) { - ngraph::Shape keepdims_shape(shape.begin(), shape.begin() + begin_norm_axis); - return paddle::platform::NgReshaper(node, keepdims_shape); -} - -std::shared_ptr broadcast_reduction( - std::shared_ptr node, const ngraph::Shape shape, - int begin_norm_axis) { - ngraph::AxisSet axis_set; - for (size_t i = begin_norm_axis; i < shape.size(); ++i) axis_set.insert(i); - auto reshape = reshape_reduction(node, shape, begin_norm_axis); - return std::make_shared(reshape, shape, axis_set); -} - -std::shared_ptr reshape_bias_scale( - std::shared_ptr node, const ngraph::Shape shape, - int begin_norm_axis) { - ngraph::Shape keepdims_shape(shape.begin() + begin_norm_axis, shape.end()); - return paddle::platform::NgReshaper(node, keepdims_shape); -} - -std::shared_ptr broadcast_bias_scale( - std::shared_ptr node, const ngraph::Shape shape, - int begin_norm_axis) { - auto reshape = reshape_bias_scale(node, shape, begin_norm_axis); - ngraph::AxisSet axis_set; - for (int i = 0; i < begin_norm_axis; ++i) axis_set.insert(i); - return std::make_shared(reshape, shape, axis_set); -} - -std::shared_ptr flatten(const std::shared_ptr& node, - bool insert_leading_one = false) { - size_t out = 1; - for (auto s : node->get_shape()) out *= s; - if (insert_leading_one) { - return paddle::platform::NgReshaper(node, ngraph::Shape{1, out}); - } else { - return paddle::platform::NgReshaper(node, ngraph::Shape{out}); - } -} - 
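The reshape_reduction / broadcast_reduction pair above emulates a keep-dims reduction: statistics computed over the normalized axes are reshaped to the leading dimensions and broadcast back across each row. The broadcast half in loop form, with rows = prod(shape[:begin_norm_axis]) and width = prod(shape[begin_norm_axis:]) (sketch):

    #include <cstddef>
    #include <vector>

    std::vector<float> BroadcastStat(const std::vector<float>& stat,
                                     std::size_t rows, std::size_t width) {
      std::vector<float> out(rows * width);
      for (std::size_t r = 0; r < rows; ++r)
        for (std::size_t c = 0; c < width; ++c)
          out[r * width + c] = stat[r];  // repeat the statistic across the row
      return out;
    }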
-static void BuildLayerNormNode( - const std::shared_ptr& op, - std::shared_ptr< - std::unordered_map>> - ngb_node_map) { - auto op_attrs = paddle::framework::AttrReader(op->Attrs()); - const auto begin_norm_axis = op_attrs.Get("begin_norm_axis"); - const auto epsilon = op_attrs.Get("epsilon"); - - auto x = paddle::platform::GetInputNode(op, "X", ngb_node_map); - auto scale = paddle::platform::GetInputNode(op, "Scale", ngb_node_map); - auto bias = paddle::platform::GetInputNode(op, "Bias", ngb_node_map); - - auto shape = x->get_shape(); - std::vector reduction_axes(shape.size() - begin_norm_axis); - std::iota(reduction_axes.begin(), reduction_axes.end(), begin_norm_axis); - - auto mean = ngraph::builder::mean(x, reduction_axes); - auto broadcast_mean = broadcast_reduction(mean, shape, begin_norm_axis); - - auto delta = x - broadcast_mean; - auto variance = ngraph::builder::mean(delta * delta, reduction_axes); - - auto eps = paddle::platform::CreateConstant(variance->get_element_type(), - variance->get_shape(), {epsilon}); - - auto stddev = std::make_shared(variance + eps); - auto broadcast_stddev = broadcast_reduction(stddev, shape, begin_norm_axis); - - auto norm = delta / broadcast_stddev; - - if (scale) { - auto broadcast_scale = broadcast_bias_scale(scale, shape, begin_norm_axis); - norm = norm * broadcast_scale; - } - if (bias) { - auto broadcast_bias = broadcast_bias_scale(bias, shape, begin_norm_axis); - norm = norm + broadcast_bias; - } - mean = flatten(mean); - variance = flatten(variance); - paddle::platform::SetOutputNode(op, "Y", norm, ngb_node_map); - paddle::platform::SetOutputNode(op, "Mean", mean, ngb_node_map); - paddle::platform::SetOutputNode(op, "Variance", variance, ngb_node_map); -} - -static void BuildLayerNormGradNode( - const std::shared_ptr& op, - std::shared_ptr< - std::unordered_map>> - ngb_node_map) { - auto op_attrs = paddle::framework::AttrReader(op->Attrs()); - const auto begin_norm_axis = op_attrs.Get("begin_norm_axis"); - const auto epsilon = op_attrs.Get("epsilon"); - - auto x = paddle::platform::GetInputNode(op, "X", ngb_node_map); - auto mean = paddle::platform::GetInputNode(op, "Mean", ngb_node_map); - auto variance = paddle::platform::GetInputNode(op, "Variance", ngb_node_map); - auto scale = paddle::platform::GetInputNode(op, "Scale", ngb_node_map); - auto dy = paddle::platform::GetInputNode(op, framework::GradVarName("Y"), - ngb_node_map); - - auto dx = paddle::platform::GetOutputNode(op, framework::GradVarName("X"), - ngb_node_map); - auto dscale = paddle::platform::GetOutputNode( - op, framework::GradVarName("Scale"), ngb_node_map); - auto dbias = paddle::platform::GetOutputNode( - op, framework::GradVarName("Bias"), ngb_node_map); - - auto shape = x->get_shape(); - - auto broadcast_mean = broadcast_reduction(mean, shape, begin_norm_axis); - - auto delta = x - broadcast_mean; - auto eps = paddle::platform::CreateConstant(variance->get_element_type(), - variance->get_shape(), {epsilon}); - - auto stddev = std::make_shared(variance + eps); - auto broadcast_stddev = broadcast_reduction(stddev, shape, begin_norm_axis); - - auto norm = delta / broadcast_stddev; - - if (dbias) { - std::vector reduction_axes(begin_norm_axis); - std::iota(reduction_axes.begin(), reduction_axes.end(), 0); - auto sum_dy = std::make_shared(dy, reduction_axes); - paddle::platform::SetOutputNode(op, framework::GradVarName("Bias"), - flatten(sum_dy), ngb_node_map); - } - if (dscale) { - std::vector reduction_axes(begin_norm_axis); - std::iota(reduction_axes.begin(), 
reduction_axes.end(), 0); - auto sum_dy = std::make_shared(dy * norm, reduction_axes); - paddle::platform::SetOutputNode(op, framework::GradVarName("Scale"), - flatten(sum_dy), ngb_node_map); - } - - if (dx) { - std::shared_ptr dx_end = dy / broadcast_stddev; - if (dscale) - dx_end = dx_end * broadcast_bias_scale(scale, shape, begin_norm_axis); - - std::vector reduction_axes(shape.size() - begin_norm_axis); - std::iota(reduction_axes.begin(), reduction_axes.end(), begin_norm_axis); - - auto dx_mean = broadcast_reduction( - ngraph::builder::mean(-dx_end, reduction_axes), shape, begin_norm_axis); - - auto dx_std = - norm * broadcast_reduction( - ngraph::builder::mean(-dx_end * norm, reduction_axes), shape, - begin_norm_axis); - - paddle::platform::SetOutputNode(op, framework::GradVarName("X"), - dx_end + dx_mean + dx_std, ngb_node_map); - } -} - -REGISTER_NG_OP(layer_norm, BuildLayerNormNode); -REGISTER_NG_OP(layer_norm_grad, BuildLayerNormGradNode); - -} // namespace ngraphs -} // namespace operators -} // namespace paddle diff --git a/paddle/fluid/operators/ngraph/ops/lookup_table_op.h b/paddle/fluid/operators/ngraph/ops/lookup_table_op.h deleted file mode 100644 index 45bb31599b..0000000000 --- a/paddle/fluid/operators/ngraph/ops/lookup_table_op.h +++ /dev/null @@ -1,109 +0,0 @@ -/*Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
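Collapsing the graph built in BuildLayerNormGradNode above into per-row arithmetic gives dx = g - mean(g) - norm * mean(g * norm), where g = dy * scale / stddev (the scale factor applies only when a Scale input exists). A standalone row-wise sketch of that formula:

    #include <cstddef>
    #include <vector>

    std::vector<float> LayerNormGradRow(const std::vector<float>& dy,
                                        const std::vector<float>& norm,
                                        float stddev, float scale) {
      std::size_t n = dy.size();
      std::vector<float> g(n), dx(n);
      float mg = 0.0f, mgn = 0.0f;  // mean(g), mean(g * norm)
      for (std::size_t i = 0; i < n; ++i) {
        g[i] = dy[i] * scale / stddev;
        mg += g[i] / static_cast<float>(n);
        mgn += g[i] * norm[i] / static_cast<float>(n);
      }
      for (std::size_t i = 0; i < n; ++i)
        dx[i] = g[i] - mg - norm[i] * mgn;  // dx_end + dx_mean + dx_std
      return dx;
    }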
*/ - -#pragma once - -#include -#include -#include -#include - -#include "ngraph/ngraph.hpp" -#include "ngraph/op/embedding_lookup.hpp" -#include "paddle/fluid/operators/lookup_table_op.h" -#include "paddle/fluid/operators/ngraph/ops/op_bridge.h" -#include "paddle/fluid/platform/ngraph_helper.h" - -namespace paddle { -namespace operators { -namespace ngraphs { - -void BuildLookupTableNode( - const std::shared_ptr& op, - std::shared_ptr< - std::unordered_map>> - ngb_node_map) { - auto op_attrs = paddle::framework::AttrReader(op->Attrs()); - const bool is_sparse = op_attrs.Get("is_sparse"); - const int64_t padding_idx = op_attrs.Get("padding_idx"); - - auto ng_ids = paddle::platform::GetInputNode(op, "Ids", ngb_node_map); - PADDLE_ENFORCE_NOT_NULL(ng_ids); - - const auto ng_w = paddle::platform::GetInputNode(op, "W", ngb_node_map); - PADDLE_ENFORCE_NOT_NULL(ng_w); - - if (is_sparse) { - PADDLE_THROW("Sparsity is not yet supported in nGraph lookup_table op."); - } - auto ng_w_mask = ng_w; - if (padding_idx != kNoPadding) { - auto w_shape = ng_w->get_shape(); - - std::vector maskV(w_shape[0], 1); - maskV[padding_idx] = 0; - auto maskV_node = std::make_shared( - ng_w->get_element_type(), ngraph::Shape{w_shape[0]}, maskV); - ngraph::AxisSet axis_set; - for (unsigned int i = 1; i < w_shape.size(); ++i) axis_set.insert(i); - auto maskV_bd = - std::make_shared(maskV_node, w_shape, axis_set); - ng_w_mask = std::make_shared(ng_w, maskV_bd); - } - auto shape = ng_ids->get_shape(); - if (shape.back() == 1) { - shape.pop_back(); - ng_ids = platform::NgReshaper(ng_ids, shape); - } - - auto ng_lookup = std::make_shared(ng_w_mask, ng_ids); - platform::SetOutputNode(op, "Out", ng_lookup, ngb_node_map); -} - -void BuildLookupTableGradNode( - const std::shared_ptr& op, - std::shared_ptr< - std::unordered_map>> - ngb_node_map) { - auto op_attrs = paddle::framework::AttrReader(op->Attrs()); - const bool is_sparse = op_attrs.Get("is_sparse"); - auto ng_ids = paddle::platform::GetInputNode(op, "Ids", ngb_node_map); - PADDLE_ENFORCE_NOT_NULL(ng_ids); - - const auto ng_w = paddle::platform::GetInputNode(op, "W", ngb_node_map); - PADDLE_ENFORCE_NOT_NULL(ng_w); - - auto dout = paddle::platform::GetInputNode(op, "Out@GRAD", ngb_node_map); - - if (is_sparse) { - PADDLE_THROW("Sparsity is not yet supported in nGraph lookup_table op."); - } - - auto shape = ng_ids->get_shape(); - if (shape.back() == 1) { - shape.pop_back(); - ng_ids = platform::NgReshaper(ng_ids, shape); - } - - std::shared_ptr W0 = paddle::platform::CreateConstant( - dout->get_element_type(), ng_w->get_shape(), {0}); - auto dW = std::make_shared(W0, ng_ids, dout); - platform::SetOutputNode(op, "W@GRAD", dW, ngb_node_map); -} -} // namespace ngraphs -} // namespace operators -} // namespace paddle - -REGISTER_NG_OP(lookup_table, BuildLookupTableNode); -REGISTER_NG_OP(lookup_table_grad, BuildLookupTableGradNode); diff --git a/paddle/fluid/operators/ngraph/ops/lrn_op.h b/paddle/fluid/operators/ngraph/ops/lrn_op.h deleted file mode 100644 index 68a0eea089..0000000000 --- a/paddle/fluid/operators/ngraph/ops/lrn_op.h +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
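The padding_idx handling above zeroes one row of W through a broadcast 0/1 mask before the embedding lookup, so padded ids embed to all-zero vectors. The equivalent direct form (plain C++ sketch; the kNoPadding fast path is omitted):

    #include <vector>

    std::vector<float> Lookup(const std::vector<float>& w, int width,
                              const std::vector<int>& ids, int padding_idx) {
      std::vector<float> out;
      out.reserve(ids.size() * width);
      for (int id : ids)
        for (int j = 0; j < width; ++j)  // masked row reads back as zeros
          out.push_back(id == padding_idx ? 0.0f : w[id * width + j]);
      return out;
    }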
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#pragma once - -#include -#include -#include -#include - -#include "ngraph/ngraph.hpp" -#include "paddle/fluid/operators/ngraph/ops/op_bridge.h" -#include "paddle/fluid/platform/ngraph_helper.h" - -namespace paddle { -namespace operators { -namespace ngraphs { -static void BuildLrnNode( - const std::shared_ptr& op, - std::shared_ptr< - std::unordered_map>> - ngb_node_map) { - auto input = platform::GetInputNode(op, "X", ngb_node_map); - - auto op_attrs = framework::AttrReader(op->Attrs()); - const int n = op_attrs.Get("n"); - const float alpha = op_attrs.Get("alpha") * static_cast(n); - const float beta = op_attrs.Get("beta"); - const float k = op_attrs.Get("k"); - - auto lrn_out = std::make_shared(input, alpha, beta, k, n); - std::shared_ptr mid_out = paddle::platform::CreateConstant( - input->get_element_type(), input->get_shape(), {k}); - - platform::SetOutputNode(op, "MidOut", mid_out, ngb_node_map); - platform::SetOutputNode(op, "Out", lrn_out, ngb_node_map); -} - -} // namespace ngraphs -} // namespace operators -} // namespace paddle - -REGISTER_NG_OP(lrn, BuildLrnNode); diff --git a/paddle/fluid/operators/ngraph/ops/matmul_op.h b/paddle/fluid/operators/ngraph/ops/matmul_op.h deleted file mode 100644 index fe239afa0b..0000000000 --- a/paddle/fluid/operators/ngraph/ops/matmul_op.h +++ /dev/null @@ -1,248 +0,0 @@ -/*Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
*/ - -#pragma once - -#include -#include -#include -#include - -#include "ngraph/ngraph.hpp" -#include "paddle/fluid/operators/ngraph/ops/elementwise_scalar_op.h" -#include "paddle/fluid/operators/ngraph/ops/op_bridge.h" -#include "paddle/fluid/platform/ngraph_helper.h" - -namespace paddle { -namespace operators { -namespace ngraphs { - -std::shared_ptr transposeAndFlat3D( - const std::shared_ptr& input, const bool transpose, - bool x = true) { - auto shape = input->get_shape(); - size_t n = shape.size(); - std::shared_ptr output; - if (n >= 3) { - std::vector order(n); - std::iota(std::begin(order), std::end(order), 0); - size_t outer = 1; - for (size_t i = 0; i < n - 2; i++) { - outer = outer * shape[i]; - } - std::vector reshape{outer, shape[n - 2], shape[n - 1]}; - - if (transpose == true) { - order[n - 2] = n - 1; - order[n - 1] = n - 2; - reshape[2] = shape[n - 2]; - reshape[1] = shape[n - 1]; - } - output = std::make_shared( - input, ngraph::AxisVector(order), ngraph::Shape(reshape)); - } else { - std::shared_ptr temp; - if (n == 1 && x == true) { - temp = std::make_shared(input, ngraph::AxisVector{0}, - ngraph::Shape{1, shape[0]}); - } else if (n == 1 && x == false) { - temp = std::make_shared(input, ngraph::AxisVector{0}, - ngraph::Shape{shape[0], 1}); - } else { - temp = input; - } - auto temp_shape = temp->get_shape(); - if (transpose == true) { - output = std::make_shared( - temp, ngraph::AxisVector{1, 0}, - ngraph::Shape{temp_shape[1], temp_shape[0]}); - } else { - output = temp; - } - } - return output; -} -std::shared_ptr broadcast3D( - const std::shared_ptr& input, size_t axis0) { - auto shape = input->get_shape(); - size_t n = shape.size(); - if (n == 2) { - auto output = std::make_shared( - input, ngraph::Shape{axis0, shape[0], shape[1]}, ngraph::AxisSet{0}); - return output; - } - return input; -} -std::shared_ptr dotOp(const std::shared_ptr& a, - const std::shared_ptr& b) { - std::shared_ptr out; - auto a_shape = a->get_shape(); - auto na = a_shape.size(); - auto b_shape = b->get_shape(); - auto nb = b_shape.size(); - if (na > 2 && nb > 2) { - out = std::make_shared(a, b); - } else { - out = std::make_shared(a, b); - } - return out; -} -std::shared_ptr reshapeToOriginal( - std::shared_ptr input, const ngraph::Shape& shape) { - auto input_shape = input->get_shape(); - std::vector axis(input_shape.size()); - std::iota(axis.begin(), axis.end(), 0); - auto out = std::make_shared(input, axis, shape); - return out; -} -void BuildMatMulNode( - const std::shared_ptr& op, - std::shared_ptr< - std::unordered_map>> - ngb_node_map) { - auto x = paddle::platform::GetInputNode(op, "X", ngb_node_map); - auto y = paddle::platform::GetInputNode(op, "Y", ngb_node_map); - auto op_attrs = paddle::framework::AttrReader(op->Attrs()); - bool transpose_x = op_attrs.Get("transpose_X"); - bool transpose_y = op_attrs.Get("transpose_Y"); - float alpha = op_attrs.Get("alpha"); - - std::shared_ptr out; - auto x_shape = x->get_shape(); - auto y_shape = y->get_shape(); - size_t nx = x_shape.size(); - size_t ny = y_shape.size(); - x = transposeAndFlat3D(x, transpose_x, true); - y = transposeAndFlat3D(y, transpose_y, false); - auto y_shape3 = y->get_shape(); - auto x_shape3 = x->get_shape(); - if (nx > 2 || ny > 2) { - ngraph::Shape out_shape = x_shape; - if (nx != 3) { - x = broadcast3D(x, y_shape3[0]); - out_shape = y_shape; - } - if (ny != 3) { - y = broadcast3D(y, x_shape3[0]); - out_shape = x_shape; - } - auto nout = out_shape.size(); - auto out3 = std::make_shared(x, y); - auto out3_shape = 
out3->get_shape(); - out_shape[nout - 1] = out3_shape[2]; - out_shape[nout - 2] = out3_shape[1]; - out = std::make_shared( - out3, ngraph::AxisVector{0, 1, 2}, out_shape); - } else { - out = std::make_shared(x, y); - } - auto out_shape = out->get_shape(); - std::vector axis(out_shape.size()); - std::iota(axis.begin(), axis.end(), 0); - for (size_t i = out_shape.size() - 1; i > 0; i--) { - if (out_shape[i] == 1) { - out_shape.erase(out_shape.begin() + i); - } - } - auto out_ = std::make_shared( - out, ngraph::AxisVector(axis), out_shape); - auto out_alpha = ElementwiseScalar(alpha, out_); - paddle::platform::SetOutputNode(op, "Out", out_alpha, ngb_node_map); -} - -void BuildMatMulGradNode( - const std::shared_ptr& op, - std::shared_ptr< - std::unordered_map>> - ngb_node_map) { - auto op_attrs = paddle::framework::AttrReader(op->Attrs()); - - auto dout = paddle::platform::GetInputNode(op, "Out@GRAD", ngb_node_map); - auto y = paddle::platform::GetInputNode(op, "Y", ngb_node_map); - auto x = paddle::platform::GetInputNode(op, "X", ngb_node_map); - - bool is_dx = paddle::platform::HasOutput(op, "X@GRAD") ? true : false; - bool is_dy = paddle::platform::HasOutput(op, "Y@GRAD") ? true : false; - bool transpose_x = op_attrs.Get("transpose_X"); - bool transpose_y = op_attrs.Get("transpose_Y"); - float alpha = op_attrs.Get("alpha"); - auto dout_shape = dout->get_shape(); - auto x_shape = x->get_shape(); - auto y_shape = y->get_shape(); - size_t nx = x_shape.size(); - size_t ny = y_shape.size(); - size_t ndout = dout_shape.size(); - std::shared_ptr x2, y2; - std::shared_ptr dout2; - - x2 = transposeAndFlat3D(x, false); - y2 = transposeAndFlat3D(y, false, false); - dout2 = transposeAndFlat3D(dout, false); - auto x2_shape = x2->get_shape(); - auto y2_shape = y2->get_shape(); - if (nx >= 3 || ny >= 3) { - std::shared_ptr dout_temp; - if (ndout == 2) { - dout_temp = std::make_shared( - dout, ngraph::AxisVector{0, 1}, - ngraph::Shape{dout_shape[0], dout_shape[1], 1}); - if (ny < 3) { - dout2 = dout_temp; - } else { - dout2 = transposeAndFlat3D(dout_temp, true); - } - } - x2 = broadcast3D(x2, y_shape[0]); - y2 = broadcast3D(y2, x_shape[0]); - - } else { - dout2 = transposeAndFlat3D(dout, false, nx == 1 && transpose_x == false); - } - - if (transpose_y == false) { - y2 = transposeAndFlat3D(y2, true); - } - if (transpose_x == false) { - x2 = transposeAndFlat3D(x2, true); - } - auto dx = dotOp(dout2, y2); - auto dy = dotOp(x2, dout2); - if (transpose_x == true) { - dx = transposeAndFlat3D(dx, true); - } - if (transpose_y == true) { - dy = transposeAndFlat3D(dy, true); - } - - if (nx < 3 && ny >= 3) { - dx = std::make_shared(dx, ngraph::AxisSet{0}); - } - if (ny < 3 && nx >= 3) { - dy = std::make_shared(dy, ngraph::AxisSet{0}); - } - auto dx_t = reshapeToOriginal(dx, x_shape); - auto dy_t = reshapeToOriginal(dy, y_shape); - auto dx_scale = ElementwiseScalar(1 / alpha, dx_t); - auto dy_scale = ElementwiseScalar(1 / alpha, dy_t); - if (is_dx) - paddle::platform::SetOutputNode(op, "X@GRAD", dx_scale, ngb_node_map); - if (is_dy) - paddle::platform::SetOutputNode(op, "Y@GRAD", dy_scale, ngb_node_map); -} -} // namespace ngraphs -} // namespace operators -} // namespace paddle - -REGISTER_NG_OP(matmul, BuildMatMulNode); -REGISTER_NG_OP(matmul_grad, BuildMatMulGradNode); diff --git a/paddle/fluid/operators/ngraph/ops/mean_op.h b/paddle/fluid/operators/ngraph/ops/mean_op.h deleted file mode 100644 index 86e697d260..0000000000 --- a/paddle/fluid/operators/ngraph/ops/mean_op.h +++ /dev/null @@ -1,72 +0,0 @@ 
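BuildMatMulNode above normalizes every case to a single 3-D batched product: leading dimensions are collapsed into one batch axis (transposeAndFlat3D), a 2-D operand is broadcast across that batch (broadcast3D), and the result is reshaped back to the original rank. The core product the graph computes, written as loops (plain C++ sketch, row-major):

    #include <cstddef>
    #include <vector>

    // (batch, m, k) x (batch, k, n) -> (batch, m, n)
    std::vector<float> BatchMatMul(const std::vector<float>& a,
                                   const std::vector<float>& b,
                                   std::size_t batch, std::size_t m,
                                   std::size_t k, std::size_t n) {
      std::vector<float> c(batch * m * n, 0.0f);
      for (std::size_t s = 0; s < batch; ++s)
        for (std::size_t i = 0; i < m; ++i)
          for (std::size_t j = 0; j < n; ++j)
            for (std::size_t t = 0; t < k; ++t)
              c[(s * m + i) * n + j] +=
                  a[(s * m + i) * k + t] * b[(s * k + t) * n + j];
      return c;
    }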
-/*Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License. */
-
-#pragma once
-
-#include <functional>
-#include <string>
-#include <unordered_map>
-#include <vector>
-
-#include "ngraph/ngraph.hpp"
-#include "paddle/fluid/operators/ngraph/ops/elementwise_scalar_op.h"
-#include "paddle/fluid/operators/ngraph/ops/op_bridge.h"
-#include "paddle/fluid/platform/ngraph_helper.h"
-
-namespace paddle {
-namespace operators {
-namespace ngraphs {
-
-void BuildMeanNode(
-    const std::shared_ptr<paddle::framework::OperatorBase>& op,
-    std::shared_ptr<
-        std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
-        ngb_node_map) {
-  auto input = paddle::platform::GetInputNode(op, "X", ngb_node_map);
-  ngraph::AxisSet axes;
-  for (size_t i = 0; i < input->get_shape().size(); ++i) {
-    axes.insert(i);
-  }
-
-  auto mean = ngraph::builder::mean(input, axes);
-  auto mean_1d = std::make_shared<ngraph::op::Reshape>(
-      mean, ngraph::AxisVector{}, ngraph::Shape{1});
-  paddle::platform::SetOutputNode(op, "Out", mean_1d, ngb_node_map);
-}
-
-void BuildMeanGradNode(
-    const std::shared_ptr<paddle::framework::OperatorBase>& op,
-    std::shared_ptr<
-        std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
-        ngb_node_map) {
-  auto x = paddle::platform::GetInputNode(op, "X", ngb_node_map);
-  auto og = paddle::platform::GetInputNode(op, "Out@GRAD", ngb_node_map);
-  auto x_shape = x->get_shape();
-  float x_size = std::accumulate(std::begin(x_shape), std::end(x_shape), 1,
-                                 std::multiplies<float>());
-  auto node_const = ngraph::op::Constant::create(og->get_element_type(),
-                                                 ngraph::Shape{1}, {x_size});
-  auto node_div = std::make_shared<ngraph::op::Divide>(og, node_const);
-
-  auto result = ElementwiseScalar<ngraph::op::Add>(
-      og / node_const,
-      ngraph::op::Constant::create(og->get_element_type(), x_shape, {0}));
-  paddle::platform::SetOutputNode(op, "X@GRAD", result, ngb_node_map);
-}
-}  // namespace ngraphs
-}  // namespace operators
-}  // namespace paddle
-
-REGISTER_NG_OP(mean, BuildMeanNode);
-REGISTER_NG_OP(mean_grad, BuildMeanGradNode);
diff --git a/paddle/fluid/operators/ngraph/ops/momentum_op.h b/paddle/fluid/operators/ngraph/ops/momentum_op.h
deleted file mode 100644
index 84bddacba8..0000000000
--- a/paddle/fluid/operators/ngraph/ops/momentum_op.h
+++ /dev/null
@@ -1,106 +0,0 @@
-/*Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
*/ - -#pragma once - -#include -#include -#include -#include -#include "ngraph/ngraph.hpp" -#include "paddle/fluid/operators/ngraph/ops/op_bridge.h" -#include "paddle/fluid/platform/ngraph_helper.h" - -namespace paddle { -namespace operators { -namespace ngraphs { - -void BuildMomentumNode( - const std::shared_ptr& op, - std::shared_ptr< - std::unordered_map>> - ngb_node_map) { - auto op_attrs = paddle::framework::AttrReader(op->Attrs()); - auto param = paddle::platform::GetInputNode(op, "Param", ngb_node_map); - auto grad = paddle::platform::GetInputNode(op, "Grad", ngb_node_map); - auto velocity = paddle::platform::GetInputNode(op, "Velocity", ngb_node_map); - auto learning_rate = - paddle::platform::GetInputNode(op, "LearningRate", ngb_node_map); - - auto mu = op_attrs.Get("mu"); - bool use_nesterov = op_attrs.Get("use_nesterov"); - - auto param_shape = param->get_shape(); - auto velocity_shape = velocity->get_shape(); - auto grad_shape = grad->get_shape(); - auto lr_shape = learning_rate->get_shape(); - - auto shape_velocity = ngraph::Shape{velocity_shape}; - auto mu_create = - ngraph::op::Constant::create(ngraph::element::f32, shape_velocity, {mu}); - - auto vel_mul = std::make_shared(velocity, mu_create); - auto vel_out = std::make_shared(vel_mul, grad); - - ngraph::NodeVector result; - if (use_nesterov) { - auto mul_res = std::make_shared(vel_out, mu_create); - auto add_res = std::make_shared(grad, mul_res); - - auto add_2d = paddle::platform::FlattenTo2d(add_res->get_shape(), 0); - auto vel_reshape = paddle::platform::NgReshaper(vel_out, add_2d); - - auto lr_bcast = std::make_shared( - learning_rate, vel_reshape->get_shape(), - ngraph::AxisSet{vel_reshape->get_shape().size() - 1}); - - auto lr_1d = paddle::platform::FlattenTo1d(lr_bcast->get_shape(), 0); - auto lr_reshape = std::make_shared( - lr_bcast, ngraph::AxisVector{0, 1}, lr_1d); - - lr_reshape = std::make_shared( - lr_reshape, ngraph::AxisVector{0}, param->get_shape()); - - auto mul_res1 = std::make_shared(add_res, lr_reshape); - auto res = std::make_shared(param, mul_res1); - paddle::platform::SetOutputNode(op, "ParamOut", res, ngb_node_map); - } else { - auto vel_2d = paddle::platform::FlattenTo2d(vel_out->get_shape(), 0); - auto vel_reshape = paddle::platform::NgReshaper(vel_out, vel_2d); - - auto lr_bcast = std::make_shared( - learning_rate, vel_reshape->get_shape(), - ngraph::AxisSet{vel_reshape->get_shape().size() - 1}); - - auto lr_1d = paddle::platform::FlattenTo1d(lr_bcast->get_shape(), 0); - auto lr_reshape = std::make_shared( - lr_bcast, ngraph::AxisVector{0, 1}, lr_1d); - - lr_reshape = std::make_shared( - lr_reshape, ngraph::AxisVector{0}, param->get_shape()); - - auto mul_result = - std::make_shared(lr_reshape, vel_out); - - auto res = std::make_shared(param, mul_result); - paddle::platform::SetOutputNode(op, "ParamOut", res, ngb_node_map); - } - paddle::platform::SetOutputNode(op, "VelocityOut", vel_out, ngb_node_map); -} - -} // namespace ngraphs -} // namespace operators -} // namespace paddle - -REGISTER_NG_OP(momentum, BuildMomentumNode); diff --git a/paddle/fluid/operators/ngraph/ops/mul_op.h b/paddle/fluid/operators/ngraph/ops/mul_op.h deleted file mode 100644 index cb46478ee8..0000000000 --- a/paddle/fluid/operators/ngraph/ops/mul_op.h +++ /dev/null @@ -1,143 +0,0 @@ -/*Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
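The momentum bridge above encodes the standard update: velocity' = mu * velocity + grad, then param' = param - lr * step, where step is velocity' in the plain case and grad + mu * velocity' with Nesterov. The reshape/broadcast traffic only aligns the scalar learning rate with the parameter shape. The same arithmetic in scalar form, as an illustrative sketch:

// Scalar version of the update the deleted graph encodes.
void MomentumStep(float* param, float* velocity, float grad, float lr,
                  float mu, bool use_nesterov) {
  *velocity = mu * *velocity + grad;
  float step = use_nesterov ? grad + mu * *velocity : *velocity;
  *param -= lr * step;
}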
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#pragma once - -#include -#include -#include -#include "ngraph/ngraph.hpp" -#include "paddle/fluid/operators/ngraph/ops/op_bridge.h" -#include "paddle/fluid/platform/ngraph_helper.h" - -namespace paddle { -namespace operators { -namespace ngraphs { - -static void BuildMulNode( - const std::shared_ptr& op, - std::shared_ptr< - std::unordered_map>> - ngb_node_map) { - auto op_attrs = paddle::framework::AttrReader(op->Attrs()); - int x_num_col_dims = op_attrs.Get("x_num_col_dims"); - int y_num_col_dims = op_attrs.Get("y_num_col_dims"); - auto x = paddle::platform::GetInputNode(op, "X", ngb_node_map); - auto y = paddle::platform::GetInputNode(op, "Y", ngb_node_map); - int y_rank = y->get_shape().size(); - - auto x_reshape = x; - auto y_reshape = y; - - if (x->get_shape().size() > 2) { - auto x_2d = paddle::platform::FlattenTo2d(x->get_shape(), x_num_col_dims); - x_reshape = paddle::platform::NgReshaper(x, x_2d); - } - - if (y->get_shape().size() > 2) { - auto y_2d = paddle::platform::FlattenTo2d(y->get_shape(), y_num_col_dims); - y_reshape = paddle::platform::NgReshaper(y, y_2d); - } - - std::shared_ptr out = - std::make_shared(x_reshape, y_reshape); - - ngraph::Shape out_shape; - for (int i = 0; i < x_num_col_dims; ++i) { - out_shape.push_back(x->get_shape()[i]); - } - for (int i = y_num_col_dims; i < y_rank; ++i) { - out_shape.push_back(y->get_shape()[i]); - } - out = paddle::platform::NgReshaper(out, out_shape); - paddle::platform::SetOutputNode(op, "Out", out, ngb_node_map); -} - -static void BuildMulGradNode( - const std::shared_ptr& op, - std::shared_ptr< - std::unordered_map>> - ngb_node_map) { - auto op_attrs = paddle::framework::AttrReader(op->Attrs()); - int x_num_col_dims = op_attrs.Get("x_num_col_dims"); - int y_num_col_dims = op_attrs.Get("y_num_col_dims"); - auto x = paddle::platform::GetInputNode(op, "X", ngb_node_map); - auto y = paddle::platform::GetInputNode(op, "Y", ngb_node_map); - auto dout = paddle::platform::GetInputNode(op, "Out@GRAD", ngb_node_map); - - bool is_dx = paddle::platform::HasOutput(op, "X@GRAD") ? true : false; - bool is_dy = paddle::platform::HasOutput(op, "Y@GRAD") ? 
true : false; - - auto x_shape = x->get_shape(); - auto y_shape = y->get_shape(); - - auto x_reshape = x; - auto y_reshape = y; - - if (x_shape.size() > 2) { - auto x_2d_shape = paddle::platform::FlattenTo2d(x_shape, x_num_col_dims); - x_reshape = paddle::platform::NgReshaper(x, x_2d_shape); - } - - if (y_shape.size() > 2) { - auto y_2d_shape = paddle::platform::FlattenTo2d(y_shape, y_num_col_dims); - y_reshape = paddle::platform::NgReshaper(y, y_2d_shape); - } - - auto x_reshape_shape = x_reshape->get_shape(); - std::reverse(x_reshape_shape.begin(), x_reshape_shape.end()); - auto x_transpose = std::make_shared( - x_reshape, ngraph::AxisVector{1, 0}, x_reshape_shape); - - auto y_reshape_shape = y_reshape->get_shape(); - std::reverse(y_reshape_shape.begin(), y_reshape_shape.end()); - auto y_transpose = std::make_shared( - y_reshape, ngraph::AxisVector{1, 0}, y_reshape_shape); - - if (is_dx) { - if (dout->get_shape().size() > 2) { - auto dout_2d_shape = paddle::platform::FlattenTo2d(dout->get_shape(), 2); - dout = paddle::platform::NgReshaper(dout, dout_2d_shape); - } - auto dx = std::make_shared(dout, y_transpose); - - if (dx->get_shape() == x_shape) { - paddle::platform::SetOutputNode(op, "X@GRAD", dx, ngb_node_map); - } else { - auto dx_reshape = paddle::platform::NgReshaper(dx, x_shape); - paddle::platform::SetOutputNode(op, "X@GRAD", dx_reshape, ngb_node_map); - } - } - - if (is_dy) { - if (dout->get_shape().size() > 2) { - auto dout_2d_shape = paddle::platform::FlattenTo2d(dout->get_shape(), 2); - dout = paddle::platform::NgReshaper(dout, dout_2d_shape); - } - auto dy = std::make_shared(x_transpose, dout); - - if (dy->get_shape() == y_shape) { - paddle::platform::SetOutputNode(op, "Y@GRAD", dy, ngb_node_map); - } else { - auto dy_reshape = paddle::platform::NgReshaper(dy, y_shape); - paddle::platform::SetOutputNode(op, "Y@GRAD", dy_reshape, ngb_node_map); - } - } -} -} // namespace ngraphs -} // namespace operators -} // namespace paddle - -REGISTER_NG_OP(mul, BuildMulNode); -REGISTER_NG_OP(mul_grad, BuildMulGradNode); diff --git a/paddle/fluid/operators/ngraph/ops/op_bridge.h b/paddle/fluid/operators/ngraph/ops/op_bridge.h deleted file mode 100644 index 93df0ad806..0000000000 --- a/paddle/fluid/operators/ngraph/ops/op_bridge.h +++ /dev/null @@ -1,84 +0,0 @@ -/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
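The mul bridge relies on x_num_col_dims / y_num_col_dims to view higher-rank tensors as matrices: dims [0, num) collapse into rows and [num, rank) into columns. A sketch of that flattening rule, mirroring what FlattenTo2d computes (helper name is illustrative):

#include <cstddef>
#include <functional>
#include <numeric>
#include <utility>
#include <vector>

// Collapse dims [0, num) into rows and [num, rank) into columns, the
// rule applied before the 2-D Dot in the deleted bridge.
std::pair<size_t, size_t> To2d(const std::vector<size_t>& shape, int num) {
  size_t rows = std::accumulate(shape.begin(), shape.begin() + num,
                                size_t{1}, std::multiplies<size_t>());
  size_t cols = std::accumulate(shape.begin() + num, shape.end(), size_t{1},
                                std::multiplies<size_t>());
  return {rows, cols};
}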
 */
-
-#pragma once
-#include <functional>
-#include <map>
-#include <string>
-#include <unordered_map>
-
-#include "ngraph/node.hpp"
-#include "paddle/fluid/framework/operator.h"
-#include "paddle/fluid/operators/ngraph/ngraph_bridge.h"
-#include "paddle/fluid/platform/enforce.h"
-
-namespace paddle {
-namespace operators {
-namespace ops {
-
-class NgraphSingleton {
-  NgraphSingleton() = default;
-  NgraphSingleton(NgraphSingleton const&) = delete;
-  void operator=(NgraphSingleton const) = delete;
-
-  ~NgraphSingleton() = default;
-
-  static std::map<
-      std::string,
-      std::function<void(const std::shared_ptr<framework::OperatorBase>&,
-                         std::shared_ptr<std::unordered_map<
-                             std::string, std::shared_ptr<ngraph::Node>>>)>>
-      ng_node_maps_;
-
- public:
-  template <typename TF>
-  static void Register(TF&& tf, const std::string& name) {
-    ng_node_maps_[name] = tf;
-  }
-
-  static bool Lookup(const std::string& name) {
-    auto it = ng_node_maps_.find(name);
-    if (it == ng_node_maps_.end()) {
-      return true;
-    }
-    return false;
-  }
-
-  static void BuildNode(
-      const std::shared_ptr<std::unordered_map<
-          std::string, std::shared_ptr<ngraph::Node>>>& ng_maps,
-      const std::shared_ptr<framework::OperatorBase>& op,
-      const std::string& name) {
-    ng_node_maps_[name](op, ng_maps);
-  }
-};
-
-std::map<std::string,
-         std::function<void(const std::shared_ptr<framework::OperatorBase>&,
-                            std::shared_ptr<std::unordered_map<
-                                std::string, std::shared_ptr<ngraph::Node>>>)>>
-    NgraphSingleton::ng_node_maps_;
-
-}  // namespace ops
-}  // namespace operators
-}  // namespace paddle
-
-#define REGISTER_NG_OP(op_type__, Converter__)                  \
-  struct ng_##op_type__##_converter {                           \
-    ng_##op_type__##_converter() {                              \
-      paddle::operators::ops::NgraphSingleton::Register(        \
-          paddle::operators::ngraphs::Converter__, #op_type__); \
-    }                                                           \
-  };                                                            \
-  ng_##op_type__##_converter ng_##op_type__##_converter__;
diff --git a/paddle/fluid/operators/ngraph/ops/pool2d_op.h b/paddle/fluid/operators/ngraph/ops/pool2d_op.h
deleted file mode 100644
index e5542d4715..0000000000
--- a/paddle/fluid/operators/ngraph/ops/pool2d_op.h
+++ /dev/null
@@ -1,191 +0,0 @@
-/*Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
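Each REGISTER_NG_OP expansion above defines a struct whose constructor runs at static-initialization time and inserts the converter into NgraphSingleton's map keyed by op type; Lookup then answers whether a given op has no bridge. The same pattern in miniature, with illustrative names:

#include <functional>
#include <map>
#include <string>

using Converter = std::function<void()>;

// One registry for the process; a function-local static avoids
// initialization-order issues across translation units.
std::map<std::string, Converter>& Registry() {
  static std::map<std::string, Converter> m;
  return m;
}

#define REGISTER_CONVERTER(name, fn)                              \
  struct reg_##name {                                             \
    reg_##name() { Registry()[#name] = fn; }                      \
  };                                                              \
  static reg_##name reg_##name##_instance;

Note the design difference: this sketch uses a function-local static, while the deleted header defines the map as a class static directly in the header, relying on the singleton being included from a single translation unit.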
*/ - -#pragma once - -#include -#include -#include -#include - -#include "ngraph/ngraph.hpp" -#include "paddle/fluid/operators/ngraph/ops/op_bridge.h" -#include "paddle/fluid/platform/ngraph_helper.h" - -namespace paddle { -namespace operators { -namespace ngraphs { - -void BuildPool2dNode( - const std::shared_ptr& op, - std::shared_ptr< - std::unordered_map>> - ngb_node_map) { - auto op_attrs = paddle::framework::AttrReader(op->Attrs()); - auto x = paddle::platform::GetInputNode(op, "X", ngb_node_map); - auto x_shape = x->get_shape(); - - std::string pooling_type = op_attrs.Get("pooling_type"); - std::vector ksize = op_attrs.Get>("ksize"); - std::vector strides = op_attrs.Get>("strides"); - std::vector paddings = op_attrs.Get>("paddings"); - - PADDLE_ENFORCE_EQ(x_shape.size() - 2, ksize.size(), - "Handling 2d pooling only"); - - if (op_attrs.Get("global_pooling")) { - for (size_t i = 0; i < ksize.size(); ++i) { - paddings[i] = 0; - ksize[i] = static_cast(x_shape.at(i + 2)); - } - } - - ngraph::Shape ng_padding_below{static_cast(paddings.at(0)), - static_cast(paddings.at(1))}; - ngraph::Shape ng_padding_above{static_cast(paddings.at(0)), - static_cast(paddings.at(1))}; - ngraph::Shape ng_ksize_shape{static_cast(ksize.at(0)), - static_cast(ksize.at(1))}; - ngraph::Strides ng_strides{static_cast(strides.at(0)), - static_cast(strides.at(1))}; - - auto ComputeFlooredOutput = [](size_t in, size_t k, size_t p, size_t s) { - return (in - k + 2 * p) / s + 1; - }; - auto ComputeCeiledOutput = [](size_t in, size_t k, size_t p, size_t s) { - return ceil(static_cast(in - k + 2 * p) / s) + 1; - }; - - if (op_attrs.Get("ceil_mode")) { - for (size_t i = 0; i < ng_padding_above.size(); ++i) { - auto ceiled_size = ComputeCeiledOutput(x_shape[i + 2], ksize[i], - paddings[i], strides[i]); - auto floored_size = ComputeFlooredOutput(x_shape[i + 2], ksize[i], - paddings[i], strides[i]); - if (ceiled_size != floored_size) { - ng_padding_above[i] += strides[i]; - } - } - } - - bool padding_exclusive = op_attrs.Get("exclusive"); - if (pooling_type == "max") { - auto pool2d = std::make_shared( - x, ng_ksize_shape, ng_strides, ng_padding_below, ng_padding_above); - paddle::platform::SetOutputNode(op, "Out", pool2d, ngb_node_map); - } else if (pooling_type == "avg") { - std::shared_ptr pool2d; - if (op_attrs.Get("adaptive")) { - auto ComputeAdaptive = [](size_t in, size_t k) { - return std::floor(in / k); - }; - ng_strides[0] = x_shape.size() == 4 - ? ComputeAdaptive(x_shape[3], ksize[0]) - : ng_strides[0]; - ng_strides[1] = x_shape.size() == 4 - ? 
ComputeAdaptive(x_shape[3], ksize[0]) - : ng_strides[1]; - pool2d = - std::make_shared(x, ng_ksize_shape, ng_strides); - } else { - if ((ng_padding_below[0] == 0) && (ng_padding_below[1] == 0) && - (ng_padding_above[0] == 0) && (ng_padding_above[1] == 0)) { - padding_exclusive = false; - } - pool2d = std::make_shared( - x, ng_ksize_shape, ng_strides, ng_padding_below, ng_padding_above, - !padding_exclusive); - } - paddle::platform::SetOutputNode(op, "Out", pool2d, ngb_node_map); - } else { - PADDLE_THROW("Support max and avg pooling only"); - } -} - -void BuildPool2dGradNode( - const std::shared_ptr& op, - std::shared_ptr< - std::unordered_map>> - ngb_node_map) { - auto op_attrs = paddle::framework::AttrReader(op->Attrs()); - auto out = paddle::platform::GetInputNode(op, "Out", ngb_node_map); - auto dout = paddle::platform::GetInputNode(op, "Out@GRAD", ngb_node_map); - auto x = paddle::platform::GetInputNode(op, "X", ngb_node_map); - auto x_shape = x->get_shape(); - - std::string pooling_type = op_attrs.Get("pooling_type"); - std::vector ksize = op_attrs.Get>("ksize"); - std::vector strides = op_attrs.Get>("strides"); - std::vector paddings = op_attrs.Get>("paddings"); - - PADDLE_ENFORCE_EQ(x_shape.size() - 2, ksize.size(), - "Handling 2d pooling only"); - - if (op_attrs.Get("global_pooling")) { - for (size_t i = 0; i < ksize.size(); ++i) { - paddings[i] = 0; - ksize[i] = static_cast(x_shape.at(i + 2)); - } - } - - ngraph::Shape ng_padding_below{static_cast(paddings.at(0)), - static_cast(paddings.at(1))}; - ngraph::Shape ng_padding_above{static_cast(paddings.at(0)), - static_cast(paddings.at(1))}; - ngraph::Shape ng_ksize_shape{static_cast(ksize.at(0)), - static_cast(ksize.at(1))}; - ngraph::Strides ng_strides{static_cast(strides.at(0)), - static_cast(strides.at(1))}; - - bool padding_exclusive = op_attrs.Get("exclusive"); - if (pooling_type == "max") { - auto pool2d_grad = std::make_shared( - x, dout, out, ng_ksize_shape, ng_strides, ng_padding_below, - ng_padding_above); - paddle::platform::SetOutputNode(op, "X@GRAD", pool2d_grad, ngb_node_map); - } else if (pooling_type == "avg") { - std::shared_ptr pool2d_grad; - if (op_attrs.Get("adaptive")) { - auto ComputeAdaptive = [](size_t in, size_t k) { - return std::floor(in / k); - }; - ng_strides[0] = x_shape.size() == 4 - ? ComputeAdaptive(x_shape[3], ksize[0]) - : ng_strides[0]; - ng_strides[1] = x_shape.size() == 4 - ? ComputeAdaptive(x_shape[3], ksize[0]) - : ng_strides[1]; - pool2d_grad = std::make_shared( - x->get_shape(), dout, ng_ksize_shape, ng_strides, ng_padding_below, - ng_padding_above, !padding_exclusive); - } else { - if ((ng_padding_below[0] == 0) && (ng_padding_below[1] == 0) && - (ng_padding_above[0] == 0) && (ng_padding_above[1] == 0)) { - padding_exclusive = false; - } - pool2d_grad = std::make_shared( - x->get_shape(), dout, ng_ksize_shape, ng_strides, ng_padding_below, - ng_padding_above, !padding_exclusive); - } - paddle::platform::SetOutputNode(op, "X@GRAD", pool2d_grad, ngb_node_map); - } else { - PADDLE_THROW("Support max and avg pooling only"); - } -} -} // namespace ngraphs -} // namespace operators -} // namespace paddle - -REGISTER_NG_OP(pool2d, BuildPool2dNode); -REGISTER_NG_OP(pool2d_grad, BuildPool2dGradNode); diff --git a/paddle/fluid/operators/ngraph/ops/reduce_sum_op.h b/paddle/fluid/operators/ngraph/ops/reduce_sum_op.h deleted file mode 100644 index ad89052880..0000000000 --- a/paddle/fluid/operators/ngraph/ops/reduce_sum_op.h +++ /dev/null @@ -1,161 +0,0 @@ -/*Copyright (c) 2019 PaddlePaddle Authors. 
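In the pool2d bridge, ceil_mode is emulated by comparing the floored and ceiled output extents per dimension and padding the upper side by one stride where they differ. The two size formulas, restated as a standalone sketch:

#include <cmath>
#include <cstddef>

// Output extent of one pooled dimension, matching the bridge's
// ComputeFlooredOutput / ComputeCeiledOutput lambdas.
size_t FlooredOut(size_t in, size_t k, size_t p, size_t s) {
  return (in - k + 2 * p) / s + 1;
}
size_t CeiledOut(size_t in, size_t k, size_t p, size_t s) {
  return static_cast<size_t>(
             std::ceil(static_cast<double>(in - k + 2 * p) / s)) + 1;
}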
All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#pragma once - -#include -#include -#include -#include -#include -#include -#include "ngraph/ngraph.hpp" -#include "paddle/fluid/operators/ngraph/ops/op_bridge.h" -#include "paddle/fluid/platform/ngraph_helper.h" - -namespace paddle { -namespace operators { -namespace ngraphs { - -void BuildReduceSumNode( - const std::shared_ptr &op, - std::shared_ptr< - std::unordered_map>> - ngb_node_map) { - auto input = paddle::platform::GetInputNode(op, "X", ngb_node_map); - auto op_attrs = paddle::framework::AttrReader(op->Attrs()); - bool reduce_all = op_attrs.Get("reduce_all"); - bool keep_dim = op_attrs.Get("keep_dim"); - std::vector dim = op_attrs.Get>("dim"); - auto input_shape = input->get_shape(); - ngraph::AxisSet axes; - if (reduce_all == true) { - for (size_t i = 0; i < input_shape.size(); ++i) { - axes.insert(i); - } - } else { - for (auto &i : dim) { - if (i < 0) { - axes.insert(input_shape.size() + i); - } else { - axes.insert(i); - } - } - } - std::shared_ptr reduce_sum = - std::make_shared(input, axes); - - if (keep_dim == true) { - std::vector dim_shape; - std::copy(input_shape.begin(), input_shape.end(), - std::back_inserter(dim_shape)); - for (auto &i : dim) { - if (i < 0) { - i = input_shape.size() + i; - } - dim_shape[i] = 1; - } - - std::vector axis_vector(input_shape.size() - dim.size()); - std::iota(axis_vector.begin(), axis_vector.end(), 0); - - auto reduce_sum_dim = std::make_shared( - reduce_sum, ngraph::AxisVector(axis_vector), ngraph::Shape(dim_shape)); - - paddle::platform::SetOutputNode(op, "Out", reduce_sum_dim, ngb_node_map); - } else { - if (reduce_sum->get_shape() == ngraph::Shape{}) { - reduce_sum = paddle::platform::NgReshaper(reduce_sum, ngraph::Shape{1}); - } - paddle::platform::SetOutputNode(op, "Out", reduce_sum, ngb_node_map); - } -} - -void BuildReduceSumGradNode( - const std::shared_ptr &op, - std::shared_ptr< - std::unordered_map>> - ngb_node_map) { - auto x = paddle::platform::GetInputNode(op, "X", ngb_node_map); - auto og = paddle::platform::GetInputNode(op, "Out@GRAD", ngb_node_map); - auto op_attrs = paddle::framework::AttrReader(op->Attrs()); - std::vector dim = op_attrs.Get>("dim"); - bool reduce_all = op_attrs.Get("reduce_all"); - bool keep_dim = op_attrs.Get("keep_dim"); - - auto og_shape = og->get_shape(); - auto x_shape = x->get_shape(); - float x_size = std::accumulate(std::begin(x_shape), std::end(x_shape), 1, - std::multiplies()); - float og_size = std::accumulate(std::begin(og_shape), std::end(og_shape), 1, - std::multiplies()); - ngraph::AxisSet axes; - - if (reduce_all == true) { - for (size_t i = 0; i < x_shape.size(); i++) { - axes.insert(i); - } - } else { - for (auto &i : dim) { - if (i < 0) { - axes.insert(x_shape.size() + i); - } else { - axes.insert(i); - } - } - } - std::vector axis_vector(og_shape.size()); - std::iota(axis_vector.begin(), axis_vector.end(), 0); - std::vector dim_shape; - - for (size_t i = 0; i < x_shape.size(); i++) { - if (std::find(dim.begin(), dim.end(), i) == 
dim.end() && - std::find(dim.begin(), dim.end(), i - x_shape.size()) == dim.end()) { - dim_shape.push_back(x_shape[i]); - } - } - - if (keep_dim == true) { - // reshape - if (x_size == og_size) { - paddle::platform::SetOutputNode(op, "X@GRAD", og, ngb_node_map); - return; - } - auto og_dim = std::make_shared( - og, ngraph::AxisVector(axis_vector), ngraph::Shape(dim_shape)); - auto result = - std::make_shared(og_dim, x_shape, axes); - paddle::platform::SetOutputNode(op, "X@GRAD", result, ngb_node_map); - - } else { - if (x_size == og_size) { - auto og_dim = std::make_shared( - og, ngraph::AxisVector(axis_vector), x_shape); - paddle::platform::SetOutputNode(op, "X@GRAD", og_dim, ngb_node_map); - } else { - if (og->get_shape().size() == 1 && og->get_shape()[0] == 1) { - og = std::make_shared(og, ngraph::AxisVector{0}, - ngraph::Shape{}); - } - auto result = std::make_shared(og, x_shape, axes); - paddle::platform::SetOutputNode(op, "X@GRAD", result, ngb_node_map); - } - } -} -} // namespace ngraphs -} // namespace operators -} // namespace paddle - -REGISTER_NG_OP(reduce_sum, BuildReduceSumNode); -REGISTER_NG_OP(reduce_sum_grad, BuildReduceSumGradNode); diff --git a/paddle/fluid/operators/ngraph/ops/reshape_op.h b/paddle/fluid/operators/ngraph/ops/reshape_op.h deleted file mode 100644 index a0fe441e47..0000000000 --- a/paddle/fluid/operators/ngraph/ops/reshape_op.h +++ /dev/null @@ -1,112 +0,0 @@ -/*Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
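BuildReduceSumGradNode broadcasts Out@GRAD back over the reduced axes; with keep_dim the singleton dimensions are first reshaped away so the broadcast axes line up. A sketch of the axis set it derives, where negative dims wrap around as in the code above:

#include <set>
#include <vector>

// Resolve reduce dims to absolute axes, as the deleted grad builder does.
std::set<size_t> ReduceAxes(const std::vector<int>& dim, size_t rank,
                            bool reduce_all) {
  std::set<size_t> axes;
  if (reduce_all) {
    for (size_t i = 0; i < rank; ++i) axes.insert(i);
  } else {
    for (int i : dim) axes.insert(i < 0 ? rank + i : static_cast<size_t>(i));
  }
  return axes;
}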
*/ - -#pragma once - -#include -#include -#include -#include -#include -#include - -#include "ngraph/ngraph.hpp" -#include "paddle/fluid/operators/ngraph/ops/op_bridge.h" -#include "paddle/fluid/platform/enforce.h" -#include "paddle/fluid/platform/ngraph_helper.h" - -namespace paddle { -namespace operators { -namespace ngraphs { - -ngraph::Shape calc_output_shape(const ngraph::Shape& input_shape, - const std::vector& v_shape) { - auto out_shape = v_shape; - for (size_t i = 0; i < v_shape.size(); ++i) { - if (v_shape[i] == 0) { - out_shape[i] = input_shape[i]; - } - } - int size_input = ngraph::shape_size(input_shape); - int size_out = 1; - for (auto o : out_shape) { - if (o > 0) size_out *= o; - } - for (auto& o : out_shape) { - if (o == -1) o = size_input / size_out; - } - return ngraph::Shape(out_shape.begin(), out_shape.end()); -} - -template -static void BuildReshapeNode( - const std::shared_ptr& op, - std::shared_ptr< - std::unordered_map>> - ngb_node_map) { - std::shared_ptr input = - platform::GetInputNode(op, "X", ngb_node_map); - auto input_shape = input->get_shape(); - - std::shared_ptr shape = - platform::GetInputNode(op, "Shape", ngb_node_map); - PADDLE_ENFORCE_EQ(shape, nullptr, - platform::errors::Unimplemented( - "Support for Shape input is not implemented")); - - auto op_attrs = framework::AttrReader(op->Attrs()); - std::vector v_shape = op_attrs.Get>("shape"); - - auto out_shape = calc_output_shape(input_shape, v_shape); - auto out = platform::NgReshaper(input, out_shape); - platform::SetOutputNode(op, "Out", out, ngb_node_map); - - if (is_v2) { - ngraph::Shape input_xshape(input_shape.size() + 1); - input_xshape[0] = 0; - std::copy(input_shape.begin(), input_shape.end(), input_xshape.begin() + 1); - auto xshape_node = std::make_shared( - input->get_element_type(), input_xshape, std::vector{}); - platform::SetOutputNode(op, "XShape", xshape_node, ngb_node_map); - } -} - -template -void BuildReshapeGradNode( - const std::shared_ptr& op, - std::shared_ptr< - std::unordered_map>> - ngb_node_map) { - auto dout = paddle::platform::GetInputNode(op, "Out@GRAD", ngb_node_map); - ngraph::Shape out_shape; - if (is_v2) { - auto& xshape = - platform::GetInputNode(op, "XShape", ngb_node_map)->get_shape(); - out_shape.resize(xshape.size() - 1); - std::copy(xshape.begin() + 1, xshape.end(), out_shape.begin()); - } else { - auto input = paddle::platform::GetInputNode(op, "X", ngb_node_map); - out_shape = input->get_shape(); - } - auto dx = platform::NgReshaper(dout, out_shape); - paddle::platform::SetOutputNode(op, "X@GRAD", dx, ngb_node_map); -} -} // namespace ngraphs -} // namespace operators -} // namespace paddle - -REGISTER_NG_OP(reshape, BuildReshapeNode); -REGISTER_NG_OP(reshape2, BuildReshapeNode); -REGISTER_NG_OP(reshape_grad, BuildReshapeGradNode); -REGISTER_NG_OP(reshape2_grad, BuildReshapeGradNode); diff --git a/paddle/fluid/operators/ngraph/ops/scale_op.h b/paddle/fluid/operators/ngraph/ops/scale_op.h deleted file mode 100644 index 1461b85b16..0000000000 --- a/paddle/fluid/operators/ngraph/ops/scale_op.h +++ /dev/null @@ -1,44 +0,0 @@ -/*Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#pragma once - -#include -#include -#include -#include "ngraph/ngraph.hpp" -#include "paddle/fluid/operators/ngraph/ops/elementwise_scalar_op.h" -#include "paddle/fluid/operators/ngraph/ops/op_bridge.h" -#include "paddle/fluid/platform/ngraph_helper.h" - -namespace paddle { -namespace operators { -namespace ngraphs { - -void BuildScaleNode( - const std::shared_ptr& op, - std::shared_ptr< - std::unordered_map>> - ngb_node_map) { - auto op_attrs = paddle::framework::AttrReader(op->Attrs()); - float scale = op_attrs.Get("scale"); - auto x = paddle::platform::GetInputNode(op, "X", ngb_node_map); - auto out = ElementwiseScalar(scale, x); - paddle::platform::SetOutputNode(op, "Out", out, ngb_node_map); -} -} // namespace ngraphs -} // namespace operators -} // namespace paddle - -REGISTER_NG_OP(scale, BuildScaleNode); diff --git a/paddle/fluid/operators/ngraph/ops/slice_op.h b/paddle/fluid/operators/ngraph/ops/slice_op.h deleted file mode 100644 index f5ab413540..0000000000 --- a/paddle/fluid/operators/ngraph/ops/slice_op.h +++ /dev/null @@ -1,121 +0,0 @@ -/*Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#pragma once - -#include -#include -#include -#include -#include -#include "ngraph/ngraph.hpp" -#include "paddle/fluid/operators/ngraph/ops/op_bridge.h" -#include "paddle/fluid/platform/ngraph_helper.h" - -namespace paddle { -namespace operators { -namespace ngraphs { - -void BuildSliceNode( - const std::shared_ptr& op, - std::shared_ptr< - std::unordered_map>> - ngb_node_map) { - auto input = paddle::platform::GetInputNode(op, "Input", ngb_node_map); - auto input_shape = input->get_shape(); - auto op_attrs = framework::AttrReader(op->Attrs()); - auto axes = op_attrs.Get>("axes"); - auto starts = op_attrs.Get>("starts"); - auto ends = op_attrs.Get>("ends"); - ngraph::Coordinate ng_start, ng_end; - int axis, start, end; - for (size_t i = 0; i < input_shape.size(); ++i) { - ng_start.push_back(0); - ng_end.push_back(input_shape[i]); - } - for (size_t i = 0; i < axes.size(); ++i) { - axis = input_shape[axes[i]]; - start = starts[i] < 0 ? (starts[i] + axis) : starts[i]; - end = ends[i] < 0 ? 
(ends[i] + axis) : ends[i]; - start = std::max(start, 0); - end = std::max(end, 0); - start = std::min(start, axis); - end = std::min(end, axis); - start = std::min(start, end); - ng_start[axes[i]] = start; - ng_end[axes[i]] = end; - } - auto out = std::make_shared(input, ng_start, ng_end); - auto out_shape = out->get_shape(); - - std::vector out_axis_vec(out_shape.size()); - std::iota(out_axis_vec.begin(), out_axis_vec.end(), 0); - - paddle::platform::TrimTrailingSingularDims(&out_shape); - auto out_dim = std::make_shared( - out, ngraph::AxisVector(out_axis_vec), ngraph::Shape(out_shape)); - - platform::SetOutputNode(op, "Out", out_dim, ngb_node_map); -} - -void BuildSliceGradNode( - const std::shared_ptr& op, - std::shared_ptr< - std::unordered_map>> - ngb_node_map) { - auto input = paddle::platform::GetInputNode(op, "Input", ngb_node_map); - auto input_shape = input->get_shape(); - auto dout = paddle::platform::GetInputNode(op, "Out@GRAD", ngb_node_map); - auto op_attrs = framework::AttrReader(op->Attrs()); - auto axes = op_attrs.Get>("axes"); - auto starts = op_attrs.Get>("starts"); - auto ends = op_attrs.Get>("ends"); - auto reshape = input_shape; - ngraph::Coordinate ng_start, ng_end; - int axis, start, end; - for (size_t i = 0; i < input_shape.size(); ++i) { - ng_start.push_back(0); - ng_end.push_back(input_shape[i]); - } - for (size_t i = 0; i < axes.size(); ++i) { - axis = input_shape[axes[i]]; - start = starts[i] < 0 ? (starts[i] + axis) : starts[i]; - end = ends[i] < 0 ? (ends[i] + axis) : ends[i]; - start = std::max(start, 0); - end = std::max(end, 0); - start = std::min(start, axis); - end = std::min(end, axis); - start = std::min(start, end); - ng_start[axes[i]] = start; - ng_end[axes[i]] = end; - reshape[axes[i]] = end - start; - } - std::vector axisVec(dout->get_shape().size()); - std::iota(axisVec.begin(), axisVec.end(), 0); - auto dout_reshape = std::make_shared( - dout, ngraph::AxisVector(axisVec), reshape); - - std::shared_ptr input0 = paddle::platform::CreateConstant( - dout->get_element_type(), input_shape, {0}); - - auto din = std::make_shared(input0, dout_reshape, - ng_start, ng_end); - platform::SetOutputNode(op, "Input@GRAD", din, ngb_node_map); -} -} // namespace ngraphs -} // namespace operators -} // namespace paddle - -REGISTER_NG_OP(slice, BuildSliceNode); -REGISTER_NG_OP(slice_grad, BuildSliceGradNode); diff --git a/paddle/fluid/operators/ngraph/ops/softmax_op.h b/paddle/fluid/operators/ngraph/ops/softmax_op.h deleted file mode 100644 index e1f6e8d3cf..0000000000 --- a/paddle/fluid/operators/ngraph/ops/softmax_op.h +++ /dev/null @@ -1,93 +0,0 @@ -/*Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
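The slice bridge normalizes starts/ends before building the nGraph Slice: negative indices wrap by the dimension size, then everything is clamped into [0, dim] with start <= end. The clamping sequence extracted as a small helper (illustrative):

#include <algorithm>

// Normalize one (start, end) pair against dimension size d,
// mirroring the clamping sequence in BuildSliceNode.
void NormalizeSlice(int d, int* start, int* end) {
  if (*start < 0) *start += d;
  if (*end < 0) *end += d;
  *start = std::min(std::max(*start, 0), d);
  *end = std::min(std::max(*end, 0), d);
  *start = std::min(*start, *end);
}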
*/ - -#pragma once - -#include -#include -#include -#include -#include "ngraph/ngraph.hpp" -#include "paddle/fluid/operators/ngraph/ops/elementwise_scalar_op.h" -#include "paddle/fluid/operators/ngraph/ops/op_bridge.h" -#include "paddle/fluid/platform/ngraph_helper.h" - -namespace paddle { -namespace operators { -namespace ngraphs { - -std::shared_ptr GetSoftmax(std::shared_ptr x, - int axis = -1) { - auto x_shape = x->get_shape(); - size_t rank = x_shape.size(); - size_t softmax_axis = axis; - if (axis < 0) softmax_axis = rank + axis; - - auto x_max = - std::make_shared(x, ngraph::AxisSet{softmax_axis}); - auto x_max_bcast = std::make_shared( - x_max, x_shape, ngraph::AxisSet{softmax_axis}); - auto x_shifted = x - x_max_bcast; - auto x_clipped = - paddle::operators::ngraphs::ElementwiseScalar( - -64., x_shifted); - auto softmax = std::make_shared( - x_clipped, ngraph::AxisSet{softmax_axis}); - return softmax; -} - -std::shared_ptr GetSoftmaxGrad(std::shared_ptr out, - std::shared_ptr dout, - int axis = -1) { - auto out_shape = out->get_shape(); - size_t rank = out_shape.size(); - size_t softmax_axis = axis; - if (axis < 0) softmax_axis = rank + axis; - - auto node_sum = std::make_shared( - out * dout, ngraph::AxisSet{softmax_axis}); - auto node_bcast = std::make_shared( - node_sum, out_shape, ngraph::AxisSet{softmax_axis}); - auto dx = (dout - node_bcast) * out; - return dx; -} - -void BuildSoftmaxNode( - const std::shared_ptr& op, - std::shared_ptr< - std::unordered_map>> - ngb_node_map) { - auto op_attrs = framework::AttrReader(op->Attrs()); - auto x = paddle::platform::GetInputNode(op, "X", ngb_node_map); - auto softmax = GetSoftmax(x, op_attrs.Get("axis")); - paddle::platform::SetOutputNode(op, "Out", softmax, ngb_node_map); -} - -void BuildSoftmaxGradNode( - const std::shared_ptr& op, - std::shared_ptr< - std::unordered_map>> - ngb_node_map) { - auto op_attrs = framework::AttrReader(op->Attrs()); - auto out = paddle::platform::GetInputNode(op, "Out", ngb_node_map); - auto dout = paddle::platform::GetInputNode(op, "Out@GRAD", ngb_node_map); - auto dx = GetSoftmaxGrad(out, dout, op_attrs.Get("axis")); - paddle::platform::SetOutputNode(op, "X@GRAD", dx, ngb_node_map); -} -} // namespace ngraphs -} // namespace operators -} // namespace paddle - -REGISTER_NG_OP(softmax, BuildSoftmaxNode); -REGISTER_NG_OP(softmax_grad, BuildSoftmaxGradNode); diff --git a/paddle/fluid/operators/ngraph/ops/softmax_with_cross_entropy_op.h b/paddle/fluid/operators/ngraph/ops/softmax_with_cross_entropy_op.h deleted file mode 100644 index a6bdf4de95..0000000000 --- a/paddle/fluid/operators/ngraph/ops/softmax_with_cross_entropy_op.h +++ /dev/null @@ -1,90 +0,0 @@ -/*Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
*/ - -#pragma once - -#include -#include -#include -#include -#include "ngraph/ngraph.hpp" -#include "paddle/fluid/operators/ngraph/ops/cross_entropy_op.h" -#include "paddle/fluid/operators/ngraph/ops/softmax_op.h" -#include "paddle/fluid/platform/ngraph_helper.h" - -namespace paddle { -namespace operators { -namespace ngraphs { - -void BuildSoftmaxWithCrossEntropyNode( - const std::shared_ptr& op, - std::shared_ptr< - std::unordered_map>> - ngb_node_map) { - auto logits = paddle::platform::GetInputNode(op, "Logits", ngb_node_map); - auto label = paddle::platform::GetInputNode(op, "Label", ngb_node_map); - auto softmax = paddle::operators::ngraphs::GetSoftmax(logits); - - auto op_attrs = framework::AttrReader(op->Attrs()); - const bool is_soft_label = op_attrs.Get("soft_label"); - int ignore_index = op_attrs.Get("ignore_index"); - auto xe = paddle::operators::ngraphs::GetCrossEntropy( - softmax, label, is_soft_label, ignore_index); - - paddle::platform::SetOutputNode(op, "Softmax", softmax, ngb_node_map); - paddle::platform::SetOutputNode(op, "Loss", xe, ngb_node_map); -} - -void BuildSoftmaxWithCrossEntropyGradNode( - const std::shared_ptr& op, - std::shared_ptr< - std::unordered_map>> - ngb_node_map) { - auto op_attrs = framework::AttrReader(op->Attrs()); - const bool is_soft_label = op_attrs.Get("soft_label"); - auto label = paddle::platform::GetInputNode(op, "Label", ngb_node_map); - auto softmax = paddle::platform::GetInputNode(op, "Softmax", ngb_node_map); - auto loss_grad = - paddle::platform::GetInputNode(op, "Loss@GRAD", ngb_node_map); - auto softmax_shape = softmax->get_shape(); - auto rank = softmax_shape.size(); - if (!is_soft_label) { - auto label_shape = label->get_shape(); - label_shape.pop_back(); - label = platform::NgReshaper(label, label_shape); - - label = - std::make_shared(label, softmax_shape, rank - 1); - } - - auto loss_grad_shape = loss_grad->get_shape(); - loss_grad_shape.pop_back(); - auto loss_grad_reshape = platform::NgReshaper(loss_grad, loss_grad_shape); - auto loss_grad_bcast = std::make_shared( - loss_grad_reshape, softmax_shape, ngraph::AxisSet{rank - 1}); - if (softmax->get_element_type() != label->get_element_type()) { - label = std::make_shared(label, - softmax->get_element_type()); - } - - auto logits_grad = loss_grad_bcast * (softmax - label); - paddle::platform::SetOutputNode(op, "Logits@GRAD", logits_grad, ngb_node_map); -} -} // namespace ngraphs -} // namespace operators -} // namespace paddle - -REGISTER_NG_OP(softmax_with_cross_entropy, BuildSoftmaxWithCrossEntropyNode); -REGISTER_NG_OP(softmax_with_cross_entropy_grad, - BuildSoftmaxWithCrossEntropyGradNode); diff --git a/paddle/fluid/operators/ngraph/ops/stack_op.h b/paddle/fluid/operators/ngraph/ops/stack_op.h deleted file mode 100644 index d0e9545fd7..0000000000 --- a/paddle/fluid/operators/ngraph/ops/stack_op.h +++ /dev/null @@ -1,56 +0,0 @@ -/*Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
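GetSoftmax above is the numerically stable formulation: subtract the per-axis max, clamp the shifted logits at -64 so exp() cannot underflow, then apply softmax; the cross-entropy builders reuse it. In scalar form over one row:

#include <algorithm>
#include <cmath>
#include <vector>

// Stable softmax over one row, with the same -64 clamp the bridge applies.
void StableSoftmax(std::vector<float>* row) {
  float mx = *std::max_element(row->begin(), row->end());
  float sum = 0.f;
  for (float& v : *row) {
    v = std::exp(std::max(v - mx, -64.f));
    sum += v;
  }
  for (float& v : *row) v /= sum;
}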
 */
-
-#pragma once
-
-#include <memory>
-#include <string>
-#include <unordered_map>
-#include <vector>
-#include "ngraph/ngraph.hpp"
-#include "paddle/fluid/operators/ngraph/ops/op_bridge.h"
-#include "paddle/fluid/platform/ngraph_helper.h"
-
-namespace paddle {
-namespace operators {
-namespace ngraphs {
-
-void BuildStackNode(
-    const std::shared_ptr<paddle::framework::OperatorBase>& op,
-    std::shared_ptr<
-        std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
-        ngb_node_map) {
-  auto op_attrs = framework::AttrReader(op->Attrs());
-  auto axis = op_attrs.Get<int>("axis");
-  std::vector<std::shared_ptr<ngraph::Node>> args;
-  for (auto& var_name_item : op->Inputs()) {
-    for (auto& var_name : var_name_item.second) {
-      auto& node = ngb_node_map->at(var_name);
-      auto shape = node->get_shape();
-      axis = (axis < 0) ? axis + shape.size() + 1 : axis;
-      shape.insert(shape.begin() + axis, 1);
-      std::vector<size_t> input_order(shape.size() - 1);
-      std::iota(std::begin(input_order), std::end(input_order), 0);
-      args.push_back(std::make_shared<ngraph::op::Reshape>(
-          node, ngraph::AxisVector(input_order), shape));
-    }
-  }
-  auto out = std::make_shared<ngraph::op::Concat>(args, axis);
-  platform::SetOutputNode(op, "Y", out, ngb_node_map);
-}
-}  // namespace ngraphs
-}  // namespace operators
-}  // namespace paddle
-
-REGISTER_NG_OP(stack, BuildStackNode);
diff --git a/paddle/fluid/operators/ngraph/ops/sum_op.h b/paddle/fluid/operators/ngraph/ops/sum_op.h
deleted file mode 100644
index 804f932d24..0000000000
--- a/paddle/fluid/operators/ngraph/ops/sum_op.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/*Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License. */
-
-#pragma once
-
-#include <memory>
-#include <string>
-#include <unordered_map>
-#include <vector>
-
-#include "ngraph/ngraph.hpp"
-#include "paddle/fluid/operators/ngraph/ops/op_bridge.h"
-#include "paddle/fluid/platform/ngraph_helper.h"
-
-namespace paddle {
-namespace operators {
-namespace ngraphs {
-
-void BuildSumNode(
-    const std::shared_ptr<paddle::framework::OperatorBase>& op,
-    std::shared_ptr<
-        std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
-        ngb_node_map) {
-  std::vector<std::string> op_inputs;
-  for (auto& var_name_item : op->Inputs()) {
-    for (auto& var_name : var_name_item.second) {
-      op_inputs.push_back(var_name);
-      PADDLE_ENFORCE_NE(
-          ngb_node_map->find(var_name), ngb_node_map->end(),
-          platform::errors::NotFound(
-              "op %s input varname %s is not found in var_node_map", op->Type(),
-              var_name));
-    }
-  }
-  std::shared_ptr<ngraph::Node>& sum = ngb_node_map->at(op_inputs[0]);
-  for (size_t k = 1; k < op_inputs.size(); ++k) {
-    std::shared_ptr<ngraph::Node>& nodek = ngb_node_map->at(op_inputs[k]);
-    nodek =
-        std::make_shared<ngraph::op::Convert>(nodek, sum->get_element_type());
-    sum = sum + nodek;
-  }
-  platform::SetOutputNode(op, "Out", sum, ngb_node_map);
-}
-}  // namespace ngraphs
-}  // namespace operators
-}  // namespace paddle
-
-REGISTER_NG_OP(sum, BuildSumNode);
diff --git a/paddle/fluid/operators/ngraph/ops/top_k_op.h b/paddle/fluid/operators/ngraph/ops/top_k_op.h
deleted file mode 100644
index cdc26f6afd..0000000000
--- a/paddle/fluid/operators/ngraph/ops/top_k_op.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/*Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#pragma once - -#include -#include -#include -#include "ngraph/ngraph.hpp" -#include "paddle/fluid/operators/ngraph/ops/op_bridge.h" -#include "paddle/fluid/platform/ngraph_helper.h" - -namespace paddle { -namespace operators { -namespace ngraphs { - -void BuildTopKNode( - const std::shared_ptr& op, - std::shared_ptr< - std::unordered_map>> - ngb_node_map) { - auto op_attrs = paddle::framework::AttrReader(op->Attrs()); - int k = op_attrs.Get("k"); - auto input = paddle::platform::GetInputNode(op, "X", ngb_node_map); - auto top_k = std::make_shared( - input, input->get_shape().size() - 1, ngraph::element::i64, k); - std::shared_ptr indices = - std::make_shared(top_k, 0); - std::shared_ptr out = - std::make_shared(top_k, 1); - paddle::platform::SetOutputNode(op, "Indices", indices, ngb_node_map); - paddle::platform::SetOutputNode(op, "Out", out, ngb_node_map); -} -} // namespace ngraphs -} // namespace operators -} // namespace paddle - -REGISTER_NG_OP(top_k, BuildTopKNode); diff --git a/paddle/fluid/operators/ngraph/ops/transpose_op.h b/paddle/fluid/operators/ngraph/ops/transpose_op.h deleted file mode 100644 index 7d9428977a..0000000000 --- a/paddle/fluid/operators/ngraph/ops/transpose_op.h +++ /dev/null @@ -1,101 +0,0 @@ -/*Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
*/ - -#pragma once - -#include -#include -#include -#include -#include -#include - -#include "ngraph/ngraph.hpp" -#include "paddle/fluid/operators/ngraph/ops/op_bridge.h" -#include "paddle/fluid/platform/ngraph_helper.h" - -namespace paddle { -namespace operators { -namespace ngraphs { - -template -static void BuildTransposeNode( - const std::shared_ptr& op, - std::shared_ptr< - std::unordered_map>> - ngb_node_map) { - auto input = platform::GetInputNode(op, "X", ngb_node_map); - auto op_attrs = framework::AttrReader(op->Attrs()); - std::vector axis = op_attrs.Get>("axis"); - - auto input_shape = input->get_shape(); - ngraph::Shape x_reshape_shape; - ngraph::AxisVector axis_vec; - for (auto& v : axis) { - axis_vec.push_back(v); - x_reshape_shape.push_back(input_shape[v]); - } - std::shared_ptr x_transpose = - std::make_shared(input, axis_vec, input_shape); - x_transpose = platform::NgReshaper(x_transpose, x_reshape_shape); - platform::SetOutputNode(op, "Out", x_transpose, ngb_node_map); - if (is_v2) { - ngraph::Shape input_xshape(input_shape.size() + 1); - input_xshape[0] = 0; - std::copy(input_shape.begin(), input_shape.end(), input_xshape.begin() + 1); - auto xshape_node = std::make_shared( - input->get_element_type(), input_xshape, std::vector{}); - platform::SetOutputNode(op, "XShape", xshape_node, ngb_node_map); - } -} - -template -static void BuildTransposeGradNode( - const std::shared_ptr& op, - std::shared_ptr< - std::unordered_map>> - ngb_node_map) { - auto input = platform::GetInputNode(op, "Out@GRAD", ngb_node_map); - auto op_attrs = framework::AttrReader(op->Attrs()); - std::vector axis = op_attrs.Get>("axis"); - - ngraph::AxisVector axis_vec(axis.size()); - for (size_t i = 0; i < axis.size(); ++i) { - axis_vec[axis.at(i)] = i; - } - - ngraph::Shape out_shape; - if (is_v2) { - auto& xshape = - platform::GetInputNode(op, "XShape", ngb_node_map)->get_shape(); - out_shape.resize(xshape.size() - 1); - std::copy(xshape.begin() + 1, xshape.end(), out_shape.begin()); - } else { - out_shape = platform::GetInputNode(op, "X", ngb_node_map)->get_shape(); - } - - std::shared_ptr x_transpose = - std::make_shared(input, axis_vec, out_shape); - - platform::SetOutputNode(op, "X@GRAD", x_transpose, ngb_node_map); -} - -} // namespace ngraphs -} // namespace operators -} // namespace paddle - -REGISTER_NG_OP(transpose, BuildTransposeNode); -REGISTER_NG_OP(transpose_grad, BuildTransposeGradNode); -REGISTER_NG_OP(transpose2, BuildTransposeNode); -REGISTER_NG_OP(transpose2_grad, BuildTransposeGradNode); diff --git a/paddle/fluid/platform/ngraph_helper.h b/paddle/fluid/platform/ngraph_helper.h deleted file mode 100644 index 2bacd5bd4c..0000000000 --- a/paddle/fluid/platform/ngraph_helper.h +++ /dev/null @@ -1,199 +0,0 @@ -/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
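The transpose grad builder inverts the forward permutation by scattering indices (axis_vec[axis.at(i)] = i) rather than recomputing it by search. The inversion as a standalone sketch:

#include <cstddef>
#include <vector>

// Invert a permutation: if the forward pass moved source axis axis[i]
// to position i, the gradient needs the inverse map, as in
// BuildTransposeGradNode above.
std::vector<size_t> InvertPermutation(const std::vector<int>& axis) {
  std::vector<size_t> inv(axis.size());
  for (size_t i = 0; i < axis.size(); ++i) inv[axis[i]] = i;
  return inv;
}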
*/ - -#ifdef PADDLE_WITH_NGRAPH -#pragma once - -#include -#include -#include -#include -#include -#include "ngraph/ngraph.hpp" - -namespace paddle { -namespace platform { - -std::shared_ptr Nhwc2Nchw(std::shared_ptr in) { - auto in_shape = in->get_shape(); - in_shape[0] = in->get_shape()[0]; - in_shape[1] = in->get_shape()[3]; - in_shape[2] = in->get_shape()[1]; - in_shape[3] = in->get_shape()[2]; - ngraph::AxisVector axis_vec = {0, 3, 1, 2}; - return std::make_shared(in, axis_vec, in_shape); -} - -std::shared_ptr Nchw2Nhwc(std::shared_ptr in) { - auto in_shape = in->get_shape(); - in_shape[0] = in->get_shape()[0]; - in_shape[1] = in->get_shape()[2]; - in_shape[2] = in->get_shape()[3]; - in_shape[3] = in->get_shape()[1]; - ngraph::AxisVector axis_vec = {0, 2, 3, 1}; - return std::make_shared(in, axis_vec, in_shape); -} - -ngraph::Shape FlattenTo1d(ngraph::Shape sh, int num) { - auto x1 = std::accumulate(std::begin(sh), std::end(sh) + num, 1, - std::multiplies()); - size_t x1_l = (size_t)x1; - return ngraph::Shape{x1_l}; -} - -ngraph::Shape FlattenTo2d(ngraph::Shape sh, int num) { - auto x1 = std::accumulate(std::begin(sh), std::begin(sh) + num, 1, - std::multiplies()); - auto x2 = std::accumulate(std::begin(sh) + num, std::end(sh), 1, - std::multiplies()); - size_t x1_l = static_cast(x1); - size_t x2_l = static_cast(x2); - return ngraph::Shape{x1_l, x2_l}; -} - -std::shared_ptr NgReshaper(std::shared_ptr input, - ngraph::Shape shape) { - std::vector input_order(input->get_shape().size()); - std::iota(std::begin(input_order), std::end(input_order), 0); - return std::make_shared( - input, ngraph::AxisVector(input_order), shape); -} - -std::shared_ptr GetNode( - const std::shared_ptr& op, - const std::string name, const paddle::framework::VariableNameMap& var_map, - std::shared_ptr< - std::unordered_map>> - ngb_node_map) { - auto& var_names = var_map.at(name); - if (var_names.size() == 0) return nullptr; - if (ngb_node_map->find(var_names[0]) != ngb_node_map->end()) { - return (*ngb_node_map)[var_names[0]]; - } else { - return nullptr; - } -} - -std::shared_ptr GetInputNode( - const std::shared_ptr& op, - const std::string name, - std::shared_ptr< - std::unordered_map>> - ngb_node_map) { - return GetNode(op, name, op->Inputs(), ngb_node_map); -} - -std::shared_ptr GetOutputNode( - const std::shared_ptr& op, - const std::string name, - std::shared_ptr< - std::unordered_map>> - ngb_node_map) { - return GetNode(op, name, op->Outputs(), ngb_node_map); -} - -template -std::shared_ptr CreateConstant(const ngraph::element::Type& type, - ngraph::Shape shape, - std::initializer_list values) { - std::shared_ptr result; - if (values.size() == 1 && shape != ngraph::Shape{} && // NOLINT - shape != ngraph::Shape{1}) { - result = std::make_shared(type, ngraph::Shape{}, - std::vector{values}); - ngraph::AxisSet axis_set; - for (size_t i = 0; i < shape.size(); ++i) axis_set.insert(i); - result = std::make_shared(result, shape, axis_set); - } else { - result = std::make_shared(type, shape, - std::vector{values}); - } - return result; -} - -void SetOutputNode( - const std::shared_ptr& op, - const std::string name, std::shared_ptr node, - std::shared_ptr< - std::unordered_map>> - ngb_node_map) { - auto& var_names = op->Outputs().at(name); - if (var_names.size() == 1) { - (*ngb_node_map)[var_names[0]] = node; - } else if (var_names.size() == 0) { - (*ngb_node_map)[""] = node; - } else { - PADDLE_THROW("name %s has more than 1 var_names.", name); - } -} - -bool HasOutput(const std::shared_ptr& op, - const 
std::string name) { - auto& outputs = op->Outputs(); - if (outputs.find(name) == outputs.end()) return false; - return outputs.at(name).size() > 0; -} - -inline void GetMidDims(const ngraph::Shape& x_shape, - const ngraph::Shape& y_shape, int axis, int* pre, int* n, - int* post) { - *pre = 1; - *n = 1; - *post = 1; - for (int i = 0; i < axis; ++i) { - (*pre) *= x_shape[i]; - } - - for (size_t i = 0; i < y_shape.size(); ++i) { - PADDLE_ENFORCE_EQ(x_shape[i + axis], y_shape[i], - "Broadcast dimension mismatch."); - (*n) *= y_shape[i]; - } - - for (size_t i = axis + y_shape.size(); i < x_shape.size(); ++i) { - (*post) *= x_shape[i]; - } -} - -inline void TrimTrailingSingularDims(ngraph::Shape* shape) { - // Remove trailing dimensions of size 1 for y - auto actual_shape_size = shape->size(); - for (; actual_shape_size != 0; --actual_shape_size) { - if ((*shape)[actual_shape_size - 1] != 1) { - break; - } else { - shape->pop_back(); - } - } -} - -ngraph::element::Type GetNgType(paddle::framework::proto::VarType::Type dtype) { - ngraph::element::Type ng_dtype; - if (dtype == paddle::framework::proto::VarType::FP32) { - ng_dtype = ngraph::element::f32; - } else if (dtype == paddle::framework::proto::VarType::FP64) { - ng_dtype = ngraph::element::f64; - } else if (dtype == paddle::framework::proto::VarType::INT64) { - ng_dtype = ngraph::element::i64; - } else if (dtype == paddle::framework::proto::VarType::INT32) { - ng_dtype = ngraph::element::i32; - } else { - PADDLE_THROW("unsupported data type: %s", dtype); - } - return ng_dtype; -} -} // namespace platform -} // namespace paddle - -#endif diff --git a/paddle/fluid/pybind/global_value_getter_setter.cc b/paddle/fluid/pybind/global_value_getter_setter.cc index 839bd1f464..5178b5f89a 100644 --- a/paddle/fluid/pybind/global_value_getter_setter.cc +++ b/paddle/fluid/pybind/global_value_getter_setter.cc @@ -331,7 +331,7 @@ void BindGlobalValueGetterSetter(pybind11::module *module) { static void RegisterGlobalVarGetterSetter() { REGISTER_PRIVATE_GLOBAL_VAR(/*is_writable=*/false, FLAGS_use_mkldnn, - FLAGS_use_ngraph, FLAGS_free_idle_chunk, + FLAGS_free_idle_chunk, FLAGS_free_when_no_cache_hit); REGISTER_PUBLIC_GLOBAL_VAR( diff --git a/paddle/fluid/pybind/inference_api.cc b/paddle/fluid/pybind/inference_api.cc index 3a19ad2d02..4803aabf94 100644 --- a/paddle/fluid/pybind/inference_api.cc +++ b/paddle/fluid/pybind/inference_api.cc @@ -550,7 +550,6 @@ void BindPaddlePassBuilder(py::module *m) { .def(py::init &>()) .def("enable_cudnn", &PassStrategy::EnableCUDNN) .def("enable_mkldnn", &PassStrategy::EnableMKLDNN) - .def("enable_ngraph", &PassStrategy::EnableNgraph) .def("enable_mkldnn_quantizer", &PassStrategy::EnableMkldnnQuantizer) .def("use_gpu", &PassStrategy::use_gpu); @@ -559,7 +558,6 @@ void BindPaddlePassBuilder(py::module *m) { .def(py::init()) .def("enable_cudnn", &CpuPassStrategy::EnableCUDNN) .def("enable_mkldnn", &CpuPassStrategy::EnableMKLDNN) - .def("enable_ngraph", &CpuPassStrategy::EnableNgraph) .def("enable_mkldnn_quantizer", &CpuPassStrategy::EnableMkldnnQuantizer); py::class_(*m, "GpuPassStrategy") @@ -567,7 +565,6 @@ void BindPaddlePassBuilder(py::module *m) { .def(py::init()) .def("enable_cudnn", &GpuPassStrategy::EnableCUDNN) .def("enable_mkldnn", &GpuPassStrategy::EnableMKLDNN) - .def("enable_ngraph", &GpuPassStrategy::EnableNgraph) .def("enable_mkldnn_quantizer", &GpuPassStrategy::EnableMkldnnQuantizer); } } // namespace diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc index 0d165dc07f..a5c99aa6fc 100644 
--- a/paddle/fluid/pybind/pybind.cc +++ b/paddle/fluid/pybind/pybind.cc @@ -94,9 +94,6 @@ limitations under the License. */ #include "pybind11/stl.h" DECLARE_bool(use_mkldnn); -#ifdef PADDLE_WITH_NGRAPH -DECLARE_bool(use_ngraph); -#endif // disable auto conversion to list in Python PYBIND11_MAKE_OPAQUE(paddle::framework::LoDTensorArray); @@ -120,14 +117,6 @@ bool IsCompiledWithMKLDNN() { #endif } -bool IsCompiledWithNGRAPH() { -#ifndef PADDLE_WITH_NGRAPH - return false; -#else - return true; -#endif -} - bool IsCompiledWithBrpc() { #ifndef PADDLE_WITH_DISTRIBUTE return false; @@ -1495,7 +1484,6 @@ All parameter, weight, gradient are variables in Paddle. m.def("init_devices", [](bool init_p2p) { framework::InitDevices(init_p2p); }); - m.def("is_compiled_with_ngraph", IsCompiledWithNGRAPH); m.def("is_compiled_with_cuda", IsCompiledWithCUDA); m.def("is_compiled_with_mkldnn", IsCompiledWithMKLDNN); m.def("is_compiled_with_brpc", IsCompiledWithBrpc); diff --git a/paddle/scripts/paddle_build.sh b/paddle/scripts/paddle_build.sh index ac13ad5ec8..b7f13ba10c 100755 --- a/paddle/scripts/paddle_build.sh +++ b/paddle/scripts/paddle_build.sh @@ -198,7 +198,6 @@ function cmake_base() { -DWITH_AMD_GPU=${WITH_AMD_GPU:-OFF} -DWITH_DISTRIBUTE=${distibuted_flag} -DWITH_MKL=${WITH_MKL:-ON} - -DWITH_NGRAPH=${WITH_NGRAPH:-OFF} -DWITH_AVX=${WITH_AVX:-OFF} -DWITH_GOLANG=${WITH_GOLANG:-OFF} -DCUDA_ARCH_NAME=${CUDA_ARCH_NAME:-All} @@ -231,7 +230,6 @@ EOF -DWITH_AMD_GPU=${WITH_AMD_GPU:-OFF} \ -DWITH_DISTRIBUTE=${distibuted_flag} \ -DWITH_MKL=${WITH_MKL:-ON} \ - -DWITH_NGRAPH=${WITH_NGRAPH:-OFF} \ -DWITH_AVX=${WITH_AVX:-OFF} \ -DNOAVX_CORE_FILE=${NOAVX_CORE_FILE:-""} \ -DWITH_GOLANG=${WITH_GOLANG:-OFF} \ diff --git a/python/paddle/fluid/__init__.py b/python/paddle/fluid/__init__.py index 16e411fa24..951e39e1c4 100644 --- a/python/paddle/fluid/__init__.py +++ b/python/paddle/fluid/__init__.py @@ -183,9 +183,6 @@ def __bootstrap__(): if core.is_compiled_with_mkldnn(): read_env_flags.append('use_mkldnn') - if core.is_compiled_with_ngraph(): - read_env_flags.append('use_ngraph') - if core.is_compiled_with_dist(): #env for rpc read_env_flags.append('rpc_deadline') diff --git a/python/paddle/fluid/tests/unittests/CMakeLists.txt b/python/paddle/fluid/tests/unittests/CMakeLists.txt index b2983234b8..37f12d2109 100644 --- a/python/paddle/fluid/tests/unittests/CMakeLists.txt +++ b/python/paddle/fluid/tests/unittests/CMakeLists.txt @@ -347,10 +347,6 @@ set_tests_properties(test_parallel_executor_seresnext_with_fuse_all_reduce_cpu P add_subdirectory(sequence) add_subdirectory(dygraph_to_static) -if (WITH_NGRAPH) - add_subdirectory(ngraph) -endif() - if (WITH_MKLDNN) add_subdirectory(mkldnn) endif() diff --git a/python/paddle/fluid/tests/unittests/ngraph/CMakeLists.txt b/python/paddle/fluid/tests/unittests/ngraph/CMakeLists.txt deleted file mode 100644 index 5ed2d0aa80..0000000000 --- a/python/paddle/fluid/tests/unittests/ngraph/CMakeLists.txt +++ /dev/null @@ -1,6 +0,0 @@ -file(GLOB TEST_OPS RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "test_*.py") -string(REPLACE ".py" "" TEST_OPS "${TEST_OPS}") - -foreach(TEST_OP ${TEST_OPS}) - py_test_modules(${TEST_OP} MODULES ${TEST_OP} ENVS FLAGS_use_ngraph=true) -endforeach(TEST_OP) diff --git a/python/paddle/fluid/tests/unittests/ngraph/__init__.py b/python/paddle/fluid/tests/unittests/ngraph/__init__.py deleted file mode 100644 index b94a21a7e4..0000000000 --- a/python/paddle/fluid/tests/unittests/ngraph/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright (c) 2018 PaddlePaddle Authors. 
All Rights Reserve. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_accuracy_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_accuracy_ngraph_op.py deleted file mode 100644 index e837e504a4..0000000000 --- a/python/paddle/fluid/tests/unittests/ngraph/test_accuracy_ngraph_op.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import print_function - -import unittest, sys -sys.path.append("../") -from test_accuracy_op import TestAccuracyOp - -if __name__ == '__main__': - unittest.main() diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_activation_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_activation_ngraph_op.py deleted file mode 100644 index 19e1c8cccb..0000000000 --- a/python/paddle/fluid/tests/unittests/ngraph/test_activation_ngraph_op.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import print_function - -import unittest, sys -sys.path.append("../") -import numpy as np -from op_test import OpTest -from test_activation_op import TestAbs, TestGelu, TestSigmoid, TestSquare, TestRelu, TestTanh - - -class TestNGRAPHReluDim4(TestRelu): - def setUp(self): - super(TestNGRAPHReluDim4, self).setUp() - - x = np.random.uniform(-1, 1, [2, 4, 3, 5]).astype("float32") - # The same reason with TestAbs - x[np.abs(x) < 0.005] = 0.02 - out = np.maximum(x, 0) - - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} - self.outputs = {'Out': out} - - -class TestNGRAPHTanhDim4(TestTanh): - def setUp(self): - super(TestNGRAPHTanhDim4, self).setUp() - - self.inputs = { - 'X': np.random.uniform(0.1, 1, [2, 4, 3, 5]).astype("float32") - } - self.outputs = {'Out': np.tanh(self.inputs['X'])} - - -if __name__ == '__main__': - unittest.main() diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_adam_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_adam_ngraph_op.py deleted file mode 100644 index cdba579d8d..0000000000 --- a/python/paddle/fluid/tests/unittests/ngraph/test_adam_ngraph_op.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import print_function - -import unittest, sys -sys.path.append("../") -from test_adam_op import TestAdamOp1, TestAdamOp2, TestAdamOpMultipleSteps, TestSparseAdamOp - -if __name__ == "__main__": - unittest.main() diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_assign_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_assign_ngraph_op.py deleted file mode 100644 index 2c3e7ee6cc..0000000000 --- a/python/paddle/fluid/tests/unittests/ngraph/test_assign_ngraph_op.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import print_function - -import unittest, sys -sys.path.append("../") -from test_assign_op import TestAssignOp - -if __name__ == '__main__': - unittest.main() diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_batch_norm_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_batch_norm_ngraph_op.py deleted file mode 100644 index a6b6e90551..0000000000 --- a/python/paddle/fluid/tests/unittests/ngraph/test_batch_norm_ngraph_op.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import print_function - -import unittest, sys -sys.path.append("../") -from test_batch_norm_op import TestBatchNormOpTraining, TestBatchNormOpInference -from op_test import _set_use_system_allocator - -_set_use_system_allocator(True) - -if __name__ == '__main__': - unittest.main() diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_cast_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_cast_ngraph_op.py deleted file mode 100644 index 8b8a52258b..0000000000 --- a/python/paddle/fluid/tests/unittests/ngraph/test_cast_ngraph_op.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import print_function - -import unittest, sys -sys.path.append("../") -from test_cast_op import TestCastOp1 - -if __name__ == '__main__': - unittest.main() diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_compare_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_compare_ngraph_op.py deleted file mode 100644 index 54dab6c475..0000000000 --- a/python/paddle/fluid/tests/unittests/ngraph/test_compare_ngraph_op.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import print_function - -import unittest -import sys -sys.path.append("../") -from test_compare_op import * - -if __name__ == '__main__': - unittest.main() diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_concat_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_concat_ngraph_op.py deleted file mode 100644 index de831703b7..0000000000 --- a/python/paddle/fluid/tests/unittests/ngraph/test_concat_ngraph_op.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import print_function - -import unittest, sys -sys.path.append("../") -from test_concat_op import TestConcatOp, TestConcatOp2, TestConcatOp3, TestConcatOp4, TestConcatOp5 - -if __name__ == '__main__': - unittest.main() diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_conv2d_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_conv2d_ngraph_op.py deleted file mode 100644 index 7a9adcf4cd..0000000000 --- a/python/paddle/fluid/tests/unittests/ngraph/test_conv2d_ngraph_op.py +++ /dev/null @@ -1,68 +0,0 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import print_function - -import unittest, sys -sys.path.append("../") -from test_conv2d_op import TestConv2dOp, TestWithPad, TestWithStride, TestWithGroup, TestWith1x1, TestWithInput1x1Filter1x1, TestDepthwiseConv, TestDepthwiseConv2, TestDepthwiseConv3, TestDepthwiseConvWithDilation, TestDepthwiseConvWithDilation2 -import numpy as np - - -class TestNGRAPHWithStride(TestWithStride): - def init_test_case(self): - super(TestNGRAPHWithStride, self).init_test_case() - self.use_cuda = False - self.dtype = np.float32 - - -class TestNGRAPHDepthwiseConv(TestDepthwiseConv): - def init_test_case(self): - super(TestNGRAPHDepthwiseConv, self).init_test_case() - self.use_cuda = False - self.dtype = np.float32 - - -class TestNGRAPHDepthwiseConv2(TestDepthwiseConv2): - def init_test_case(self): - super(TestNGRAPHDepthwiseConv2, self).init_test_case() - self.use_cuda = False - self.dtype = np.float32 - - -class TestNGRAPHDepthwiseConv3(TestDepthwiseConv3): - def init_test_case(self): - super(TestNGRAPHDepthwiseConv3, self).init_test_case() - self.use_cuda = False - self.dtype = np.float32 - - -class TestNGRAPHDepthwiseConvWithDilation(TestDepthwiseConvWithDilation): - def init_test_case(self): - super(TestNGRAPHDepthwiseConvWithDilation, self).init_test_case() - self.use_cuda = False - self.dtype = np.float32 - - -class TestNGRAPHDepthwiseConvWithDilation2(TestDepthwiseConvWithDilation2): - def init_test_case(self): - super(TestNGRAPHDepthwiseConvWithDilation2, self).init_test_case() - self.use_cuda = False - self.dtype = np.float32 - - -del TestWithStride, TestDepthwiseConv, TestDepthwiseConv2, TestDepthwiseConv3, TestDepthwiseConvWithDilation, TestDepthwiseConvWithDilation2 - -if __name__ == '__main__': - unittest.main() diff --git 
a/python/paddle/fluid/tests/unittests/ngraph/test_cross_entropy_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_cross_entropy_ngraph_op.py deleted file mode 100644 index 2a8edec7e7..0000000000 --- a/python/paddle/fluid/tests/unittests/ngraph/test_cross_entropy_ngraph_op.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import print_function - -import unittest, sys -sys.path.append("../") -from test_cross_entropy_op import TestCrossEntropyOp, TestCrossEntropyOp2, TestCrossEntropyOp3, TestCrossEntropyOp4, TestCrossEntropyOp5, TestCrossEntropyOp6, TestCrossEntropyOp7 - -if __name__ == "__main__": - unittest.main() diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_dropout_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_dropout_ngraph_op.py deleted file mode 100644 index 484e76a896..0000000000 --- a/python/paddle/fluid/tests/unittests/ngraph/test_dropout_ngraph_op.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import print_function - -import unittest, sys -sys.path.append("../") -from test_dropout_op import TestDropoutOp, TestDropoutOp2, TestDropoutOp3, TestDropoutOp4, TestDropoutOp5, TestDropoutOp6, TestDropoutOp7, TestDropoutOp8, TestDropoutOp9 - -if __name__ == '__main__': - unittest.main() diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_elementwise_add_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_elementwise_add_ngraph_op.py deleted file mode 100644 index 32fef44cd4..0000000000 --- a/python/paddle/fluid/tests/unittests/ngraph/test_elementwise_add_ngraph_op.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import print_function - -import unittest, sys -sys.path.append("../") -from test_elementwise_add_op import TestElementwiseAddOp, TestElementwiseAddOp_broadcast_0 - -if __name__ == '__main__': - unittest.main() diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_elementwise_div_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_elementwise_div_ngraph_op.py deleted file mode 100644 index 55a2a05e23..0000000000 --- a/python/paddle/fluid/tests/unittests/ngraph/test_elementwise_div_ngraph_op.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import print_function - -import unittest, sys -sys.path.append("../") -from test_elementwise_div_op import ElementwiseDivOp, TestElementwiseDivOp_scalar, TestElementwiseDivOp_Vector, TestElementwiseDivOp_broadcast_0, TestElementwiseDivOp_broadcast_1, TestElementwiseDivOp_broadcast_2, TestElementwiseDivOp_broadcast_3 - -if __name__ == '__main__': - unittest.main() diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_elementwise_max_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_elementwise_max_ngraph_op.py deleted file mode 100644 index c680241720..0000000000 --- a/python/paddle/fluid/tests/unittests/ngraph/test_elementwise_max_ngraph_op.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import print_function - -import unittest, sys -sys.path.append("../") -from test_elementwise_max_op import TestElementwiseMaxOp_scalar, TestElementwiseMaxOp_Vector, TestElementwiseMaxOp_broadcast_0 - -if __name__ == '__main__': - unittest.main() diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_elementwise_min_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_elementwise_min_ngraph_op.py deleted file mode 100644 index 443445288a..0000000000 --- a/python/paddle/fluid/tests/unittests/ngraph/test_elementwise_min_ngraph_op.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import print_function - -import unittest, sys -sys.path.append("../") -from test_elementwise_min_op import TestElementwiseMinOp_scalar, TestElementwiseMinOp_Vector, TestElementwiseMinOp_broadcast_0 - -if __name__ == '__main__': - unittest.main() diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_elementwise_mul_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_elementwise_mul_ngraph_op.py deleted file mode 100644 index 4f3d913d38..0000000000 --- a/python/paddle/fluid/tests/unittests/ngraph/test_elementwise_mul_ngraph_op.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import print_function - -import unittest, sys -sys.path.append("../") -from test_elementwise_mul_op import TestElementwiseMulOp_scalar, TestElementwiseMulOp_Vector, TestElementwiseMulOp_broadcast_0, TestElementwiseMulOp_broadcast_1, TestElementwiseMulOp_broadcast_2, TestElementwiseMulOp_broadcast_3 - -if __name__ == '__main__': - unittest.main() diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_elementwise_pow_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_elementwise_pow_ngraph_op.py deleted file mode 100644 index 1601de2313..0000000000 --- a/python/paddle/fluid/tests/unittests/ngraph/test_elementwise_pow_ngraph_op.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import print_function - -import unittest, sys -sys.path.append("../") -from test_elementwise_pow_op import TestElementwisePowOp_scalar, TestElementwisePowOp_tensor, TestElementwisePowOp_broadcast_0 - -if __name__ == '__main__': - unittest.main() diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_elementwise_sub_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_elementwise_sub_ngraph_op.py deleted file mode 100644 index fe29008f5e..0000000000 --- a/python/paddle/fluid/tests/unittests/ngraph/test_elementwise_sub_ngraph_op.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import print_function - -import unittest, sys -sys.path.append("../") -from test_elementwise_sub_op import TestElementwiseSubOp_scalar, TestElementwiseSubOp_Vector, TestElementwiseSubOp_broadcast_0 - -if __name__ == '__main__': - unittest.main() diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_fill_constant_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_fill_constant_ngraph_op.py deleted file mode 100644 index 1ef14b5244..0000000000 --- a/python/paddle/fluid/tests/unittests/ngraph/test_fill_constant_ngraph_op.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import print_function - -import unittest, sys -sys.path.append("../") -import numpy as np -from test_fill_constant_op import TestFillConstantOp1, TestFillConstantOp2, TestFillConstantOpWithSelectedRows - - -class TestNGRAPHFillConstantFP64(TestFillConstantOp1): - def setUp(self): - super(TestNGRAPHFillConstantFP64, self).setUp() - - self.attrs = {'shape': [123, 92], 'value': 3.8, 'dtype': 6} - self.outputs = {'Out': np.full((123, 92), 3.8)} - - -class TestNGRAPHFillConstantINT32(TestFillConstantOp2): - def setUp(self): - super(TestNGRAPHFillConstantINT32, self).setUp() - - self.attrs = {'shape': [123, 92], 'dtype': 2} - self.outputs = {'Out': np.full((123, 92), 0)} - - -class TestNGRAPHFillConstantINT64(TestFillConstantOp2): - def setUp(self): - super(TestNGRAPHFillConstantINT64, self).setUp() - - self.attrs = {'shape': [123, 92], 'dtype': 3} - self.outputs = {'Out': np.full((123, 92), 0)} - - -if __name__ == "__main__": - unittest.main() diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_fill_zeros_like_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_fill_zeros_like_ngraph_op.py deleted file mode 100644 index 32f72ada81..0000000000 --- a/python/paddle/fluid/tests/unittests/ngraph/test_fill_zeros_like_ngraph_op.py +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from __future__ import print_function - -import unittest, sys -sys.path.append("../") -from test_fill_zeros_like_op import TestFillZerosLikeOp - -if __name__ == "__main__": - unittest.main() diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_gather_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_gather_ngraph_op.py deleted file mode 100644 index 403145dd73..0000000000 --- a/python/paddle/fluid/tests/unittests/ngraph/test_gather_ngraph_op.py +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-from __future__ import print_function - -import unittest, sys -sys.path.append("../") -from test_gather_op import TestGatherOp, TestCase1 - -if __name__ == "__main__": - unittest.main() diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_increment_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_increment_ngraph_op.py deleted file mode 100644 index 33e9520d00..0000000000 --- a/python/paddle/fluid/tests/unittests/ngraph/test_increment_ngraph_op.py +++ /dev/null @@ -1,46 +0,0 @@ -# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import print_function - -import unittest, sys -sys.path.append("../") -import numpy as np -from op_test import OpTest - - -class TestNGRAPHIncrementOp(OpTest): - def setUp(self): - self.op_type = "increment" - self.dtype = np.float32 - self.init_dtype_type() - self.inputs = {'X': np.random.random(1).astype(self.dtype)} - self.attrs = {'step': 2.0} - self.outputs = { - 'Out': self.inputs['X'] + self.dtype(self.attrs['step']) - } - self._cpu_only = True - - def init_dtype_type(self): - pass - - def test_check_output(self): - self.check_output() - - def test_check_grad(self): - self.check_grad(['X'], 'Out', check_dygraph=False) - - -if __name__ == "__main__": - unittest.main() diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_layer_norm_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_layer_norm_ngraph_op.py deleted file mode 100644 index ffdc64a230..0000000000 --- a/python/paddle/fluid/tests/unittests/ngraph/test_layer_norm_ngraph_op.py +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from __future__ import print_function - -import unittest, sys -sys.path.append("../") -from test_layer_norm_op import TestLayerNormOp - - -class TestLayerNormNGRAPHOp(TestLayerNormOp): - def setUp(self): - super(TestLayerNormNGRAPHOp, self).setUp() - self.use_cudnn = False - - -del TestLayerNormOp - -if __name__ == "__main__": - unittest.main() diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_logical_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_logical_ngraph_op.py deleted file mode 100644 index 01f6008dba..0000000000 --- a/python/paddle/fluid/tests/unittests/ngraph/test_logical_ngraph_op.py +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import print_function - -import unittest, sys -sys.path.append("../") -import numpy as np - -from test_logical_op import * - -if __name__ == '__main__': - unittest.main() diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_lookup_table_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_lookup_table_ngraph_op.py deleted file mode 100644 index d6ec4b2232..0000000000 --- a/python/paddle/fluid/tests/unittests/ngraph/test_lookup_table_ngraph_op.py +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import print_function -import unittest, sys -sys.path.append("../") -from test_lookup_table_op import TestLookupTableOp, TestLookupTableOpWithTensorIds, TestLookupTableOpWithPadding, TestLookupTableOpWithTensorIdsAndPadding, TestLookupTableWIsSelectedRows, TestLookupTableWithTensorIdsWIsSelectedRows - -if __name__ == "__main__": - unittest.main() diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_lrn_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_lrn_ngraph_op.py deleted file mode 100644 index 88d9e9c86f..0000000000 --- a/python/paddle/fluid/tests/unittests/ngraph/test_lrn_ngraph_op.py +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import print_function - -import unittest, sys -sys.path.append("../") -from test_lrn_op import TestLRNOp - - -class TestLRNNGRAPHOp(TestLRNOp): - def test_check_output(self): - self.check_output(atol=0.002) - - -del TestLRNOp - -if __name__ == '__main__': - unittest.main() diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_matmul_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_matmul_ngraph_op.py deleted file mode 100644 index 4c5772c5f0..0000000000 --- a/python/paddle/fluid/tests/unittests/ngraph/test_matmul_ngraph_op.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import print_function - -import unittest, sys -sys.path.append("../") -from test_matmul_op import * - -if __name__ == '__main__': - unittest.main() diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_mean_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_mean_ngraph_op.py deleted file mode 100644 index 0ac0e72ac7..0000000000 --- a/python/paddle/fluid/tests/unittests/ngraph/test_mean_ngraph_op.py +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from __future__ import print_function - -import unittest, sys -sys.path.append("../") -from test_mean_op import TestMeanOp - -if __name__ == "__main__": - unittest.main() diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_momentum_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_momentum_ngraph_op.py deleted file mode 100644 index 1bbcbfb2fe..0000000000 --- a/python/paddle/fluid/tests/unittests/ngraph/test_momentum_ngraph_op.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import print_function - -import unittest, sys -sys.path.append("../") -from test_momentum_op import TestMomentumOp1, TestMomentumOp2, TestLarsMomentumOp, TestSparseMomentumOp, TestSparseMomentumOp2 - -if __name__ == '__main__': - unittest.main() diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_mul_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_mul_ngraph_op.py deleted file mode 100644 index 412127af70..0000000000 --- a/python/paddle/fluid/tests/unittests/ngraph/test_mul_ngraph_op.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import print_function - -import unittest, sys -sys.path.append("../") -from test_mul_op import TestMulOp, TestMulOp2 - -if __name__ == "__main__": - unittest.main() diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_parallel_executor_ngraph.py b/python/paddle/fluid/tests/unittests/ngraph/test_parallel_executor_ngraph.py deleted file mode 100644 index 3afe2b9908..0000000000 --- a/python/paddle/fluid/tests/unittests/ngraph/test_parallel_executor_ngraph.py +++ /dev/null @@ -1,87 +0,0 @@ -# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import print_function - -from paddle.fluid.tests.unittests.simple_nets import simple_fc_net -import paddle.fluid as fluid -import paddle.fluid.core as core -from paddle.fluid import compiler -import numpy as np -import unittest -import os -import sys -import math - - -class TestPallelExecutorNgraph(unittest.TestCase): - def check_network_convergence(self, build_strategy=None): - os.environ['CPU_NUM'] = str(2) - main = fluid.Program() - startup = fluid.Program() - with fluid.program_guard(main, startup): - loss = simple_fc_net() - test_program = main.clone(for_test=True) - - opt = fluid.optimizer.Adam(learning_rate=0.001) - opt.minimize(loss) - - batch_size = 32 - image = np.random.normal(size=(batch_size, 784)).astype('float32') - label = np.random.randint(0, 10, (batch_size, 1), dtype="int64") - - place = fluid.CPUPlace() - exe = fluid.Executor(place) - exe.run(startup) - feed_dict = {'image': image, 'label': label} - - train_cp = compiler.CompiledProgram(main).with_data_parallel( - loss_name=loss.name, build_strategy=build_strategy) - test_cp = compiler.CompiledProgram(test_program).with_data_parallel( - loss_name=loss.name, - build_strategy=build_strategy, - share_vars_from=train_cp) - - for i in range(5): - _ = exe.run(train_cp, fetch_list=[loss.name], feed=feed_dict) - test_loss, = exe.run(test_cp, - fetch_list=[loss.name], - feed=feed_dict) - train_loss = exe.run(train_cp, - fetch_list=[loss.name], - feed=feed_dict) - - avg_test_loss_val = np.array(test_loss).mean() - if math.isnan(float(avg_test_loss_val)): - sys.exit("got NaN loss, testing failed.") - - avg_train_loss_val = np.array(train_loss).mean() - if math.isnan(float(avg_train_loss_val)): - sys.exit("got NaN loss, training failed.") - - self.assertTrue( - np.allclose( - train_loss, test_loss, atol=1e-8), - "Train loss: " + str(train_loss) + "\n Test loss:" + - str(test_loss)) - - def test_parallel_testing(self): - build_strategy = fluid.BuildStrategy() - build_strategy.enable_inplace = False - build_strategy.memory_optimize = False - self.check_network_convergence(build_strategy=build_strategy) - - -if __name__ == '__main__': - unittest.main() diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_pool2d_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_pool2d_ngraph_op.py deleted file mode 100644 index 5ce46fbf57..0000000000 --- a/python/paddle/fluid/tests/unittests/ngraph/test_pool2d_ngraph_op.py +++ /dev/null @@ -1,39 +0,0 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import print_function - -import unittest, sys -sys.path.append("../") -from test_pool2d_op import TestPool2D_Op, TestCase1, TestCase2, TestCase3, TestCase4, TestCase5 - - -class TestNGRAPHCeilMode(TestCase1): - def setUp(self): - super(TestNGRAPHCeilMode, self).setUp() - - def init_ceil_mode(self): - self.ceil_mode = True - - -class TestNGRAPHAdaptive(TestCase1): - def setUp(self): - super(TestNGRAPHAdaptive, self).setUp() - - def init_adaptive(self): - self.adaptive = True - - -if __name__ == '__main__': - unittest.main() diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_reduce_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_reduce_ngraph_op.py deleted file mode 100644 index 458f65338d..0000000000 --- a/python/paddle/fluid/tests/unittests/ngraph/test_reduce_ngraph_op.py +++ /dev/null @@ -1,37 +0,0 @@ -# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import print_function - -import unittest, sys -sys.path.append("../") -import numpy as np -from test_reduce_op import TestSumOp, Test1DReduce, \ - Test2DReduce0, Test2DReduce1, Test3DReduce0, Test3DReduce1, Test3DReduce2, \ - Test3DReduce3, TestKeepDimReduce, TestKeepDimReduceSumMultiAxises, \ - TestReduceSumWithDimOne, TestReduceSumWithNumelOne - - -class Test3DReduce21(Test1DReduce): - def setUp(self): - self.op_type = "reduce_sum" - self.attrs = {'dim': [1, 2]} - self.inputs = {'X': np.random.random((20, 1, 5)).astype("float64")} - self.outputs = { - 'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim'])) - } - - -if __name__ == '__main__': - unittest.main() diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_reshape_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_reshape_ngraph_op.py deleted file mode 100644 index 928e1cb4de..0000000000 --- a/python/paddle/fluid/tests/unittests/ngraph/test_reshape_ngraph_op.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import print_function - -import unittest, sys -sys.path.append("../") - -from test_reshape_op import TestReshapeOp, TestReshapeOpDimInfer1, TestReshapeOpDimInfer2 - -if __name__ == '__main__': - unittest.main() diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_scale_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_scale_ngraph_op.py deleted file mode 100644 index abeb929594..0000000000 --- a/python/paddle/fluid/tests/unittests/ngraph/test_scale_ngraph_op.py +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import print_function -import unittest, sys -sys.path.append("../") -from test_scale_op import TestScaleOp, TestScaleOpSelectedRows - -if __name__ == "__main__": - unittest.main() diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_slice_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_slice_ngraph_op.py deleted file mode 100644 index b6f1f4e0dc..0000000000 --- a/python/paddle/fluid/tests/unittests/ngraph/test_slice_ngraph_op.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import print_function - -import unittest, sys -sys.path.append("../") -from test_slice_op import TestSliceOp, TestSliceOp_decs_dim, TestSliceOp_decs_dim_2, TestSliceOp_decs_dim_3, TestSliceOp_decs_dim_5, TestSliceOp_decs_dim_6, TestCase1, TestCase2 - -if __name__ == '__main__': - unittest.main() diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_softmax_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_softmax_ngraph_op.py deleted file mode 100644 index 4b895f81a8..0000000000 --- a/python/paddle/fluid/tests/unittests/ngraph/test_softmax_ngraph_op.py +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import print_function -import unittest, sys -sys.path.append("../") -from test_softmax_op import TestSoftmaxOp, TestSoftmaxOp2, TestSoftmaxOp3, TestSoftmaxOp4, TestSoftmaxOp5 - -if __name__ == "__main__": - unittest.main() diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_softmax_with_cross_entropy_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_softmax_with_cross_entropy_ngraph_op.py deleted file mode 100644 index 6dab134c68..0000000000 --- a/python/paddle/fluid/tests/unittests/ngraph/test_softmax_with_cross_entropy_ngraph_op.py +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import print_function -import unittest, sys -sys.path.append("../") -from test_softmax_with_cross_entropy_op import TestSoftmaxWithCrossEntropyOp, TestSoftmaxWithCrossEntropyOp2, TestSoftmaxWithCrossEntropyOp3 - -if __name__ == "__main__": - unittest.main() diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_stack_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_stack_ngraph_op.py deleted file mode 100644 index 23ef261331..0000000000 --- a/python/paddle/fluid/tests/unittests/ngraph/test_stack_ngraph_op.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import print_function - -import unittest, sys -sys.path.append("../") -from test_stack_op import TestStackOpBase, TestStackOp1, TestStackOp2, TestStackOp3, TestStackOp4, TestStackOp5, TestStackOp6 - -if __name__ == '__main__': - unittest.main() diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_sum_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_sum_ngraph_op.py deleted file mode 100644 index acccf58466..0000000000 --- a/python/paddle/fluid/tests/unittests/ngraph/test_sum_ngraph_op.py +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from __future__ import print_function
-import unittest, sys
-sys.path.append("../")
-from test_sum_op import TestSumOp, TestSelectedRowsSumOp, TestLoDTensorAndSelectedRowsOp
-
-if __name__ == "__main__":
-    unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_top_k_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_top_k_ngraph_op.py
deleted file mode 100644
index a42f781c65..0000000000
--- a/python/paddle/fluid/tests/unittests/ngraph/test_top_k_ngraph_op.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-from __future__ import print_function
-
-import unittest, sys
-sys.path.append("../")
-from test_top_k_op import TestTopkOp
-
-if __name__ == "__main__":
-    unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_transpose_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_transpose_ngraph_op.py
deleted file mode 100644
index 27bf82fc59..0000000000
--- a/python/paddle/fluid/tests/unittests/ngraph/test_transpose_ngraph_op.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from __future__ import print_function
-
-import unittest, sys
-sys.path.append("../")
-from test_transpose_op import TestTransposeOp, TestCase0, TestCase1, TestCase2, TestCase3, TestCase4
-
-if __name__ == '__main__':
-    unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/op_test.py b/python/paddle/fluid/tests/unittests/op_test.py
index 0e4c0522df..f13a4ceee6 100644
--- a/python/paddle/fluid/tests/unittests/op_test.py
+++ b/python/paddle/fluid/tests/unittests/op_test.py
@@ -207,9 +207,6 @@ class OpTest(unittest.TestCase):
         def is_mkldnn_op_test():
             return hasattr(cls, "use_mkldnn") and cls.use_mkldnn == True
 
-        def is_ngraph_op_test():
-            return hasattr(cls, "use_ngraph") and cls.use_ngraph == True
-
         if not hasattr(cls, "op_type"):
             raise AssertionError(
                 "This test do not have op_type in class attrs, "
@@ -229,7 +226,6 @@ class OpTest(unittest.TestCase):
         if cls.dtype in [np.float32, np.float64] \
             and cls.op_type not in op_accuracy_white_list.NO_FP64_CHECK_GRAD_OP_LIST \
             and not hasattr(cls, 'exist_fp64_check_grad') \
-            and not is_ngraph_op_test() \
             and not is_mkldnn_op_test():
             raise AssertionError(
                 "This test of %s op needs check_grad with fp64 precision." %
@@ -321,9 +317,6 @@ class OpTest(unittest.TestCase):
            (hasattr(self, "attrs") and "use_mkldnn" in self.attrs and \
             self.attrs["use_mkldnn"] == True):
             self.__class__.use_mkldnn = True
-        if fluid.core.is_compiled_with_ngraph() and \
-            fluid.core.globals()['FLAGS_use_ngraph']:
-            self.__class__.use_ngraph = True
 
         op_proto = OpProtoHolder.instance().get_op_proto(self.op_type)
         "infer datatype from inputs and outputs for this test case"
@@ -935,26 +928,17 @@ class OpTest(unittest.TestCase):
                 res[op_desc] = self._calc_output(
                     place, no_check_set=no_check_set, for_inplace_test=True)
             else:
-                # TODO(zhiqiu): enhance inplace_grad test for ops (sum and activation) using mkldnn/ngraph
-                # skip op that use_mkldnn and use_ngraph currently
+                # TODO(zhiqiu): enhance inplace_grad test for ops (sum and activation) using mkldnn
+                # skip op that use_mkldnn currently
                 flags_use_mkldnn = fluid.core.globals()["FLAGS_use_mkldnn"]
                 attrs_use_mkldnn = hasattr(
                     self,
                     'attrs') and bool(self.attrs.get('use_mkldnn', False))
-                flags_use_ngraph = fluid.core.globals()["FLAGS_use_ngraph"]
-                attrs_use_ngraph = hasattr(
-                    self,
-                    'attrs') and bool(self.attrs.get('use_ngraph', False))
                 if flags_use_mkldnn or attrs_use_mkldnn:
                     warnings.warn(
                         "check inplace_grad for ops using mkldnn is not supported"
                     )
                     continue
-                if flags_use_ngraph or attrs_use_ngraph:
-                    warnings.warn(
-                        "check inplace_grad for ops using ngraph is not supported"
-                    )
-                    continue
                 if has_infer_inplace:
                     fwd_res = res[father_op_desc]
                     res[op_desc] = self._check_grad_inplace(
@@ -1177,10 +1161,6 @@ class OpTest(unittest.TestCase):
             return []
         places = [fluid.CPUPlace()]
         cpu_only = self._cpu_only if hasattr(self, '_cpu_only') else False
-        use_ngraph = fluid.core.is_compiled_with_ngraph(
-        ) and fluid.core.globals()['FLAGS_use_ngraph']
-        if use_ngraph:
-            cpu_only = True
         if core.is_compiled_with_cuda() and core.op_support_gpu(self.op_type)\
            and not cpu_only:
             places.append(core.CUDAPlace(0))
@@ -1197,9 +1177,6 @@ class OpTest(unittest.TestCase):
            (hasattr(self, "attrs") and "use_mkldnn" in self.attrs and \
             self.attrs["use_mkldnn"] == True):
             self.__class__.use_mkldnn = True
-        if fluid.core.is_compiled_with_ngraph() and \
-            fluid.core.globals()['FLAGS_use_ngraph']:
-            self.__class__.use_ngraph = True
 
         places = self._get_places()
         for place in places:
diff --git a/python/paddle/fluid/tests/unittests/test_eager_deletion_delete_vars.py b/python/paddle/fluid/tests/unittests/test_eager_deletion_delete_vars.py
index adf07897d5..835f693ab6 100644
--- a/python/paddle/fluid/tests/unittests/test_eager_deletion_delete_vars.py
+++ b/python/paddle/fluid/tests/unittests/test_eager_deletion_delete_vars.py
@@ -14,7 +14,6 @@
 
 import os
 import numpy as np
-os.environ['FLAGS_use_ngraph'] = '0'
 os.environ['FLAGS_use_mkldnn'] = '0'
 os.environ['CPU_NUM'] = '4'
 
diff --git a/python/paddle/fluid/tests/unittests/test_global_var_getter_setter.py b/python/paddle/fluid/tests/unittests/test_global_var_getter_setter.py
index f6ad1b3082..548b758311 100644
--- a/python/paddle/fluid/tests/unittests/test_global_var_getter_setter.py
+++ b/python/paddle/fluid/tests/unittests/test_global_var_getter_setter.py
@@ -27,7 +27,6 @@ class TestGlobalVarGetterSetter(unittest.TestCase):
     def test_main(self):
         var_infos = [
             VarInfo("FLAGS_use_mkldnn", bool, False),
-            VarInfo("FLAGS_use_ngraph", bool, False),
             VarInfo("FLAGS_eager_delete_tensor_gb", float, True),
         ]
 
diff --git a/python/setup.py.in b/python/setup.py.in
index d70e93dc15..23799e6189 100644
--- a/python/setup.py.in
+++ b/python/setup.py.in
@@ -225,24 +225,6 @@ if '${WITH_MKLDNN}' == 'ON':
     else:
         package_data['paddle.libs']+=['mkldnn.dll']
 
-if '${WITH_NGRAPH}' == 'ON':
-    # only change rpath in Release mode,
-    # since in Debug mode, nGraph lib may be too large to be changed?
-    if '${CMAKE_BUILD_TYPE}' == 'Release':
-        if os.name != 'nt':
-            if "@APPLE@" == "1":
-                command = "install_name_tool -id \"@loader_path/\" ${NGRAPH_SHARED_LIB}"
-            else:
-                command = "patchelf --set-rpath '$ORIGIN/' ${NGRAPH_SHARED_LIB}"
-            if os.system(command) != 0:
-                raise Exception("patch ${NGRAPH_SHARED_LIB_NAME} failed, command: %s" % command)
-    shutil.copy('${NGRAPH_SHARED_LIB}', libs_path)
-    shutil.copy('${NGRAPH_CPU_LIB}', libs_path)
-    shutil.copy('${NGRAPH_TBB_LIB}', libs_path)
-    package_data['paddle.libs']+=['${NGRAPH_SHARED_LIB_NAME}',
-                                  '${NGRAPH_CPU_LIB_NAME}',
-                                  '${NGRAPH_TBB_LIB_NAME}']
-
 # copy libfuild_framework.so to libs
 if os.name != 'nt' and sys.platform != 'darwin':
     paddle_framework_lib='${FLUID_FRAMEWORK_SHARED_LIB}'
@@ -301,7 +283,7 @@ headers = (
    list(find_files('*', '${BOOST_INCLUDE_DIR}/boost')) +  # boost
    list(find_files('*', '${XXHASH_INSTALL_DIR}/include')) +  # xxhash
    list(find_files('*', '${PROTOBUF_INCLUDE_DIR}')) +  # protobuf
-   list(find_files('*', '${DLPACK_INCLUDE_DIR}')) + # dlpack 
+   list(find_files('*', '${DLPACK_INCLUDE_DIR}')) + # dlpack
    list(find_files('*.h', '${THREADPOOL_INCLUDE_DIR}'))) # threadpool
 
 if '${WITH_MKLDNN}' == 'ON':
@@ -315,31 +297,31 @@ class InstallCommand(InstallCommandBase):
                                        'include')
         self.install_lib = self.install_platlib
         return ret
-    
+
 
 class InstallHeaders(Command):
     """Override how headers are copied.
     """
     description = 'install C/C++ header files'
-    
+
     user_options = [('install-dir=', 'd',
                      'directory to install header files to'),
                     ('force', 'f',
                      'force installation (overwrite existing files)'),
                     ]
-    
+
     boolean_options = ['force']
-    
+
     def initialize_options(self):
         self.install_dir = None
         self.force = 0
         self.outfiles = []
-    
+
     def finalize_options(self):
         self.set_undefined_options('install',
                                    ('install_headers', 'install_dir'),
                                    ('force', 'force'))
-    
+
     def mkdir_and_copy_file(self, header):
         if 'pb.h' in header:
             install_dir = re.sub('${PADDLE_BINARY_DIR}/', '', header)
@@ -362,7 +344,7 @@ class InstallHeaders(Command):
         if not os.path.exists(install_dir):
             self.mkpath(install_dir)
         return self.copy_file(header, install_dir)
-    
+
     def run(self):
         if os.name == 'nt' or sys.platform == 'darwin':
             return
@@ -373,10 +355,10 @@ class InstallHeaders(Command):
         for header in hdrs:
             (out, _) = self.mkdir_and_copy_file(header)
             self.outfiles.append(out)
-    
+
     def get_inputs(self):
         return self.distribution.headers or []
-    
+
     def get_outputs(self):
         return self.outfiles
 
@@ -415,7 +397,7 @@ with redirect_stdout():
 )
 
 # As there are a lot of files in purelib which causes many logs,
-# we don't print them on the screen, and you can open `setup.py.log` 
+# we don't print them on the screen, and you can open `setup.py.log`
 # for the full logs.
 if os.path.exists('${SETUP_LOG_FILE}'):
     os.system('grep -v "purelib" ${SETUP_LOG_FILE}')
-- 
GitLab