Unverified commit 3baaee9a authored by M mozga-intel, committed by GitHub

Remove: NGraph engine from PDPD repository (#23545)

* Remove the NGraph engine from the PDPD repository
1. Each operator was removed from the operators directory
2. Each test was removed from the unittest directory
3. Parallel executor support was removed from PDPD
4. The CMake files were removed from PDPD
5. The nGraph flags were removed from the repository
test=develop

* Remove ngraph from:
1. CMake files
2. Python files
test=develop
Parent 81e8fd4a
......@@ -72,7 +72,6 @@ option(ON_INFER "Turn on inference optimization and inference-lib genera
################################ Internal Configurations #######################################
option(WITH_AMD_GPU "Compile PaddlePaddle with AMD GPU" OFF)
option(WITH_NV_JETSON "Compile PaddlePaddle with NV JETSON" OFF)
option(WITH_NGRAPH "Compile PaddlePaddle with nGraph support." OFF)
option(WITH_PROFILER "Compile PaddlePaddle with GPU profiler and gperftools" OFF)
option(WITH_COVERAGE "Compile PaddlePaddle with code coverage" OFF)
OPTION(WITH_LIBXSMM "Compile with libxsmm" OFF)
......
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
INCLUDE(GNUInstallDirs)
INCLUDE(ExternalProject)
SET(NGRAPH_PROJECT "extern_ngraph")
SET(NGRAPH_GIT_TAG "972dd2f5ecfa18e3819b17c47698fae9795b499f")
SET(NGRAPH_SOURCES_DIR ${THIRD_PARTY_PATH}/ngraph)
SET(NGRAPH_INSTALL_DIR ${THIRD_PARTY_PATH}/install/ngraph)
SET(NGRAPH_INC_DIR ${NGRAPH_INSTALL_DIR}/include)
SET(NGRAPH_LIB_DIR ${NGRAPH_INSTALL_DIR}/${CMAKE_INSTALL_LIBDIR})
SET(NGRAPH_SHARED_LIB_NAME libngraph.so)
SET(NGRAPH_CPU_LIB_NAME libcpu_backend.so)
if(CMAKE_BUILD_TYPE STREQUAL "Debug")
SET(NGRAPH_TBB_LIB_NAME libtbb_debug.so.2)
else()
SET(NGRAPH_TBB_LIB_NAME libtbb.so.2)
endif()
SET(NGRAPH_GIT_REPO "https://github.com/NervanaSystems/ngraph.git")
SET(NGRAPH_SHARED_LIB ${NGRAPH_LIB_DIR}/${NGRAPH_SHARED_LIB_NAME})
SET(NGRAPH_CPU_LIB ${NGRAPH_LIB_DIR}/${NGRAPH_CPU_LIB_NAME})
SET(NGRAPH_TBB_LIB ${NGRAPH_LIB_DIR}/${NGRAPH_TBB_LIB_NAME})
ExternalProject_Add(
${NGRAPH_PROJECT}
${EXTERNAL_PROJECT_LOG_ARGS}
${SHALLOW_CLONE}
DEPENDS ${MKLDNN_PROJECT} ${MKLML_PROJECT}
GIT_REPOSITORY ${NGRAPH_GIT_REPO}
GIT_TAG ${NGRAPH_GIT_TAG}
PREFIX ${NGRAPH_SOURCES_DIR}
UPDATE_COMMAND ""
CMAKE_GENERATOR ${CMAKE_GENERATOR}
CMAKE_GENERATOR_PLATFORM ${CMAKE_GENERATOR_PLATFORM}
CMAKE_GENERATOR_TOOLSET ${CMAKE_GENERATOR_TOOLSET}
CMAKE_ARGS -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
CMAKE_ARGS -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${NGRAPH_INSTALL_DIR}
CMAKE_ARGS -DNGRAPH_UNIT_TEST_ENABLE=FALSE
CMAKE_ARGS -DNGRAPH_TOOLS_ENABLE=FALSE
CMAKE_ARGS -DNGRAPH_INTERPRETER_ENABLE=FALSE
CMAKE_ARGS -DNGRAPH_DEX_ONLY=TRUE
CMAKE_ARGS -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}
CMAKE_ARGS -DMKLDNN_INCLUDE_DIR=${MKLDNN_INC_DIR}
CMAKE_ARGS -DMKLDNN_LIB_DIR=${MKLDNN_INSTALL_DIR}/${CMAKE_INSTALL_LIBDIR}
CMAKE_ARGS -DMKLML_LIB_DIR=${MKLML_INSTALL_DIR}/lib
)
add_library(ngraph INTERFACE)
add_dependencies(ngraph ${NGRAPH_PROJECT})
target_compile_definitions(ngraph INTERFACE -DPADDLE_WITH_NGRAPH)
target_include_directories(ngraph INTERFACE ${NGRAPH_INC_DIR})
target_link_libraries(ngraph INTERFACE ${NGRAPH_SHARED_LIB})
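The interface target above only carries usage requirements: the PADDLE_WITH_NGRAPH compile definition, the nGraph include directory, and the shared library. A minimal C++ sketch of how a source file keys off that definition once the target is linked (illustrative only; the real call sites removed later in this PR, e.g. executor.cc, follow the same pattern):
#ifdef PADDLE_WITH_NGRAPH
#include "ngraph/ngraph.hpp"  // found via NGRAPH_INC_DIR propagated by the target
#endif
// Returns whether nGraph support was compiled in.
inline bool NgraphCompiledIn() {
#ifdef PADDLE_WITH_NGRAPH
  return true;
#else
  return false;
#endif
}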
......@@ -117,13 +117,6 @@ function(copy_part_of_thrid_party TARGET DST)
DSTS ${dst_dir} ${dst_dir}/lib)
endif ()
if (WITH_NGRAPH)
set(dst_dir "${DST}/third_party/install/ngraph")
copy(${TARGET}
SRCS ${NGRAPH_INC_DIR} ${NGRAPH_LIB_DIR}
DSTS ${dst_dir} ${dst_dir})
endif ()
if (LITE_BINARY_DIR)
set(dst_dir "${DST}/third_party/install/lite")
copy(${TARGET}
......
......@@ -120,13 +120,6 @@ if(WIN32 OR APPLE)
SET(WITH_LIBXSMM OFF CACHE STRING "Disable LIBXSMM in Windows and MacOS" FORCE)
endif()
if(WITH_NGRAPH)
MESSAGE(WARNING
"Windows or Mac is not supported with nGraph in Paddle yet."
"Force WITH_NGRAPH=OFF")
SET(WITH_NGRAPH OFF CACHE STRING "Disable nGraph in Windows and MacOS" FORCE)
endif()
if(WITH_BOX_PS)
MESSAGE(WARNING
"Windows or Mac is not supported with BOX_PS in Paddle yet."
......@@ -260,18 +253,6 @@ if(WITH_DISTRIBUTE)
endif()
endif()
if(WITH_NGRAPH)
if(WITH_MKLDNN)
include(external/ngraph) # download, build, install nGraph
list(APPEND third_party_deps extern_ngraph)
else()
MESSAGE(WARNING
"nGraph needs mkl-dnn to be enabled."
"Force WITH_NGRAPH=OFF")
SET(WITH_NGRAPH OFF CACHE STRING "Disable nGraph if mkl-dnn is disabled" FORCE)
endif()
endif()
if(WITH_XBYAK)
include(external/xbyak) # download, build, install xbyak
list(APPEND third_party_deps extern_xbyak)
......
......@@ -138,14 +138,6 @@ func (config *AnalysisConfig) SwitchIrDebug(x bool) {
C.PD_SwitchIrDebug(config.c, C.bool(x))
}
func (config *AnalysisConfig) EnableNgraph() {
C.PD_EnableNgraph(config.c)
}
func (config *AnalysisConfig) NgraphEnabled() bool {
return ConvertCBooleanToGo(C.PD_NgraphEnabled(config.c))
}
func (config *AnalysisConfig) EnableMkldnn() {
C.PD_EnableMKLDNN(config.c)
}
......
......@@ -181,12 +181,6 @@ cc_library(variable_helper SRCS variable_helper.cc DEPS lod_tensor)
cc_library(naive_executor SRCS naive_executor.cc DEPS op_registry device_context scope framework_proto glog lod_rank_table feed_fetch_method graph_to_program_pass variable_helper)
if(WITH_NGRAPH)
set(NGRAPH_EXE_DEPS ngraph_engine)
else()
set(NGRAPH_EXE_DEPS)
endif()
cc_library(executor_gc_helper SRCS executor_gc_helper.cc DEPS scope proto_desc operator garbage_collector)
if(WITH_DISTRIBUTE)
cc_library(executor SRCS executor.cc multi_trainer.cc pipeline_trainer.cc dataset_factory.cc
......@@ -195,7 +189,7 @@ if(WITH_DISTRIBUTE)
pull_dense_worker.cc section_worker.cc device_worker_factory.cc data_set.cc DEPS op_registry
device_context scope framework_proto trainer_desc_proto glog fs shell fleet_wrapper box_wrapper lodtensor_printer
lod_rank_table feed_fetch_method sendrecvop_rpc communicator collective_helper ${GLOB_DISTRIBUTE_DEPS}
graph_to_program_pass variable_helper data_feed_proto ${NGRAPH_EXE_DEPS} timer)
graph_to_program_pass variable_helper data_feed_proto timer)
set(DISTRIBUTE_COMPILE_FLAGS "-Wno-non-virtual-dtor -Wno-error=non-virtual-dtor -Wno-error=delete-non-virtual-dtor")
set_source_files_properties(executor.cc PROPERTIES COMPILE_FLAGS ${DISTRIBUTE_COMPILE_FLAGS})
else()
......@@ -205,7 +199,7 @@ else()
pull_dense_worker.cc section_worker.cc device_worker_factory.cc data_set.cc DEPS op_registry
device_context scope framework_proto data_feed_proto trainer_desc_proto glog
lod_rank_table fs shell fleet_wrapper box_wrapper lodtensor_printer feed_fetch_method
graph_to_program_pass variable_helper ${NGRAPH_EXE_DEPS} timer)
graph_to_program_pass variable_helper timer)
cc_test(test_naive_executor SRCS naive_executor_test.cc DEPS naive_executor elementwise_add_op)
endif()
......
......@@ -113,15 +113,8 @@ set(IR_PASS_DEPS graph_viz_pass multi_devices_graph_pass
if(NOT APPLE AND NOT WIN32 AND WITH_GPU)
set(IR_PASS_DEPS ${IR_PASS_DEPS} fusion_group_pass)
endif()
if(WITH_NGRAPH)
set(IR_PASS_DEPS ${IR_PASS_DEPS} ngraph)
endif()
cc_library(build_strategy SRCS build_strategy.cc DEPS pass_builder ${IR_PASS_DEPS})
if (WITH_MKLDNN)
target_link_libraries(build_strategy mkldnn_placement_pass)
endif()
if (WITH_NGRAPH)
target_link_libraries(build_strategy ngraph_subgraph_pass)
endif()
......@@ -27,7 +27,6 @@ limitations under the License. */
#include "paddle/fluid/framework/ir/multi_devices_graph_pass/multi_devices_graph_pass.h"
DECLARE_bool(use_mkldnn);
DECLARE_bool(use_ngraph);
namespace paddle {
namespace framework {
......@@ -60,8 +59,6 @@ class ParallelExecutorPassBuilder : public ir::PassBuilder {
"sequential_execution_pass");
AppendPassWithCheck(strategy_.sync_batch_norm_, "sync_batch_norm_pass");
AppendPassToUseNgraph("ngraph_subgraph_pass");
AppendOpFusePasses();
AppendPrintGraphPass("graph_viz_pass", "_fused_graph");
......@@ -277,23 +274,6 @@ class ParallelExecutorPassBuilder : public ir::PassBuilder {
#endif
}
void AppendPassToUseNgraph(const std::string &pass_name) {
#ifdef PADDLE_WITH_NGRAPH
if (FLAGS_use_ngraph) {
if (strategy_.reduce_ != BuildStrategy::ReduceStrategy::kAllReduce) {
LOG(WARNING) << "Currently ngraph_subgraph_pass works under AllReduce,"
"please set FLAGS_use_ngraph=false.";
} else {
AppendPass(pass_name);
}
}
#else
PADDLE_ENFORCE_NE(FLAGS_use_ngraph, true,
platform::errors::PreconditionNotMet(
"Please compile with NGRAPH first to use NGRAPH"));
#endif
}
private:
BuildStrategy strategy_;
};
......@@ -451,9 +431,6 @@ USE_PASS(add_reader_dependency_pass);
#ifdef PADDLE_WITH_MKLDNN
USE_PASS(mkldnn_placement_pass);
#endif
#ifdef PADDLE_WITH_NGRAPH
USE_PASS(ngraph_subgraph_pass);
#endif
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32) && !defined(__APPLE__)
USE_PASS(fusion_group_pass);
#endif
......@@ -37,13 +37,8 @@ limitations under the License. */
#include "paddle/fluid/platform/place.h"
#include "paddle/fluid/platform/profiler.h"
#ifdef PADDLE_WITH_NGRAPH
#include "paddle/fluid/operators/ngraph/ngraph_engine.h"
#endif
DECLARE_bool(benchmark);
DEFINE_bool(use_mkldnn, false, "Use MKLDNN to run");
DEFINE_bool(use_ngraph, false, "Use NGRAPH to run");
namespace paddle {
namespace framework {
......@@ -59,17 +54,6 @@ ExecutorPrepareContext::ExecutorPrepareContext(
void ExecutorPrepareContext::PrepareUnusedVars(
const std::vector<std::string>& keep_vars, bool force_disable_gc) {
#ifdef PADDLE_WITH_NGRAPH
if (FLAGS_use_ngraph) {
// FIXME(zjl): There is a difference when ngraph and gc are both enabled
// in unittests. I do not know why it happens. Maybe ngraph engine
// would cache some variables?
LOG_FIRST_N(WARNING, 1)
<< "FLAGS_use_ngraph=True, garbage collection strategy is "
"disabled in Executor";
force_disable_gc = true;
}
#endif
// If gc is enabled and block size > 1
if (prog_.Size() > 1) {
operators::PrepareSafeEagerDeletionOnConditionalOpAndConditionalGradOp(
......@@ -375,12 +359,6 @@ std::unique_ptr<ExecutorPrepareContext> Executor::Prepare(
for (auto& op_desc : block.AllOps()) {
ctx->ops_.push_back(OpRegistry::CreateOp(*op_desc));
}
#ifdef PADDLE_WITH_NGRAPH
if (FLAGS_use_ngraph && ctx->block_id_ == 0) {
paddle::operators::NgraphEngine::FuseNgraphOps(
ctx->prog_.Block(ctx->block_id_), &ctx->ops_);
}
#endif
ctx->PrepareUnusedVars(skip_ref_cnt_vars, force_disable_gc);
return ctx;
}
......
......@@ -98,14 +98,6 @@ if(WITH_MKLDNN)
pass_library(cpu_quantize_squash_pass inference DIR mkldnn)
endif()
if(WITH_NGRAPH)
cc_library(ngraph_subgraph_pass SRCS ngraph_subgraph_pass.cc DEPS ngraph_bridge
subgraph_detector fuse_pass_base ${op_library_DEPS})
set(pass_file ${PADDLE_BINARY_DIR}/paddle/fluid/inference/api/paddle_inference_pass.h)
file(APPEND ${pass_file} "USE_PASS(ngraph_subgraph_pass);\n")
set(INFER_IR_PASSES ${INFER_IR_PASSES} ngraph_subgraph_pass CACHE INTERNAL "")
endif()
cc_library(fuse_bn_act_pass SRCS fuse_bn_act_pass.cc DEPS pass graph_pattern_detector )
cc_library(fuse_elewise_add_act_pass SRCS fuse_elewise_add_act_pass.cc DEPS pass graph_pattern_detector )
cc_library(fuse_relu_depthwise_conv_pass SRCS fuse_relu_depthwise_conv_pass.cc DEPS pass graph_pattern_detector )
......
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <set>
#include <string>
#include <unordered_set>
#include <vector>
#include "paddle/fluid/framework/ir/graph_helper.h"
#include "paddle/fluid/framework/ir/graph_pattern_detector.h"
#include "paddle/fluid/framework/ir/ngraph_subgraph_pass.h"
#include "paddle/fluid/framework/ir/subgraph_detector.h"
#include "paddle/fluid/operators/ngraph/ngraph_bridge.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/string/pretty_log.h"
namespace paddle {
namespace framework {
namespace ir {
std::string GenerateEngineKey(const std::set<std::string> &engine_inputs,
const std::set<std::string> &engine_outputs,
const std::string &size) {
std::string engine_hash_key = "";
for (auto name : engine_inputs) {
engine_hash_key += name;
}
for (auto name : engine_outputs) {
engine_hash_key += name;
}
engine_hash_key += size;
auto engine_key = std::to_string(std::hash<std::string>()(engine_hash_key));
return engine_key;
}
void NgraphSubgraphPass::ApplyImpl(Graph *graph) const {
PADDLE_ENFORCE_NOT_NULL(graph);
FusePassBase::Init("ngraph_subgraph_pass", graph);
std::unordered_set<Node *> nodes2delete;
auto teller = [](const Node *node) {
if (!node->IsOp() || !node->Op()) return false;
auto op_type = node->Op()->Type();
return !paddle::operators::NgraphBridge::isRegister(op_type);
};
SubGraphFuser fuser(graph, teller, 0, "ngraph_engine");
fuser();
for (auto *node : graph->Nodes()) {
if (node->IsOp() && !Agent(node).subgraph()->empty()) {
OpDesc *op_desc = node->Op();
op_desc->SetType("ngraph_engine");
CreateNgraphEngineOp(node, graph);
std::unordered_set<const Node *> nodes2remove(
Agent(node).subgraph()->begin(), Agent(node).subgraph()->end());
GraphSafeRemoveNodes(graph, nodes2remove);
}
}
std::unordered_set<const Node *> nodes2remove;
for (auto *node : graph->Nodes()) {
if (node->IsOp() && Agent(node).deleted()) {
nodes2remove.insert(node);
}
}
framework::ir::GraphSafeRemoveNodes(graph, nodes2remove);
// std::vector<ir::Node *> nodes = ir::TopologySortOperations(*graph);
}
bool IsValid(std::string name) {
return name.find(Node::kControlDepVarName) == std::string::npos;
}
void UpdateNgraphIO(Node *node, Graph *graph,
std::vector<std::string> *input_names,
std::vector<std::string> *output_names) {
bool is_test = true, has_fetch = false;
for (Node *node : graph->Nodes()) {
if (node->IsOp() && node->Name().find("_grad") != std::string::npos) {
is_test = false;
}
if (node->IsVar() && node->Var()) {
for (auto out : node->outputs) {
if (out->Name() == "fetch") has_fetch = true;
}
}
}
if (is_test && has_fetch) {
for (auto *x : node->inputs) {
(*input_names).emplace_back(x->Name());
}
for (auto *x : node->outputs) {
(*output_names).emplace_back(x->Name());
}
return;
}
auto &subgraph = *Agent(node).subgraph();
std::unordered_set<std::string> inputs;
std::unordered_set<std::string> outputs;
for (auto *node : subgraph) {
for (auto in : node->inputs) {
auto name = in->Name();
if (!IsValid(name)) continue;
if (!outputs.count(name) && !inputs.count(name)) {
(*input_names).emplace_back(name);
inputs.insert(name);
}
}
for (auto out : node->outputs) {
auto name = out->Name();
if (!IsValid(name)) continue;
outputs.insert(name);
(*output_names).emplace_back(name);
}
}
}
void NgraphSubgraphPass::CreateNgraphEngineOp(Node *node, Graph *graph) const {
auto &subgraph = *Agent(node).subgraph();
PADDLE_ENFORCE_NE(subgraph.empty(), true, "subgraph cannot be empty");
framework::proto::BlockDesc block_proto;
framework::BlockDesc block_desc(nullptr, &block_proto);
block_desc.Proto()->set_parent_idx(-1);
block_desc.Proto()->set_idx(0);
for (auto *node : subgraph) {
auto *op = block_desc.AppendOp();
*op->Proto() = *node->Op()->Proto();
}
auto *vars = block_desc.Proto()->mutable_vars();
for (Node *node : graph->Nodes()) {
if (node->IsVar() && node->Var()) {
*vars->Add() = *node->Var()->Proto();
}
}
PADDLE_ENFORCE_NE(block_desc.Proto()->vars().empty(), true,
"the block has no var-desc");
std::vector<std::string> input_names;
std::vector<std::string> output_names;
UpdateNgraphIO(node, graph, &input_names, &output_names);
auto *op_desc = node->Op();
op_desc->SetInput(
"Xs", std::vector<std::string>(input_names.begin(), input_names.end()));
op_desc->SetOutput(
"Ys", std::vector<std::string>(output_names.begin(), output_names.end()));
int sgs = subgraph.size();
std::string subgraph_str = block_desc.Proto()->SerializeAsString();
std::string engine_key =
std::to_string(std::hash<std::string>()(subgraph_str));
std::vector<int> interval{0, sgs};
op_desc->SetType("ngraph_engine");
op_desc->SetAttr("interval", interval);
op_desc->SetAttr("graph", subgraph_str);
op_desc->SetAttr("engine_key", engine_key);
op_desc->SetAttr("op_role", 0);
}
} // namespace ir
} // namespace framework
} // namespace paddle
REGISTER_PASS(ngraph_subgraph_pass, paddle::framework::ir::NgraphSubgraphPass);
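For context on how the attributes written above are consumed: the fused block is stored as a serialized proto::BlockDesc string under "graph", together with a hash-based "engine_key". A hedged sketch of recovering that block from an engine op (illustrative only, not code from this PR; the boost::get accessor reflects an assumption about how OpDesc attributes are stored):
#include <string>
#include "paddle/fluid/framework/op_desc.h"
namespace paddle {
namespace framework {
namespace ir {
proto::BlockDesc RecoverEngineBlock(const OpDesc &engine_op) {
  // CreateNgraphEngineOp stored block_desc.Proto()->SerializeAsString() here.
  auto graph_str = boost::get<std::string>(engine_op.GetAttr("graph"));
  proto::BlockDesc block_proto;
  block_proto.ParseFromString(graph_str);  // standard protobuf deserialization
  return block_proto;
}
}  // namespace ir
}  // namespace framework
}  // namespace paddle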
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "paddle/fluid/framework/ir/fuse_pass_base.h"
#include "paddle/fluid/framework/ir/graph.h"
#include "paddle/fluid/framework/ir/graph_pattern_detector.h"
#include "paddle/fluid/framework/ir/pass.h"
namespace paddle {
namespace framework {
namespace ir {
/*
* Fuse supported ops to a NgraphEngineOp.
*/
class NgraphSubgraphPass : public FusePassBase {
public:
void ApplyImpl(ir::Graph *graph) const override;
virtual ~NgraphSubgraphPass() {}
private:
void CreateNgraphEngineOp(framework::ir::Node *x,
framework::ir::Graph *graph) const;
};
} // namespace ir
} // namespace framework
} // namespace paddle
......@@ -21,8 +21,6 @@ limitations under the License. */
#include "paddle/fluid/framework/ir/graph_pattern_detector.h"
#include "paddle/fluid/framework/ir/node.h"
DECLARE_bool(use_ngraph);
namespace paddle {
namespace framework {
namespace ir {
......@@ -398,11 +396,6 @@ void RemoveIntermediateOutputInSubgraph(const std::vector<Node *> &subgraph,
}
}
// When the ngraph subgraph pass is used with the parallel executor,
// this would remove all nodes; bypass this case and let the ngraph
// subgraph pass process the outputs instead.
if (FLAGS_use_ngraph && valid_output.size() == 0) return;
outputs->assign(valid_output.begin(), valid_output.end());
}
......
......@@ -32,8 +32,6 @@ limitations under the License. */
#include "paddle/fluid/framework/ir/memory_optimize_pass/reference_count_pass_helper.h"
#include "paddle/fluid/platform/profiler.h"
DECLARE_bool(use_ngraph);
DECLARE_double(eager_delete_tensor_gb);
#ifdef WITH_GPERFTOOLS
......@@ -286,13 +284,6 @@ bool ParallelExecutorPrivate::AllowPartialFeed() const {
}
ir::Graph *ParallelExecutorPrivate::ApplyMemoryOptimizePass(ir::Graph *graph) {
if (FLAGS_use_ngraph) {
LOG_FIRST_N(WARNING, 1)
<< "FLAGS_use_ngraph=True, memory optimization strategy is "
"disabled in ParallelExecutor";
return graph;
}
/**
* NOTE(zengjinle): If BuildStrategy.memory_optimize = None in Python,
* set BuildStrategy.memory_optimize according to whether gc is enabled.
......
......@@ -33,7 +33,6 @@ DEFINE_bool(enable_unused_var_check, false,
// not in cpu kernel;
// 1: the inputs of which are used to indicate dtype of outputs;
// 2: the inputs of which are used in fused operators.
// 3: special operators, like ngraph_engine.
// The category number is presented in the comments after each operator.
const std::unordered_set<std::string> op_has_unsed_vars_white_list = {
......@@ -54,8 +53,7 @@ const std::unordered_set<std::string> op_has_unsed_vars_white_list = {
"precision_recall", // 1
"fusion_seqpool_cvm_concat", // 2
"fused_batch_norm_act", // 2
"fused_batch_norm_act_grad", // 2
"ngraph_engine", // 3
"fused_batch_norm_act_grad" // 2
};
namespace paddle {
......
......@@ -136,10 +136,6 @@ void IRPassManager::CreatePasses(Argument *argument,
pass->Set("disable_trt_plugin_fp16",
new bool(argument->disable_trt_plugin_fp16()));
}
if (pass_name == "ngraph_subgraph_pass") {
pass->Set("program",
new framework::ProgramDesc *(&argument->main_program()));
}
if (pass_name == "lite_subgraph_pass") {
bool enable_int8 =
argument->lite_precision_mode() == AnalysisConfig::Precision::kInt8;
......
......@@ -28,11 +28,7 @@ if(WITH_MKLDNN)
endif()
cc_library(analysis_config SRCS analysis_config.cc DEPS ${mkldnn_quantizer_cfg} lod_tensor paddle_pass_builder)
if(WITH_NGRAPH)
cc_library(paddle_pass_builder SRCS paddle_pass_builder.cc DEPS ngraph)
else(WITH_NGRAPH)
cc_library(paddle_pass_builder SRCS paddle_pass_builder.cc)
endif(WITH_NGRAPH)
cc_library(paddle_pass_builder SRCS paddle_pass_builder.cc)
cc_library(paddle_inference_api SRCS api.cc api_impl.cc helper.cc DEPS lod_tensor scope reset_tensor_array
analysis_config zero_copy_tensor trainer_desc_proto)
......@@ -43,10 +39,6 @@ if(WITH_GPU AND TENSORRT_FOUND)
set(inference_deps ${inference_deps} tensorrt_engine tensorrt_converter)
endif()
if(WITH_NGRAPH)
set(inference_deps ${inference_deps} ngraph)
endif()
cc_library(analysis_predictor SRCS analysis_predictor.cc ${mkldnn_quantizer_src} DEPS ${inference_deps}
zero_copy_tensor ir_pass_manager op_compatible_info)
......
......@@ -116,8 +116,6 @@ AnalysisConfig::AnalysisConfig(const AnalysisConfig &other) {
CP_MEMBER(tensorrt_precision_mode_);
CP_MEMBER(trt_use_static_engine_);
CP_MEMBER(trt_use_calib_mode_);
// NGRAPH related.
CP_MEMBER(use_ngraph_);
// MKLDNN related.
CP_MEMBER(use_mkldnn_);
CP_MEMBER(mkldnn_enabled_op_types_);
......@@ -208,16 +206,6 @@ void AnalysisConfig::EnableMkldnnQuantizer() {
Update();
}
void AnalysisConfig::EnableNgraph() {
#ifdef PADDLE_WITH_NGRAPH
pass_builder()->EnableNgraph();
use_ngraph_ = true;
#else
LOG(ERROR) << "Please compile with NGRAPH first to use NGRAPH";
use_ngraph_ = false;
#endif
}
MkldnnQuantizerConfig *AnalysisConfig::mkldnn_quantizer_config() const {
PADDLE_ENFORCE_NOT_NULL(mkldnn_quantizer_config_,
"MkldnnQuantizer was not enabled yet.");
......@@ -305,20 +293,6 @@ void AnalysisConfig::Update() {
#endif
}
if (use_ngraph_) {
if (!enable_ir_optim_) {
LOG(ERROR)
<< "EnableNgraph() only works when IR optimization is enabled.";
}
#ifdef PADDLE_WITH_NGRAPH
pass_builder()->EnableNgraph();
use_ngraph_ = true;
#else
LOG(ERROR) << "Please compile with NGRAPH first to use NGRAPH";
use_ngraph_ = false;
#endif
}
if (use_mkldnn_) {
#ifdef PADDLE_WITH_MKLDNN
if (!enable_ir_optim_) {
......@@ -387,8 +361,6 @@ std::string AnalysisConfig::SerializeInfoCache() {
ss << enable_memory_optim_;
ss << use_ngraph_;
ss << use_mkldnn_;
ss << mkldnn_cache_capacity_;
for (auto &item : mkldnn_enabled_op_types_) ss << item;
......
......@@ -80,16 +80,6 @@ if (NOT WIN32)
endif()
endif(NOT WIN32)
if (NOT WIN32)
set(NGRAPH_PATH "${PADDLE_LIB_THIRD_PARTY_PATH}ngraph")
if(EXISTS ${NGRAPH_PATH})
include(GNUInstallDirs)
include_directories("${NGRAPH_PATH}/include")
link_directories("${NGRAPH_PATH}/${CMAKE_INSTALL_LIBDIR}")
set(NGRAPH_LIB ${NGRAPH_PATH}/${CMAKE_INSTALL_LIBDIR}/libngraph${CMAKE_SHARED_LIBRARY_SUFFIX})
endif()
endif()
if(WITH_MKL)
set(MATH_LIB_PATH "${PADDLE_LIB_THIRD_PARTY_PATH}mklml")
include_directories("${MATH_LIB_PATH}/include")
......@@ -132,7 +122,7 @@ endif()
if (NOT WIN32)
set(EXTERNAL_LIB "-lrt -ldl -lpthread")
set(DEPS ${DEPS}
${MATH_LIB} ${MKLDNN_LIB} ${NGRAPH_LIB}
${MATH_LIB} ${MKLDNN_LIB}
glog gflags protobuf xxhash
${EXTERNAL_LIB})
else()
......
......@@ -339,18 +339,6 @@ struct AnalysisConfig {
///
void SwitchIrDebug(int x = true);
///
/// \brief Turn on NGRAPH.
///
///
void EnableNgraph();
///
/// \brief A boolean state telling whether to use the NGRAPH.
///
/// \return bool Whether to use the NGRAPH.
///
bool ngraph_enabled() const { return use_ngraph_; }
///
/// \brief Turn on MKLDNN.
///
......@@ -548,7 +536,6 @@ struct AnalysisConfig {
// memory reuse related.
bool enable_memory_optim_{false};
bool use_ngraph_{false};
bool use_mkldnn_{false};
std::unordered_set<std::string> mkldnn_enabled_op_types_;
......
......@@ -143,10 +143,6 @@ void GpuPassStrategy::EnableMkldnnQuantizer() {
LOG(ERROR) << "GPU not support MKL-DNN quantization";
}
void GpuPassStrategy::EnableNgraph() {
LOG(ERROR) << "GPU not support Ngraph yet";
}
CpuPassStrategy::CpuPassStrategy() : PassStrategy({}) {
// NOTE the large fusions should be located in the front, so that they will
// not be damaged by smaller ones.
......@@ -224,14 +220,4 @@ void CpuPassStrategy::EnableMkldnnQuantizer() {
#endif
}
void CpuPassStrategy::EnableNgraph() {
#ifdef PADDLE_WITH_NGRAPH
if (!use_ngraph_) {
passes_.insert(passes_.begin(), "ngraph_subgraph_pass");
}
use_ngraph_ = true;
#else
use_ngraph_ = false;
#endif
}
} // namespace paddle
......@@ -94,10 +94,6 @@ class PassStrategy : public PaddlePassBuilder {
*/
virtual void EnableMKLDNN() {}
/** Enable NGRAPH optimization
*/
virtual void EnableNgraph() {}
/** Enable MKLDNN quantize optimization
*/
virtual void EnableMkldnnQuantizer() {}
......@@ -107,7 +103,6 @@ class PassStrategy : public PaddlePassBuilder {
virtual ~PassStrategy() = default;
protected:
bool use_ngraph_{false};
bool use_gpu_{false};
bool use_mkldnn_{false};
};
......@@ -121,7 +116,6 @@ class CpuPassStrategy : public PassStrategy {
explicit CpuPassStrategy(const CpuPassStrategy &other)
: PassStrategy(other.AllPasses()) {
use_gpu_ = other.use_gpu_;
use_ngraph_ = other.use_ngraph_;
use_mkldnn_ = other.use_mkldnn_;
use_mkldnn_quantizer_ = other.use_mkldnn_quantizer_;
}
......@@ -129,12 +123,10 @@ class CpuPassStrategy : public PassStrategy {
virtual ~CpuPassStrategy() = default;
void EnableCUDNN() override;
void EnableNgraph() override;
void EnableMKLDNN() override;
void EnableMkldnnQuantizer() override;
protected:
bool use_ngraph_{false};
bool use_mkldnn_quantizer_{false};
};
......@@ -151,7 +143,6 @@ class GpuPassStrategy : public PassStrategy {
}
void EnableCUDNN() override;
void EnableNgraph() override;
void EnableMKLDNN() override;
void EnableMkldnnQuantizer() override;
......
......@@ -215,11 +215,6 @@ typedef struct PD_MaxInputShape {
PADDLE_CAPI_EXPORT extern void PD_SwitchIrDebug(PD_AnalysisConfig* config,
bool x);
PADDLE_CAPI_EXPORT extern void PD_EnableNgraph(PD_AnalysisConfig* config);
PADDLE_CAPI_EXPORT extern bool PD_NgraphEnabled(
const PD_AnalysisConfig* config);
PADDLE_CAPI_EXPORT extern void PD_EnableMKLDNN(PD_AnalysisConfig* config);
PADDLE_CAPI_EXPORT extern void PD_SetMkldnnCacheCapacity(
......
......@@ -171,16 +171,6 @@ void PD_SwitchIrDebug(PD_AnalysisConfig* config, bool x) {
config->config.SwitchIrDebug(x);
}
void PD_EnableNgraph(PD_AnalysisConfig* config) {
PADDLE_ENFORCE_NOT_NULL(config);
config->config.EnableNgraph();
}
bool PD_NgraphEnabled(const PD_AnalysisConfig* config) {
PADDLE_ENFORCE_NOT_NULL(config);
return config->config.ngraph_enabled();
}
void PD_EnableMKLDNN(PD_AnalysisConfig* config) {
PADDLE_ENFORCE_NOT_NULL(config);
config->config.EnableMKLDNN();
......
......@@ -150,7 +150,7 @@ void SetConfig(AnalysisConfig *config) {
config->DisableFCPadding();
}
void profile(bool use_mkldnn = false, bool use_ngraph = false) {
void profile(bool use_mkldnn = false) {
AnalysisConfig config;
SetConfig(&config);
......@@ -158,10 +158,6 @@ void profile(bool use_mkldnn = false, bool use_ngraph = false) {
config.EnableMKLDNN();
}
if (use_ngraph) {
config.EnableNgraph();
}
std::vector<std::vector<PaddleTensor>> outputs;
std::vector<std::vector<PaddleTensor>> inputs;
LoadInputData(&inputs);
......@@ -171,11 +167,7 @@ void profile(bool use_mkldnn = false, bool use_ngraph = false) {
TEST(Analyzer_bert, profile) { profile(); }
#ifdef PADDLE_WITH_MKLDNN
TEST(Analyzer_bert, profile_mkldnn) { profile(true, false); }
#endif
#ifdef PADDLE_WITH_NGRAPH
TEST(Analyzer_bert, profile_ngraph) { profile(false, true); }
TEST(Analyzer_bert, profile_mkldnn) { profile(true); }
#endif
// Check the fuse status
......@@ -190,17 +182,13 @@ TEST(Analyzer_bert, fuse_statis) {
}
// Compare result of NativeConfig and AnalysisConfig
void compare(bool use_mkldnn = false, bool use_ngraph = false) {
void compare(bool use_mkldnn = false) {
AnalysisConfig cfg;
SetConfig(&cfg);
if (use_mkldnn) {
cfg.EnableMKLDNN();
}
if (use_ngraph) {
cfg.EnableNgraph();
}
std::vector<std::vector<PaddleTensor>> inputs;
LoadInputData(&inputs);
CompareNativeAndAnalysis(
......@@ -209,15 +197,7 @@ void compare(bool use_mkldnn = false, bool use_ngraph = false) {
TEST(Analyzer_bert, compare) { compare(); }
#ifdef PADDLE_WITH_MKLDNN
TEST(Analyzer_bert, compare_mkldnn) {
compare(true, false /* use_mkldnn, no use_ngraph */);
}
#endif
#ifdef PADDLE_WITH_NGRAPH
TEST(Analyzer_bert, compare_ngraph) {
compare(false, true /* no use_mkldnn, use_ngraph */);
}
TEST(Analyzer_bert, compare_mkldnn) { compare(true /* use_mkldnn */); }
#endif
// Compare Deterministic result
......
......@@ -58,9 +58,6 @@ TEST(PD_AnalysisConfig, use_gpu) {
false);
bool trt_enable = PD_TensorrtEngineEnabled(config);
CHECK(trt_enable) << "NO";
PD_EnableNgraph(config);
bool ngraph_enable = PD_NgraphEnabled(config);
LOG(INFO) << ngraph_enable << " Ngraph";
PD_EnableMemoryOptim(config);
bool memory_optim_enable = PD_MemoryOptimEnabled(config);
CHECK(memory_optim_enable) << "NO";
......
......@@ -78,8 +78,6 @@ std::ostream &operator<<(std::ostream &os, const AnalysisConfig &config) {
<< "use_tensorrt: " << config.tensorrt_engine_enabled() << "\n";
os << GenSpaces(num_spaces) << "use_mkldnn: " << config.mkldnn_enabled()
<< "\n";
os << GenSpaces(num_spaces) << "use_ngraph: " << config.ngraph_enabled()
<< "\n";
num_spaces--;
os << GenSpaces(num_spaces) << "}\n";
return os;
......
......@@ -12,7 +12,6 @@ add_subdirectory(detection)
add_subdirectory(elementwise)
add_subdirectory(fused)
add_subdirectory(metrics)
add_subdirectory(ngraph)
add_subdirectory(optimizers)
add_subdirectory(reduce_ops)
add_subdirectory(sequence_ops)
......
if(WITH_NGRAPH)
cc_library(ngraph_bridge SRCS ngraph_bridge.cc DEPS operator framework_proto ngraph)
cc_library(ngraph_engine SRCS ngraph_engine.cc DEPS ngraph_bridge framework_proto)
op_library(ngraph_engine_op DEPS ngraph_engine op_registry op_info device_context)
add_subdirectory(ops)
endif()
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <algorithm>
#include <functional>
#include <memory>
#include <unordered_set>
#include <vector>
#include "ngraph/ngraph.hpp"
#include "paddle/fluid/operators/ngraph/ngraph_bridge.h"
#include "paddle/fluid/operators/ngraph/ngraph_ops.h"
#include "paddle/fluid/operators/ngraph/ops/op_bridge.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/ngraph_helper.h"
constexpr int64_t kNoPadding = -1;
namespace paddle {
namespace operators {
bool NgraphBridge::isRegister(const std::string& str) {
return ops::NgraphSingleton::Lookup(str);
}
bool NgraphBridge::isSupported(
const std::unique_ptr<framework::OperatorBase>& op) {
static std::unordered_set<std::string> skip_op_list{
"reshape", "reshape2", "lookup_table", "lookup_table_grad"};
bool result = true;
auto& op_type = op->Type();
auto op_attrs = paddle::framework::AttrReader(op->Attrs());
if (!isRegister(op_type)) {
if (skip_op_list.count(op_type)) {
if (op_type == "lookup_table" || op_type == "lookup_table_grad") {
if (op_attrs.Get<bool>("is_sparse")) {
result = false;
}
} else if ((op_type == "reshape") || (op_type == "reshape2")) {
if (op->Input("Shape") != paddle::framework::kEmptyVarName) {
result = false;
}
} else {
result = false;
}
}
} else {
result = false;
}
return result;
}
void NgraphBridge::BuildNgNode(
const std::shared_ptr<framework::OperatorBase>& op) {
auto& op_type = op->Type();
ops::NgraphSingleton::BuildNode(ngb_node_map_, op, op_type);
}
} // namespace operators
} // namespace paddle
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <algorithm>
#include <map>
#include <memory>
#include <string>
#include <unordered_map>
#include "ngraph/node.hpp"
#include "paddle/fluid/framework/operator.h"
namespace paddle {
namespace operators {
class NgraphBridge {
public:
explicit NgraphBridge(
std::shared_ptr<
std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
var_node_map)
: ngb_node_map_(var_node_map) {}
void BuildNgNode(const std::shared_ptr<framework::OperatorBase>& op);
static bool isRegister(const std::string& str);
static bool isSupported(const std::unique_ptr<framework::OperatorBase>& op);
private:
std::shared_ptr<
std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
ngb_node_map_;
};
} // namespace operators
} // namespace paddle
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <list>
#include <memory>
#include <mutex> //NOLINT
#include <set>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/framework/program_desc.h"
#include "paddle/fluid/framework/var_desc.h"
#include "ngraph/ngraph.hpp"
namespace paddle {
namespace operators {
// cache of repeatedly used engine state
struct EngineCache {
std::shared_ptr<ngraph::runtime::Executable> ngraph_handle = nullptr;
std::shared_ptr<ngraph::runtime::Backend> ngraph_backend = nullptr;
std::set<std::string> persistables;
std::vector<std::string> var_in;
std::vector<std::string> var_out;
std::vector<size_t> var_in_updates;
bool is_test = true;
};
template <class T, class Engine, int separator = 0>
class NgraphThreadCache {
public:
typedef decltype(Engine::getMutex()) mutex_type;
typedef std::lock_guard<mutex_type> guard_type;
typedef T& ref_type;
enum class type_of_thread { unknown, forward, backward };
template <class S>
struct MetaInfo {
std::thread::id owner_tid; // owner of the cache, future use;
type_of_thread worker_type; // future use
S real_content;
MetaInfo()
: owner_tid{std::this_thread::get_id()},
worker_type{type_of_thread::unknown} {}
};
typedef std::unique_ptr<MetaInfo<T>> content_type;
typedef std::list<content_type> storage_type;
protected:
static storage_type l;
static mutex_type getMutex() { return Engine::getMutex(); }
static void remove_from_list(const T* raw_ptr) {
guard_type guard(getMutex());
l.remove_if([raw_ptr](const content_type& sh) {
return &(sh->real_content) == raw_ptr;
});
}
template <class TRaw>
struct TLSDescriptor {
TRaw* raw_ptr;
TLSDescriptor() : raw_ptr{nullptr} {}
~TLSDescriptor() {
// if thread die
NgraphThreadCache::remove_from_list(raw_ptr);
/* TODO : Parallel executor swap */
// FastMultiThreadCache::keep_alive_for_backward_thread(raw_ptr);
}
};
public:
NgraphThreadCache() = delete;
NgraphThreadCache(const NgraphThreadCache& copy) = delete;
static T& fetch() {
thread_local TLSDescriptor<T> tls;
if (!tls.raw_ptr) {
using elem_type = typename content_type::element_type;
content_type _p(new elem_type());
if (!_p) PADDLE_THROW("Cannot alloc memory for thread-cache ");
guard_type guard(getMutex());
l.push_back(std::move(_p));
tls.raw_ptr = &l.back()->real_content;
}
return *(tls.raw_ptr);
}
auto getSize() -> decltype(l.size()) {
guard_type guard(getMutex());
return l.size();
}
template <class F>
void for_each_cache(F f) {
guard_type guard(getMutex());
std::for_each(l.begin(), l.end(), f);
}
};
template <class T, class Engine, int separator>
typename NgraphThreadCache<T, Engine, separator>::storage_type
NgraphThreadCache<T, Engine, separator>::l;
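The template above is a thread-local cache: each thread lazily creates its own entry inside a mutex-guarded global list (so entries have stable addresses and outlive individual calls) and reuses it through a thread_local handle. A minimal standalone sketch of the same pattern, with illustrative names that are not Paddle APIs:
#include <list>
#include <mutex>
template <class T>
class PerThreadSlot {
 public:
  // Returns this thread's entry, creating it on first use.
  static T& Fetch() {
    thread_local T* slot = nullptr;
    if (slot == nullptr) {
      std::lock_guard<std::mutex> guard(Mutex());
      Storage().emplace_back();   // one entry per calling thread
      slot = &Storage().back();   // std::list keeps element addresses stable
    }
    return *slot;
  }
 private:
  static std::mutex& Mutex() {
    static std::mutex m;
    return m;
  }
  static std::list<T>& Storage() {
    static std::list<T> storage;
    return storage;
  }
};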
// perform graph build through bridge and execute computation
class NgraphEngine {
public:
explicit NgraphEngine(const framework::Scope& scope,
const platform::Place& place,
const framework::ExecutionContext& ctx);
void Run(const framework::Scope& scope, const platform::Place& place) const;
static std::vector<std::string> feed_vars;
static void FuseNgraphOps(
const framework::BlockDesc& prog,
std::vector<std::unique_ptr<framework::OperatorBase>>* ops);
static std::recursive_mutex& getMutex() {
static std::recursive_mutex mx;
return mx;
}
private:
template <class T>
using ThCache =
NgraphThreadCache<std::unordered_map<std::string, T>, NgraphEngine>;
using main_engine_cache = ThCache<EngineCache>;
using main_t_in_cache =
ThCache<std::vector<std::shared_ptr<ngraph::runtime::Tensor>>>;
const framework::Scope& scope_;
const platform::Place& place_;
std::vector<std::shared_ptr<framework::OperatorBase>> fused_ops_;
std::unordered_map<std::string, ngraph::element::Type> var_type_map_;
std::set<std::string> persistables_;
std::unordered_set<std::string> post_op_inputs_;
// is_test is true for a single forward run; it can be a validation run during training
bool is_test_{true};
// inference only. eg. CAPI inference
bool is_inference_{false};
std::string func_cache_key_;
// use a weak pointer to keep backend_ alive
// to avoid it being destroyed too early
static std::weak_ptr<ngraph::runtime::Backend> wp_backend_;
// use mutex to keep it thread safe
static std::mutex ng_mutex_;
// ngraph backend eg. CPU
std::shared_ptr<ngraph::runtime::Backend> backend_;
// var_name of inputs
std::vector<std::string> var_in_;
// var_name of outputs from fetch in order
std::vector<std::string> var_out_;
// non-persistable var_in
std::vector<size_t> var_in_updates_;
// map input vars to nodes
std::shared_ptr<
std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
var_in_node_map_;
// map each var name with a ngraph node
std::shared_ptr<
std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
var_node_map_;
// prepare the info the ngraph engine needs
void Prepare(const framework::ExecutionContext& ctx);
// get ngraph engine input and output list
void BuildNgIO(const std::vector<framework::OpDesc*>& op_descs,
const std::vector<int>& interval);
// get ngraph input and define ngraph input parameters
void GetNgInputShape();
// Call ngraph bridge to map ops
void BuildNgNodes();
// build ngraph function call
std::shared_ptr<ngraph::Function> BuildNgFunction(
const framework::ExecutionContext& ctx);
// clear ngraph engine cache and t_in cache
void ClearNgCache();
// Check cache for ngraph function or otherwise build the function
void GetNgFunction(const framework::ExecutionContext& ctx);
};
} // namespace operators
} // namespace paddle
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <string>
#include "paddle/fluid/framework/block_desc.h"
#include "paddle/fluid/framework/op_desc.h"
#include "paddle/fluid/framework/op_info.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/ngraph/ngraph_engine_op.h"
namespace paddle {
namespace operators {
class NgraphEngineOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() override {
AddInput("Xs", "A list of inputs.").AsDispensable();
AddOutput("Ys", "A list of outputs").AsDispensable();
AddAttr<std::string>("graph", "the graph.");
AddAttr<std::string>("engine_key", "the engine hash key.");
AddAttr<std::vector<int>>("interval", "op interval supported by ngraph");
AddComment("ngraph engine operator.");
}
};
class NgraphEngineInferVarType : public framework::VarTypeInference {
public:
void operator()(framework::InferVarTypeContext *ctx) const override {}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OPERATOR(ngraph_engine, ops::NgraphEngineOp, ops::NgraphEngineOpMaker);
REGISTER_OP_CPU_KERNEL(
ngraph_engine,
ops::NgraphEngineKernel<paddle::platform::CPUDeviceContext, float>);
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <string>
#include <vector>
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/operators/ngraph/ngraph_engine.h"
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/platform/place.h"
namespace paddle {
namespace operators {
class NgraphEngineOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected:
void InferShape(framework::InferShapeContext* ctx) const override {}
framework::OpKernelType GetExpectedKernelType(
const framework::ExecutionContext& ctx) const override {
framework::OpKernelType kt = framework::OpKernelType(
framework::proto::VarType::FP32, platform::CPUPlace());
return kt;
}
};
template <typename DeviceContext, typename T>
class NgraphEngineKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto& scope = ctx.scope();
auto place = ctx.GetPlace();
NgraphEngine ngraph_engine(scope, place, ctx);
ngraph_engine.Run(scope, place);
}
};
} // namespace operators
} // namespace paddle
file(GLOB LIST_OPS RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "*.h")
set(pass_file ${PADDLE_BINARY_DIR}/paddle/fluid/operators/ngraph/ngraph_ops.h)
file(APPEND ${pass_file} "\#pragma once\n")
file(WRITE ${pass_file} "// Generated by the /paddle/fluid/operators/ngraph/ops/CMakeLists.txt. DO NOT EDIT!\n\n")
foreach(OPS_NAME ${LIST_OPS})
file(APPEND ${pass_file} "\#include \"paddle/fluid/operators/ngraph/ops/${OPS_NAME}\"\n")
endforeach(OPS_NAME)
/*Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "ngraph/ngraph.hpp"
#include "paddle/fluid/operators/ngraph/ops/op_bridge.h"
#include "paddle/fluid/platform/ngraph_helper.h"
namespace paddle {
namespace operators {
namespace ngraphs {
void BuildAccuracyNode(
const std::shared_ptr<framework::OperatorBase>& op,
std::shared_ptr<
std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
ngb_node_map) {
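// What the node graph below computes (assuming "Indices" holds the per-sample
// top-k predicted class ids and "Label" the ground-truth id): the label is
// broadcast to the (num_samples, k) shape of the predictions, compared
// element-wise with the indices, and the matches are summed over both axes.
// Since the k indices of a sample are distinct, that sum equals the number of
// correctly predicted samples, and Accuracy = Correct / Total.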
auto indices = platform::GetInputNode(op, "Indices", ngb_node_map);
auto label = platform::GetInputNode(op, "Label", ngb_node_map);
auto inference = platform::GetInputNode(op, "Out", ngb_node_map);
auto inference_shape = inference->get_shape();
size_t num_samples = inference_shape.at(0);
size_t k = inference_shape.at(1);
std::shared_ptr<ngraph::Node> label_k = label;
if (k > 1) {
auto label_1d = std::make_shared<ngraph::op::Reshape>(
label, ngraph::AxisVector{0, 1}, ngraph::Shape{num_samples});
label_k = std::make_shared<ngraph::op::Broadcast>(label_1d, inference_shape,
ngraph::AxisSet{1});
}
auto node_equal = std::make_shared<ngraph::op::Equal>(indices, label_k);
auto node_eq_int =
std::make_shared<ngraph::op::Convert>(node_equal, ngraph::element::i64);
auto num_correct_0d =
std::make_shared<ngraph::op::Sum>(node_eq_int, ngraph::AxisSet{0, 1});
std::shared_ptr<ngraph::Node> num_correct =
platform::NgReshaper(num_correct_0d, ngraph::Shape{1});
std::shared_ptr<ngraph::Node> n_samples = ngraph::op::Constant::create(
ngraph::element::i64, ngraph::Shape{1}, {num_samples});
std::shared_ptr<ngraph::Node> accuracy = std::make_shared<ngraph::op::Divide>(
std::make_shared<ngraph::op::Convert>(num_correct, ngraph::element::f32),
std::make_shared<ngraph::op::Convert>(n_samples, ngraph::element::f32));
platform::SetOutputNode(op, "Accuracy", accuracy, ngb_node_map);
platform::SetOutputNode(op, "Correct", num_correct, ngb_node_map);
platform::SetOutputNode(op, "Total", n_samples, ngb_node_map);
}
} // namespace ngraphs
} // namespace operators
} // namespace paddle
REGISTER_NG_OP(accuracy, BuildAccuracyNode);
/*Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <memory>
#include <string>
#include <unordered_map>
#include "ngraph/ngraph.hpp"
#include "paddle/fluid/operators/ngraph/ops/op_bridge.h"
#include "paddle/fluid/platform/ngraph_helper.h"
namespace paddle {
namespace operators {
namespace ngraphs {
void BuildGeluNode(
const std::shared_ptr<framework::OperatorBase>& op,
std::shared_ptr<
std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
ngb_node_map) {
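// GELU(x) = 0.5 * x * (1 + erf(x / sqrt(2))); the graph below builds exactly
// this expression from ngraph constant, Sqrt, Erf and arithmetic nodes.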
auto input = platform::GetInputNode(op, "X", ngb_node_map);
auto half = paddle::platform::CreateConstant(input->get_element_type(),
input->get_shape(), {0.5});
auto one = paddle::platform::CreateConstant(input->get_element_type(),
input->get_shape(), {1});
auto sqrt_two =
std::make_shared<ngraph::op::Sqrt>(paddle::platform::CreateConstant(
input->get_element_type(), input->get_shape(), {2}));
auto out = half * input *
(one + std::make_shared<ngraph::op::Erf>(input / sqrt_two));
platform::SetOutputNode(op, "Out", out, ngb_node_map);
}
void BuildGeluGradNode(
const std::shared_ptr<framework::OperatorBase>& op,
std::shared_ptr<
std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
ngb_node_map) {
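// d/dx GELU(x) = 0.5 * (1 + erf(x / sqrt(2))) + x * exp(-x^2 / 2) / sqrt(2 * pi).
// "first" and "second" below are these two terms; the result is scaled by the
// incoming gradient dout.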
auto input = platform::GetInputNode(op, "X", ngb_node_map);
auto dout = platform::GetInputNode(op, "Out@GRAD", ngb_node_map);
auto half = paddle::platform::CreateConstant(input->get_element_type(),
input->get_shape(), {0.5});
auto minus_half = paddle::platform::CreateConstant(
input->get_element_type(), input->get_shape(), {-0.5});
auto one = paddle::platform::CreateConstant(input->get_element_type(),
input->get_shape(), {1});
auto two = paddle::platform::CreateConstant(input->get_element_type(),
input->get_shape(), {2});
auto pi = paddle::platform::CreateConstant(
input->get_element_type(), input->get_shape(), {3.14159265359});
auto sqrt_two = std::make_shared<ngraph::op::Sqrt>(two);
auto sqrt_pi = std::make_shared<ngraph::op::Sqrt>(pi);
auto first =
half * (one + std::make_shared<ngraph::op::Erf>(input * one / sqrt_two));
auto second = half * (two / sqrt_pi) * (one / sqrt_two) * input *
std::make_shared<ngraph::op::Exp>(minus_half * input * input);
auto gelu_grad = dout * (first + second);
platform::SetOutputNode(op, "X@GRAD", gelu_grad, ngb_node_map);
}
void BuildReluGradNode(
const std::shared_ptr<framework::OperatorBase>& op,
std::shared_ptr<
std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
ngb_node_map) {
auto out = platform::GetInputNode(op, "Out", ngb_node_map);
auto dout = platform::GetInputNode(op, "Out@GRAD", ngb_node_map);
auto relu_grad = std::make_shared<ngraph::op::ReluBackprop>(out, dout);
platform::SetOutputNode(op, "X@GRAD", relu_grad, ngb_node_map);
}
void BuildSquareNode(
const std::shared_ptr<framework::OperatorBase>& op,
std::shared_ptr<
std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
ngb_node_map) {
auto input = platform::GetInputNode(op, "X", ngb_node_map);
auto out = input * input;
platform::SetOutputNode(op, "Out", out, ngb_node_map);
}
void BuildTanhGradNode(
const std::shared_ptr<framework::OperatorBase>& op,
std::shared_ptr<
std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
ngb_node_map) {
auto out = platform::GetInputNode(op, "Out", ngb_node_map);
auto dout = platform::GetInputNode(op, "Out@GRAD", ngb_node_map);
auto shape = out->get_shape();
auto node_const =
ngraph::op::Constant::create(ngraph::element::f32, shape, {1});
auto result = dout * (node_const - out * out);
platform::SetOutputNode(op, "X@GRAD", result, ngb_node_map);
}
} // namespace ngraphs
} // namespace operators
} // namespace paddle
REGISTER_NG_OP(gelu, BuildGeluNode);
REGISTER_NG_OP(gelu_grad, BuildGeluGradNode);
REGISTER_NG_OP(relu_grad, BuildReluGradNode);
REGISTER_NG_OP(square, BuildSquareNode);
REGISTER_NG_OP(tanh_grad, BuildTanhGradNode);
/*Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <memory>
#include <string>
#include <unordered_map>
#include "ngraph/ngraph.hpp"
#include "paddle/fluid/operators/ngraph/ops/elementwise_scalar_op.h"
#include "paddle/fluid/operators/ngraph/ops/op_bridge.h"
#include "paddle/fluid/platform/ngraph_helper.h"
namespace paddle {
namespace operators {
namespace ngraphs {
void BuildAdamNode(
const std::shared_ptr<framework::OperatorBase>& op,
std::shared_ptr<
std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
ngb_node_map) {
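// The graph below implements the standard Adam update, matching the attrs
// read further down:
//   moment1_out   = beta1 * moment1 + (1 - beta1) * grad
//   moment2_out   = beta2 * moment2 + (1 - beta2) * grad^2
//   lr_t          = learning_rate * sqrt(1 - beta2_pow) / (1 - beta1_pow)
//   param_out     = param - lr_t * moment1_out / (sqrt(moment2_out) + epsilon)
//   beta1_pow_out = beta1 * beta1_pow,  beta2_pow_out = beta2 * beta2_pow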
auto op_attrs = framework::AttrReader(op->Attrs());
auto beta1pow = platform::GetInputNode(op, "Beta1Pow", ngb_node_map);
auto beta2pow = platform::GetInputNode(op, "Beta2Pow", ngb_node_map);
auto grad = platform::GetInputNode(op, "Grad", ngb_node_map);
auto learning_rate = platform::GetInputNode(op, "LearningRate", ngb_node_map);
auto moment1 = platform::GetInputNode(op, "Moment1", ngb_node_map);
auto moment2 = platform::GetInputNode(op, "Moment2", ngb_node_map);
auto param = platform::GetInputNode(op, "Param", ngb_node_map);
auto epsilon = op_attrs.Get<float>("epsilon");
auto beta2 = op_attrs.Get<float>("beta2");
auto beta1 = op_attrs.Get<float>("beta1");
auto moment1_shape = moment1->get_shape();
auto grad_shape = grad->get_shape();
auto moment1out = std::make_shared<ngraph::op::Add>(
ElementwiseScalar<ngraph::op::Multiply>(beta1, moment1),
ElementwiseScalar<ngraph::op::Multiply>(1. - beta1, grad));
auto grad_square = std::make_shared<ngraph::op::Multiply>(grad, grad);
auto moment2out = std::make_shared<ngraph::op::Add>(
ElementwiseScalar<ngraph::op::Multiply>(beta2, moment2),
ElementwiseScalar<ngraph::op::Multiply>(1. - beta2, grad_square));
auto node_sqrt = std::make_shared<ngraph::op::Sqrt>(
ElementwiseScalar<ngraph::op::Subtract>(1., beta2pow));
auto lr = std::make_shared<ngraph::op::Divide>(
node_sqrt, ElementwiseScalar<ngraph::op::Subtract>(1., beta1pow));
auto updated_lr = std::make_shared<ngraph::op::Multiply>(learning_rate, lr);
auto moment2_sqrt = std::make_shared<ngraph::op::Sqrt>(moment2out);
auto param_grad = std::make_shared<ngraph::op::Divide>(
moment1out, ElementwiseScalar<ngraph::op::Add>(epsilon, moment2_sqrt));
auto delta = ElementwiseScalar<ngraph::op::Multiply>(updated_lr, param_grad);
auto param_out = std::make_shared<ngraph::op::Subtract>(param, delta);
auto beta1_pow_out = ElementwiseScalar<ngraph::op::Multiply>(beta1, beta1pow);
auto beta2_pow_out = ElementwiseScalar<ngraph::op::Multiply>(beta2, beta2pow);
platform::SetOutputNode(op, "Moment1Out", moment1out, ngb_node_map);
platform::SetOutputNode(op, "Moment2Out", moment2out, ngb_node_map);
platform::SetOutputNode(op, "ParamOut", param_out, ngb_node_map);
platform::SetOutputNode(op, "Beta1PowOut", beta1_pow_out, ngb_node_map);
platform::SetOutputNode(op, "Beta2PowOut", beta2_pow_out, ngb_node_map);
}
} // namespace ngraphs
} // namespace operators
} // namespace paddle
REGISTER_NG_OP(adam, BuildAdamNode);
/*Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <functional>
#include <memory>
#include <string>
#include <unordered_map>
#include "ngraph/ngraph.hpp"
#include "paddle/fluid/operators/ngraph/ops/op_bridge.h"
#include "paddle/fluid/platform/ngraph_helper.h"
namespace paddle {
namespace operators {
namespace ngraphs {
static void BuildAssignNode(
const std::shared_ptr<framework::OperatorBase>& op,
std::shared_ptr<
std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
ngb_node_map) {
auto input = platform::GetInputNode(op, "X", ngb_node_map);
auto out = input;
paddle::platform::SetOutputNode(op, "Out", out, ngb_node_map);
}
} // namespace ngraphs
} // namespace operators
} // namespace paddle
REGISTER_NG_OP(assign, BuildAssignNode);
/*Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "ngraph/ngraph.hpp"
#include "paddle/fluid/operators/ngraph/ops/elementwise_node.h"
#include "paddle/fluid/operators/ngraph/ops/elementwise_scalar_op.h"
#include "paddle/fluid/operators/ngraph/ops/op_bridge.h"
#include "paddle/fluid/platform/ngraph_helper.h"
namespace paddle {
namespace operators {
namespace ngraphs {
void BuildBatchNormNode(
const std::shared_ptr<paddle::framework::OperatorBase>& op,
std::shared_ptr<
std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
ngb_node_map) {
auto op_attrs = paddle::framework::AttrReader(op->Attrs());
auto& data_layout = op_attrs.Get<std::string>("data_layout");
auto bias = paddle::platform::GetInputNode(op, "Bias", ngb_node_map);
auto mean = paddle::platform::GetInputNode(op, "Mean", ngb_node_map);
auto variance = paddle::platform::GetInputNode(op, "Variance", ngb_node_map);
auto scale = paddle::platform::GetInputNode(op, "Scale", ngb_node_map);
auto x = paddle::platform::GetInputNode(op, "X", ngb_node_map);
const bool is_test = op_attrs.Get<bool>("is_test");
const float epsilon = op_attrs.Get<float>("epsilon");
const float momentum = op_attrs.Get<float>("momentum");
PADDLE_ENFORCE(
data_layout == "NHWC" || data_layout == "NCHW" || data_layout == "NC",
"The BatchNorm operator only supports NHWC/NCHW/NC data format");
if (data_layout == "NHWC") {
x = paddle::platform::Nhwc2Nchw(x);
}
std::shared_ptr<ngraph::Node> mean_out, saved_mean, saved_variance,
variance_out, y;
if (!is_test) {
auto BN = std::make_shared<ngraph::op::BatchNormTraining>(epsilon, scale,
bias, x);
y = std::make_shared<ngraph::op::GetOutputElement>(BN, 0);
saved_mean = std::make_shared<ngraph::op::GetOutputElement>(BN, 1);
saved_variance = std::make_shared<ngraph::op::GetOutputElement>(BN, 2);
mean_out = std::make_shared<ngraph::op::Add>(
paddle::operators::ngraphs::ElementwiseScalar<ngraph::op::Multiply>(
momentum, mean),
paddle::operators::ngraphs::ElementwiseScalar<ngraph::op::Multiply>(
1. - momentum, saved_mean));
variance_out = std::make_shared<ngraph::op::Add>(
paddle::operators::ngraphs::ElementwiseScalar<ngraph::op::Multiply>(
momentum, variance),
paddle::operators::ngraphs::ElementwiseScalar<ngraph::op::Multiply>(
1. - momentum, saved_variance));
if (data_layout == "NHWC") {
y = paddle::platform::Nchw2Nhwc(y);
}
paddle::platform::SetOutputNode(op, "MeanOut", mean_out, ngb_node_map);
paddle::platform::SetOutputNode(op, "VarianceOut", variance_out,
ngb_node_map);
paddle::platform::SetOutputNode(op, "SavedMean", saved_mean, ngb_node_map);
paddle::platform::SetOutputNode(op, "SavedVariance", saved_variance,
ngb_node_map);
paddle::platform::SetOutputNode(op, "Y", y, ngb_node_map);
} else {
y = std::make_shared<ngraph::op::BatchNormInference>(epsilon, scale, bias,
x, mean, variance);
paddle::platform::SetOutputNode(op, "Y", y, ngb_node_map);
}
}
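// Gradient of batch_norm: 2-D inputs are reshaped to 4-D first, then
// BatchNormTrainingBackprop produces dX, dScale and dBias from the saved
// batch statistics and the incoming Y@GRAD.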
void BuildBatchNormGradNode(
const std::shared_ptr<paddle::framework::OperatorBase>& op,
std::shared_ptr<
std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
ngb_node_map) {
auto op_attrs = paddle::framework::AttrReader(op->Attrs());
auto& data_layout = op_attrs.Get<std::string>("data_layout");
auto bias = paddle::platform::GetInputNode(op, "Bias", ngb_node_map);
auto saved_mean =
paddle::platform::GetInputNode(op, "SavedMean", ngb_node_map);
auto saved_variance =
paddle::platform::GetInputNode(op, "SavedVariance", ngb_node_map);
auto scale = paddle::platform::GetInputNode(op, "Scale", ngb_node_map);
auto x = paddle::platform::GetInputNode(op, "X", ngb_node_map);
auto dy = paddle::platform::GetInputNode(op, "Y@GRAD", ngb_node_map);
auto x_shape = x->get_shape();
auto dy_shape = dy->get_shape();
PADDLE_ENFORCE(x_shape.size() == 2 || x_shape.size() == 4,
"BN grap input size needs to be 2 or 4");
PADDLE_ENFORCE_EQ(x_shape.size(), dy_shape.size(),
"BN grap input and delta size needs to be equal");
PADDLE_ENFORCE(
data_layout == "NHWC" || data_layout == "NCHW" || data_layout == "NC",
"The BatchNorm operator only supports NHWC/NCHW/NC data format");
if (x_shape.size() == 2) {
x = std::make_shared<ngraph::op::Reshape>(
x, ngraph::AxisVector{0, 1},
ngraph::Shape{x_shape.at(0), x_shape.at(1), 1, 1});
dy = std::make_shared<ngraph::op::Reshape>(
dy, ngraph::AxisVector{0, 1},
ngraph::Shape{dy_shape.at(0), dy_shape.at(1), 1, 1});
}
if (data_layout == "NHWC") {
x = paddle::platform::Nhwc2Nchw(x);
dy = paddle::platform::Nhwc2Nchw(dy);
}
const float epsilon = op_attrs.Get<float>("epsilon");
auto bn_bprop = std::make_shared<ngraph::op::BatchNormTrainingBackprop>(
epsilon, scale, bias, x, saved_mean, saved_variance, dy);
std::shared_ptr<ngraph::Node> dx =
std::make_shared<ngraph::op::GetOutputElement>(bn_bprop, 0);
auto dscale = std::make_shared<ngraph::op::GetOutputElement>(bn_bprop, 1);
auto dbias = std::make_shared<ngraph::op::GetOutputElement>(bn_bprop, 2);
paddle::platform::SetOutputNode(op, "Bias@GRAD", dbias, ngb_node_map);
paddle::platform::SetOutputNode(op, "Scale@GRAD", dscale, ngb_node_map);
if (x_shape.size() == 2) {
paddle::platform::SetOutputNode(
op, "X@GRAD", paddle::platform::NgReshaper(dx, x_shape), ngb_node_map);
} else {
if (data_layout == "NHWC") {
dx = paddle::platform::Nchw2Nhwc(dx);
}
paddle::platform::SetOutputNode(op, "X@GRAD", dx, ngb_node_map);
}
}
} // namespace ngraphs
} // namespace operators
} // namespace paddle
REGISTER_NG_OP(batch_norm, BuildBatchNormNode);
REGISTER_NG_OP(batch_norm_grad, BuildBatchNormGradNode);
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <memory>
#include <string>
#include <unordered_map>
#include "ngraph/ngraph.hpp"
#include "paddle/fluid/operators/ngraph/ops/op_bridge.h"
#include "paddle/fluid/platform/ngraph_helper.h"
namespace paddle {
namespace operators {
namespace ngraphs {
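// Generic helpers: BuildUnaryNode maps a one-input Paddle op onto a single
// nGraph node (e.g. relu -> ngraph::op::Relu); BuildBinaryNode does the same
// for two-input ops that need no broadcasting (e.g. logical_and).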
template <typename T>
static void BuildBinaryNode(
const std::shared_ptr<paddle::framework::OperatorBase>& op,
std::shared_ptr<
std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
ngb_node_map) {
auto x = paddle::platform::GetInputNode(op, "X", ngb_node_map);
auto y = paddle::platform::GetInputNode(op, "Y", ngb_node_map);
auto out = std::make_shared<T>(x, y);
paddle::platform::SetOutputNode(op, "Out", out, ngb_node_map);
}
template <typename T>
static void BuildUnaryNode(
const std::shared_ptr<paddle::framework::OperatorBase>& op,
std::shared_ptr<
std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
ngb_node_map) {
auto input = paddle::platform::GetInputNode(op, "X", ngb_node_map);
auto out = std::make_shared<T>(input);
paddle::platform::SetOutputNode(op, "Out", out, ngb_node_map);
}
} // namespace ngraphs
} // namespace operators
} // namespace paddle
REGISTER_NG_OP(abs, BuildUnaryNode<ngraph::op::Abs>);
REGISTER_NG_OP(relu, BuildUnaryNode<ngraph::op::Relu>);
REGISTER_NG_OP(tanh, BuildUnaryNode<ngraph::op::Tanh>);
REGISTER_NG_OP(sigmoid, BuildUnaryNode<ngraph::op::Sigmoid>);
REGISTER_NG_OP(logical_and, BuildBinaryNode<ngraph::op::And>);
REGISTER_NG_OP(logical_or, BuildBinaryNode<ngraph::op::Or>);
REGISTER_NG_OP(logical_not, BuildUnaryNode<ngraph::op::Not>);
/*Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <functional>
#include <memory>
#include <string>
#include <unordered_map>
#include "ngraph/ngraph.hpp"
#include "paddle/fluid/operators/ngraph/ops/op_bridge.h"
#include "paddle/fluid/platform/ngraph_helper.h"
namespace paddle {
namespace operators {
namespace ngraphs {
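// cast: converts "X" to the element type requested by the out_dtype attribute
// via ngraph::op::Convert.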
static void BuildCastNode(
const std::shared_ptr<framework::OperatorBase>& op,
std::shared_ptr<
std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
ngb_node_map) {
auto input = platform::GetInputNode(op, "X", ngb_node_map);
auto op_attrs = framework::AttrReader(op->Attrs());
auto ng_dtype =
platform::GetNgType(static_cast<paddle::framework::proto::VarType::Type>(
op_attrs.Get<int>("out_dtype")));
auto out = std::make_shared<ngraph::op::Convert>(input, ng_dtype);
paddle::platform::SetOutputNode(op, "Out", out, ngb_node_map);
}
} // namespace ngraphs
} // namespace operators
} // namespace paddle
REGISTER_NG_OP(cast, BuildCastNode);
/*Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "ngraph/ngraph.hpp"
#include "paddle/fluid/operators/ngraph/ops/op_bridge.h"
#include "paddle/fluid/platform/ngraph_helper.h"
namespace paddle {
namespace operators {
namespace ngraphs {
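// concat: collects every input variable of the op and concatenates them along
// the given axis; a negative axis is wrapped by the rank of the first input.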
void BuildConcatNode(
const std::shared_ptr<framework::OperatorBase>& op,
std::shared_ptr<
std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
ngb_node_map) {
std::vector<std::shared_ptr<ngraph::Node>> args;
for (auto& var_name_item : op->Inputs()) {
for (auto& var_name : var_name_item.second) {
auto& node0 = ngb_node_map->at(var_name);
args.push_back(node0);
}
}
auto op_attrs = framework::AttrReader(op->Attrs());
int axis = op_attrs.Get<int>("axis");
if (axis < 0) {
axis = axis + args[0]->get_shape().size();
}
auto out = std::make_shared<ngraph::op::Concat>(args, axis);
platform::SetOutputNode(op, "Out", out, ngb_node_map);
}
} // namespace ngraphs
} // namespace operators
} // namespace paddle
REGISTER_NG_OP(concat, BuildConcatNode);
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "ngraph/ngraph.hpp"
#include "paddle/fluid/operators/ngraph/ops/op_bridge.h"
#include "paddle/fluid/platform/ngraph_helper.h"
namespace paddle {
namespace operators {
namespace ngraphs {
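// Grouped convolution is emulated by slicing the data batch and the filters
// along the channel dimension, running a plain Convolution per group and
// concatenating the per-group results back along the channel axis. The two
// GroupedGradConvolution* helpers below apply the same slicing scheme to
// ConvolutionBackpropFilters and ConvolutionBackpropData.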
std::shared_ptr<ngraph::Node> GroupedConvolution(
const std::shared_ptr<ngraph::Node>& data_batch,
const std::shared_ptr<ngraph::Node>& filters, const ngraph::Strides strides,
const ngraph::Strides dilations, const ngraph::CoordinateDiff& paddings,
size_t groups) {
auto& data_shape = data_batch->get_shape();
auto& filter_shape = filters->get_shape();
ngraph::NodeVector ng_slices;
for (size_t i = 0; i < groups; ++i) {
size_t channel_step = filter_shape.at(1);
const std::vector<size_t> lower_bound{0, i * channel_step, 0, 0};
const std::vector<size_t> upper_bound{data_shape.at(0),
(i + 1) * channel_step,
data_shape.at(2), data_shape.at(3)};
auto data_slice = std::make_shared<ngraph::op::Slice>(
data_batch, lower_bound, upper_bound);
size_t filter_step = filter_shape.at(0) / groups;
const std::vector<size_t> filter_lower_bound{i * filter_step, 0, 0, 0};
const std::vector<size_t> filter_upper_bound{
(i + 1) * filter_step, filter_shape.at(1), filter_shape.at(2),
filter_shape.at(3)};
auto filter_slice = std::make_shared<ngraph::op::Slice>(
filters, filter_lower_bound, filter_upper_bound);
auto ng_conv = std::make_shared<ngraph::op::Convolution>(
data_slice, filter_slice, strides, dilations, paddings, paddings);
ng_slices.push_back(ng_conv);
}
size_t concat_axis = 1;
return std::make_shared<ngraph::op::Concat>(ng_slices, concat_axis);
}
std::shared_ptr<ngraph::Node> GroupedGradConvolutionFilter(
const std::shared_ptr<ngraph::Node>& data_batch,
const std::shared_ptr<ngraph::Node>& filters,
const std::shared_ptr<ngraph::Node>& doutput, const ngraph::Strides strides,
const ngraph::Strides dilations, const ngraph::CoordinateDiff& paddings,
size_t groups) {
auto& data_shape = data_batch->get_shape();
auto& filter_shape = filters->get_shape();
auto& out_shape = doutput->get_shape();
ngraph::NodeVector ng_slices;
for (size_t i = 0; i < groups; ++i) {
size_t channel_step = filter_shape.at(1);
const std::vector<size_t> lower_bound{0, i * channel_step, 0, 0};
const std::vector<size_t> upper_bound{data_shape.at(0),
(i + 1) * channel_step,
data_shape.at(2), data_shape.at(3)};
auto data_slice = std::make_shared<ngraph::op::Slice>(
data_batch, lower_bound, upper_bound);
size_t filter_step = filter_shape.at(0) / groups;
const std::vector<size_t> filter_lower_bound{i * filter_step, 0, 0, 0};
const std::vector<size_t> filter_upper_bound{
(i + 1) * filter_step, filter_shape.at(1), filter_shape.at(2),
filter_shape.at(3)};
auto filter_slice = std::make_shared<ngraph::op::Slice>(
filters, filter_lower_bound, filter_upper_bound);
const std::vector<size_t> olower_bound{0, i * filter_step, 0, 0};
const std::vector<size_t> oupper_bound{out_shape.at(0),
(i + 1) * filter_step,
out_shape.at(2), out_shape.at(3)};
auto out_slice = std::make_shared<ngraph::op::Slice>(doutput, olower_bound,
oupper_bound);
auto ng_conv = std::make_shared<ngraph::op::ConvolutionBackpropFilters>(
data_slice, filter_slice->get_shape(), out_slice, strides, dilations,
paddings, paddings, ngraph::Strides{1, 1});
ng_slices.push_back(ng_conv);
}
size_t concat_axis = 0;
return std::make_shared<ngraph::op::Concat>(ng_slices, concat_axis);
}
std::shared_ptr<ngraph::Node> GroupedGradConvolutionData(
const std::shared_ptr<ngraph::Node>& data_batch,
const std::shared_ptr<ngraph::Node>& filters,
const std::shared_ptr<ngraph::Node>& doutput, const ngraph::Strides strides,
const ngraph::Strides dilations, const ngraph::CoordinateDiff& paddings,
size_t groups) {
auto& data_shape = data_batch->get_shape();
auto& filter_shape = filters->get_shape();
auto& out_shape = doutput->get_shape();
ngraph::NodeVector ng_slices;
for (size_t i = 0; i < groups; ++i) {
size_t channel_step = filter_shape.at(1);
const std::vector<size_t> lower_bound{0, i * channel_step, 0, 0};
const std::vector<size_t> upper_bound{data_shape.at(0),
(i + 1) * channel_step,
data_shape.at(2), data_shape.at(3)};
auto data_slice = std::make_shared<ngraph::op::Slice>(
data_batch, lower_bound, upper_bound);
size_t filter_step = filter_shape.at(0) / groups;
const std::vector<size_t> filter_lower_bound{i * filter_step, 0, 0, 0};
const std::vector<size_t> filter_upper_bound{
(i + 1) * filter_step, filter_shape.at(1), filter_shape.at(2),
filter_shape.at(3)};
auto filter_slice = std::make_shared<ngraph::op::Slice>(
filters, filter_lower_bound, filter_upper_bound);
const std::vector<size_t> olower_bound{0, i * filter_step, 0, 0};
const std::vector<size_t> oupper_bound{out_shape.at(0),
(i + 1) * filter_step,
out_shape.at(2), out_shape.at(3)};
auto out_slice = std::make_shared<ngraph::op::Slice>(doutput, olower_bound,
oupper_bound);
auto ng_conv = std::make_shared<ngraph::op::ConvolutionBackpropData>(
data_slice->get_shape(), filter_slice, out_slice, strides, dilations,
paddings, paddings, ngraph::Strides{1, 1});
ng_slices.push_back(ng_conv);
}
size_t concat_axis = 1;
return std::make_shared<ngraph::op::Concat>(ng_slices, concat_axis);
}
void BuildConv2dNode(
const std::shared_ptr<paddle::framework::OperatorBase>& op,
std::shared_ptr<
std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
ngb_node_map) {
auto op_attrs = paddle::framework::AttrReader(op->Attrs());
auto filters = paddle::platform::GetInputNode(op, "Filter", ngb_node_map);
auto input = paddle::platform::GetInputNode(op, "Input", ngb_node_map);
std::vector<int> strides = op_attrs.Get<std::vector<int>>("strides");
std::vector<int> paddings = op_attrs.Get<std::vector<int>>("paddings");
std::vector<int> dilations = op_attrs.Get<std::vector<int>>("dilations");
const ngraph::Strides ng_strides{static_cast<size_t>(strides.at(0)),
static_cast<size_t>(strides.at(1))};
const ngraph::Strides ng_dilations{static_cast<size_t>(dilations.at(0)),
static_cast<size_t>(dilations.at(1))};
const ngraph::CoordinateDiff ng_paddings{
static_cast<std::ptrdiff_t>(paddings.at(0)),
static_cast<std::ptrdiff_t>(paddings.at(1))};
int groups = op_attrs.Get<int>("groups");
PADDLE_ENFORCE_GE(groups, 1, "conv groups must be no less than 1");
std::shared_ptr<ngraph::Node> result;
if (groups == 1) {
result = std::make_shared<ngraph::op::Convolution>(
input, filters, ng_strides, ng_dilations, ng_paddings, ng_paddings);
} else {
result = GroupedConvolution(input, filters, ng_strides, ng_dilations,
ng_paddings, groups);
}
paddle::platform::SetOutputNode(op, "Output", result, ngb_node_map);
}
void BuildConv2dGradNode(
const std::shared_ptr<paddle::framework::OperatorBase>& op,
std::shared_ptr<
std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
ngb_node_map) {
auto op_attrs = paddle::framework::AttrReader(op->Attrs());
auto filter = paddle::platform::GetInputNode(op, "Filter", ngb_node_map);
auto input = paddle::platform::GetInputNode(op, "Input", ngb_node_map);
auto doutput =
paddle::platform::GetInputNode(op, "Output@GRAD", ngb_node_map);
int groups = op_attrs.Get<int>("groups");
std::vector<int> strides = op_attrs.Get<std::vector<int>>("strides");
std::vector<int> paddings = op_attrs.Get<std::vector<int>>("paddings");
std::vector<int> dilations = op_attrs.Get<std::vector<int>>("dilations");
const ngraph::Strides ng_strides{static_cast<size_t>(strides.at(0)),
static_cast<size_t>(strides.at(1))};
const ngraph::Strides ng_dilations{static_cast<size_t>(dilations.at(0)),
static_cast<size_t>(dilations.at(1))};
const ngraph::CoordinateDiff ng_paddings{
static_cast<std::ptrdiff_t>(paddings.at(0)),
static_cast<std::ptrdiff_t>(paddings.at(1))};
std::shared_ptr<ngraph::Node> dfilter;
std::shared_ptr<ngraph::Node> dinput;
if (groups == 1) {
dfilter = std::make_shared<ngraph::op::ConvolutionBackpropFilters>(
input, filter->get_shape(), doutput, ng_strides, ng_dilations,
ng_paddings, ng_paddings, ngraph::Strides{1, 1});
dinput = std::make_shared<ngraph::op::ConvolutionBackpropData>(
input->get_shape(), filter, doutput, ng_strides, ng_dilations,
ng_paddings, ng_paddings, ngraph::Strides{1, 1});
} else {
dfilter = GroupedGradConvolutionFilter(input, filter, doutput, ng_strides,
ng_dilations, ng_paddings, groups);
dinput = GroupedGradConvolutionData(input, filter, doutput, ng_strides,
ng_dilations, ng_paddings, groups);
}
paddle::platform::SetOutputNode(op, "Filter@GRAD", dfilter, ngb_node_map);
paddle::platform::SetOutputNode(op, "Input@GRAD", dinput, ngb_node_map);
}
} // namespace ngraphs
} // namespace operators
} // namespace paddle
REGISTER_NG_OP(conv2d, BuildConv2dNode);
REGISTER_NG_OP(conv2d_grad, BuildConv2dGradNode);
REGISTER_NG_OP(depthwise_conv2d, BuildConv2dNode);
/*Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <functional>
#include <memory>
#include <string>
#include <unordered_map>
#include "ngraph/ngraph.hpp"
#include "paddle/fluid/operators/ngraph/ops/op_bridge.h"
#include "paddle/fluid/platform/ngraph_helper.h"
namespace paddle {
namespace operators {
namespace ngraphs {
std::shared_ptr<ngraph::Node> remove_trailing_one(
const std::shared_ptr<ngraph::Node>& input) {
auto shape = input->get_shape();
if (shape.back() == 1 && shape.size() > 1) {
shape.pop_back();
return platform::NgReshaper(input, shape);
} else {
return input;
}
}
std::shared_ptr<ngraph::Node> flatten_node(
const std::shared_ptr<ngraph::Node>& input) {
auto shape = input->get_shape();
auto rank = shape.size();
auto output = input;
if (rank > 2) {
auto shape_2d = paddle::platform::FlattenTo2d(shape, rank - 1);
output = paddle::platform::NgReshaper(input, shape_2d);
}
return output;
}
std::shared_ptr<ngraph::Node> convert_to_node_type(
const std::shared_ptr<ngraph::Node>& input,
const std::shared_ptr<ngraph::Node>& ref) {
auto output = input;
if (input->get_element_type() != ref->get_element_type()) {
output =
std::make_shared<ngraph::op::Convert>(input, ref->get_element_type());
}
return output;
}
std::shared_ptr<ngraph::Node> create_xe(
const std::shared_ptr<ngraph::Node>& one_hot,
const std::shared_ptr<ngraph::Node>& x) {
auto node_log = std::make_shared<ngraph::op::Log>(x);
auto node_mul = one_hot * node_log;
auto node_sum = std::make_shared<ngraph::op::Sum>(
node_mul, ngraph::AxisSet{x->get_shape().size() - 1});
auto shape = x->get_shape();
shape.back() = 1;
return platform::NgReshaper(-node_sum, shape);
}
std::shared_ptr<ngraph::Node> create_mask(
const std::shared_ptr<ngraph::Node>& label, int ignore_index) {
auto ignore_node = paddle::platform::CreateConstant(
label->get_element_type(), label->get_shape(), {ignore_index});
auto not_equal_node =
std::make_shared<ngraph::op::NotEqual>(label, ignore_node);
return not_equal_node;
}
std::shared_ptr<ngraph::Node> create_one_hot(
const std::shared_ptr<ngraph::Node>& label,
const std::shared_ptr<ngraph::Node>& x) {
auto label_shape = label->get_shape();
return std::make_shared<ngraph::op::OneHot>(
remove_trailing_one(label), x->get_shape(), x->get_shape().size() - 1);
}
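// Cross entropy: for hard labels the label tensor is first expanded to a
// one-hot encoding, then xe = -sum(one_hot * log(x)) over the last axis;
// entries whose label equals ignore_index are masked out afterwards.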
std::shared_ptr<ngraph::Node> GetCrossEntropy(
std::shared_ptr<ngraph::Node> x, std::shared_ptr<ngraph::Node> label,
const bool is_soft_label, int ignore_index) {
std::shared_ptr<ngraph::Node> node_1_hot = label;
if (!is_soft_label) {
node_1_hot = create_one_hot(label, x);
}
node_1_hot = convert_to_node_type(node_1_hot, x);
auto xe = create_xe(node_1_hot, x);
if (!is_soft_label) {
auto mask = convert_to_node_type(create_mask(label, ignore_index), xe);
xe = xe * mask;
}
return xe;
}
std::shared_ptr<ngraph::Node> GetCrossEntropyGrad(
std::shared_ptr<ngraph::Node> x, std::shared_ptr<ngraph::Node> label,
std::shared_ptr<ngraph::Node> dy, const bool is_soft_label,
int ignore_index) {
auto x_shape = x->get_shape();
auto rank = x_shape.size();
std::shared_ptr<ngraph::Node> mask;
if (!is_soft_label) {
mask = convert_to_node_type(create_mask(label, ignore_index), x);
mask = std::make_shared<ngraph::op::Broadcast>(
remove_trailing_one(mask), x_shape, ngraph::AxisSet{rank - 1});
label = create_one_hot(label, x);
}
auto dy_reshape = remove_trailing_one(dy);
auto dy_bcast = std::make_shared<ngraph::op::Broadcast>(
dy_reshape, x_shape, ngraph::AxisSet{rank - 1});
label = convert_to_node_type(label, x);
auto xe_grad = -label * dy_bcast / x;
if (!is_soft_label) {
xe_grad = xe_grad * mask;
}
return xe_grad;
}
void BuildCrossEntropyNode(
const std::shared_ptr<paddle::framework::OperatorBase>& op,
std::shared_ptr<
std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
ngb_node_map) {
auto x = paddle::platform::GetInputNode(op, "X", ngb_node_map);
auto label = paddle::platform::GetInputNode(op, "Label", ngb_node_map);
auto op_attrs = paddle::framework::AttrReader(op->Attrs());
const bool is_soft_label = op_attrs.Get<bool>("soft_label");
int ignore_index = op_attrs.Get<int>("ignore_index");
auto xe = GetCrossEntropy(x, label, is_soft_label, ignore_index);
paddle::platform::SetOutputNode(op, "Y", xe, ngb_node_map);
}
void BuildCrossEntropyGradNode(
const std::shared_ptr<paddle::framework::OperatorBase>& op,
std::shared_ptr<
std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
ngb_node_map) {
auto op_attrs = paddle::framework::AttrReader(op->Attrs());
const bool is_soft_label = op_attrs.Get<bool>("soft_label");
int ignore_index = op_attrs.Get<int>("ignore_index");
auto x = paddle::platform::GetInputNode(op, "X", ngb_node_map);
auto label = paddle::platform::GetInputNode(op, "Label", ngb_node_map);
auto dy = paddle::platform::GetInputNode(op, "Y@GRAD", ngb_node_map);
auto xe_grad = GetCrossEntropyGrad(x, label, dy, is_soft_label, ignore_index);
paddle::platform::SetOutputNode(op, "X@GRAD", xe_grad, ngb_node_map);
}
void BuildCrossEntropy2Node(
const std::shared_ptr<paddle::framework::OperatorBase>& op,
std::shared_ptr<
std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
ngb_node_map) {
auto x = paddle::platform::GetInputNode(op, "X", ngb_node_map);
auto label = paddle::platform::GetInputNode(op, "Label", ngb_node_map);
auto op_attrs = paddle::framework::AttrReader(op->Attrs());
int ignore_index = op_attrs.Get<int>("ignore_index");
auto rank = x->get_shape().size();
auto one_hot = convert_to_node_type(create_one_hot(label, x), x);
auto xe = create_xe(one_hot, x);
auto mask = convert_to_node_type(create_mask(label, ignore_index), xe);
xe = xe * mask;
std::shared_ptr<ngraph::Node> node_sum =
std::make_shared<ngraph::op::Sum>(one_hot * x, ngraph::AxisSet{rank - 1});
node_sum = paddle::platform::NgReshaper(node_sum, mask->get_shape());
auto matchx = mask * node_sum;
paddle::platform::SetOutputNode(op, "MatchX", matchx, ngb_node_map);
platform::SetOutputNode(op, "XShape", x, ngb_node_map);
paddle::platform::SetOutputNode(op, "Y", xe, ngb_node_map);
}
void BuildCrossEntropyGrad2Node(
const std::shared_ptr<paddle::framework::OperatorBase>& op,
std::shared_ptr<
std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
ngb_node_map) {
auto op_attrs = paddle::framework::AttrReader(op->Attrs());
int ignore_index = op_attrs.Get<int>("ignore_index");
auto matchx = paddle::platform::GetInputNode(op, "MatchX", ngb_node_map);
auto label = paddle::platform::GetInputNode(op, "Label", ngb_node_map);
auto x = paddle::platform::GetInputNode(op, "XShape", ngb_node_map);
auto dy = paddle::platform::GetInputNode(op, framework::GradVarName("Y"),
ngb_node_map);
matchx = remove_trailing_one(matchx);
label = remove_trailing_one(label);
x = remove_trailing_one(x);
dy = remove_trailing_one(dy);
auto x_shape = x->get_shape();
auto rank = x_shape.size();
auto one_hot = convert_to_node_type(create_one_hot(label, x), x);
auto mask = convert_to_node_type(create_mask(label, ignore_index), x);
auto zero = paddle::platform::CreateConstant(matchx->get_element_type(),
matchx->get_shape(), {0});
auto one = paddle::platform::CreateConstant(matchx->get_element_type(),
matchx->get_shape(), {1});
auto is_zero = std::make_shared<ngraph::op::Equal>(matchx, zero);
matchx = std::make_shared<ngraph::op::Select>(is_zero, one, matchx);
auto dy_bcast = std::make_shared<ngraph::op::Broadcast>(
mask * dy, x_shape, ngraph::AxisSet{rank - 1});
auto matchx_bcast = std::make_shared<ngraph::op::Broadcast>(
matchx, x_shape, ngraph::AxisSet{rank - 1});
auto xe_grad = -dy_bcast * one_hot / matchx_bcast;
paddle::platform::SetOutputNode(op, framework::GradVarName("X"), xe_grad,
ngb_node_map);
}
} // namespace ngraphs
} // namespace operators
} // namespace paddle
REGISTER_NG_OP(cross_entropy, BuildCrossEntropyNode);
REGISTER_NG_OP(cross_entropy_grad, BuildCrossEntropyGradNode);
REGISTER_NG_OP(cross_entropy2, BuildCrossEntropy2Node);
REGISTER_NG_OP(cross_entropy_grad2, BuildCrossEntropyGrad2Node);
/*Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <functional>
#include <memory>
#include <string>
#include <unordered_map>
#include "ngraph/ngraph.hpp"
#include "ngraph/op/experimental/generate_mask.hpp"
#include "paddle/fluid/operators/ngraph/ops/elementwise_scalar_op.h"
#include "paddle/fluid/operators/ngraph/ops/op_bridge.h"
#include "paddle/fluid/platform/ngraph_helper.h"
namespace paddle {
namespace operators {
namespace ngraphs {
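// dropout: at inference time the input is either passed through unchanged
// (upscale_in_train) or scaled by (1 - dropout_prob); at training time a
// Bernoulli mask is drawn with GenerateMask and, for upscale_in_train, the
// masked input is additionally divided by the keep probability.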
static void BuildDropoutNode(
const std::shared_ptr<framework::OperatorBase>& op,
std::shared_ptr<
std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
ngb_node_map) {
auto input = platform::GetInputNode(op, "X", ngb_node_map);
auto op_attrs = framework::AttrReader(op->Attrs());
auto dropout_prob = op_attrs.Get<float>("dropout_prob");
auto dropout_implementation =
op_attrs.Get<std::string>("dropout_implementation");
auto is_test = op_attrs.Get<bool>("is_test");
auto seed = op_attrs.Get<int>("seed");
auto fix_seed = op_attrs.Get<bool>("fix_seed");
float value = 1.0f - dropout_prob;
bool upscale_in_train = (dropout_implementation == "upscale_in_train");
if (is_test) {
if (upscale_in_train) {
platform::SetOutputNode(op, "Out", input, ngb_node_map);
} else {
auto mask_val = paddle::platform::CreateConstant(
input->get_element_type(), input->get_shape(), {value});
auto out = input * mask_val;
platform::SetOutputNode(op, "Out", out, ngb_node_map);
}
} else {
auto one = paddle::platform::CreateConstant(input->get_element_type(),
ngraph::Shape{}, {1});
auto gen_mask = std::make_shared<ngraph::op::GenerateMask>(
one, input->get_shape(), input->get_element_type(), seed, value,
fix_seed);
if (upscale_in_train) {
auto mask_val = paddle::platform::CreateConstant(
input->get_element_type(), input->get_shape(), {value});
auto out = value ? input * gen_mask / mask_val : input * gen_mask;
platform::SetOutputNode(op, "Mask", gen_mask, ngb_node_map);
platform::SetOutputNode(op, "Out", out, ngb_node_map);
} else {
auto out = input * gen_mask;
platform::SetOutputNode(op, "Mask", gen_mask, ngb_node_map);
platform::SetOutputNode(op, "Out", out, ngb_node_map);
}
}
}
static void BuildDropoutGradNode(
const std::shared_ptr<framework::OperatorBase>& op,
std::shared_ptr<
std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
ngb_node_map) {
auto dy = platform::GetInputNode(op, "Out@GRAD", ngb_node_map);
auto mask = platform::GetInputNode(op, "Mask", ngb_node_map);
if (dy->get_element_type() != mask->get_element_type()) {
mask = std::make_shared<ngraph::op::Convert>(mask, dy->get_element_type());
}
auto op_attrs = framework::AttrReader(op->Attrs());
auto dropout_prob = op_attrs.Get<float>("dropout_prob");
auto dropout_implementation =
op_attrs.Get<std::string>("dropout_implementation");
auto dx = dy * mask;
if (dropout_implementation == "upscale_in_train") {
if (dropout_prob == 1.0f) {
dx = ElementwiseScalar<ngraph::op::Multiply>(0., dy);
} else {
dx =
ElementwiseScalar<ngraph::op::Multiply>(1. / (1. - dropout_prob), dx);
}
}
platform::SetOutputNode(op, "X@GRAD", dx, ngb_node_map);
}
} // namespace ngraphs
} // namespace operators
} // namespace paddle
REGISTER_NG_OP(dropout, BuildDropoutNode);
REGISTER_NG_OP(dropout_grad, BuildDropoutGradNode);
/*Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "ngraph/ngraph.hpp"
#include "paddle/fluid/operators/ngraph/ops/elementwise_node.h"
#include "paddle/fluid/operators/ngraph/ops/op_bridge.h"
#include "paddle/fluid/platform/ngraph_helper.h"
namespace paddle {
namespace operators {
namespace ngraphs {
void BuildElementwiseAddNode(
const std::shared_ptr<paddle::framework::OperatorBase>& op,
std::shared_ptr<
std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
ngb_node_map) {
BuildElementwiseBinaryNode<ngraph::op::Add>(op, ngb_node_map);
}
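// elementwise_add_grad: X@GRAD is simply Out@GRAD; when Y was broadcast in the
// forward pass, Y@GRAD is obtained by summing Out@GRAD over the broadcast
// dimensions and reshaping the result back to Y's shape.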
void BuildElementwiseAddGradNode(
const std::shared_ptr<paddle::framework::OperatorBase>& op,
std::shared_ptr<
std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
ngb_node_map) {
auto op_attrs = paddle::framework::AttrReader(op->Attrs());
int axis = op_attrs.Get<int>("axis");
auto dout = paddle::platform::GetInputNode(op, "Out@GRAD", ngb_node_map);
auto y = paddle::platform::GetInputNode(op, "Y", ngb_node_map);
auto dout_shape = dout->get_shape();
auto y_shape = y->get_shape();
if (dout_shape == y_shape) {
paddle::platform::SetOutputNode(op, "X@GRAD", dout, ngb_node_map);
paddle::platform::SetOutputNode(op, "Y@GRAD", dout, ngb_node_map);
} else {
axis = (axis == -1 ? dout_shape.size() - y_shape.size() : axis);
paddle::platform::TrimTrailingSingularDims(&y_shape);
axis = (y_shape.size() == 0 ? dout_shape.size() : axis);
int pre, n, post;
paddle::platform::GetMidDims(dout_shape, y_shape, axis, &pre, &n, &post);
ngraph::Shape lhs_shape{};
lhs_shape.push_back(pre);
lhs_shape.push_back(n);
if (post != 1) {
lhs_shape.push_back(post);
}
std::vector<size_t> lhs_order(dout_shape.size());
std::iota(std::begin(lhs_order), std::end(lhs_order), 0);
auto dout_reshape = std::make_shared<ngraph::op::Reshape>(
dout, ngraph::AxisVector(lhs_order), lhs_shape);
ngraph::AxisSet axis_set{0};
if (post != 1) {
axis_set.insert(2);
}
auto dout_sum = std::make_shared<ngraph::op::Sum>(dout_reshape, axis_set);
auto dy = std::make_shared<ngraph::op::Reshape>(
dout_sum, ngraph::AxisVector{0}, y->get_shape());
paddle::platform::SetOutputNode(op, "X@GRAD", dout, ngb_node_map);
paddle::platform::SetOutputNode(op, "Y@GRAD", dy, ngb_node_map);
}
}
} // namespace ngraphs
} // namespace operators
} // namespace paddle
REGISTER_NG_OP(elementwise_add, BuildElementwiseAddNode);
REGISTER_NG_OP(elementwise_add_grad, BuildElementwiseAddGradNode);
/*Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "ngraph/ngraph.hpp"
#include "paddle/fluid/platform/ngraph_helper.h"
namespace paddle {
namespace operators {
namespace ngraphs {
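// Prepares the two operands of a broadcasting elementwise op: if the shapes
// already match, the nodes are returned as-is; otherwise Y is reshaped to its
// significant dimension and then broadcast/reshaped so that it matches X's
// shape starting at the given axis.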
ngraph::NodeVector ElementwiseBinaryNodePrepare(
const std::shared_ptr<paddle::framework::OperatorBase>& op,
std::shared_ptr<
std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
ngb_node_map) {
auto op_attrs = paddle::framework::AttrReader(op->Attrs());
int axis = op_attrs.Get<int>("axis");
auto lhs = paddle::platform::GetInputNode(op, "X", ngb_node_map);
auto rhs = paddle::platform::GetInputNode(op, "Y", ngb_node_map);
auto lhs_shape = lhs->get_shape();
auto rhs_shape = rhs->get_shape();
PADDLE_ENFORCE_GE(lhs_shape.size(), rhs_shape.size(),
"Rank of first input must >= rank of second input.");
if (lhs_shape == rhs_shape) {
return ngraph::NodeVector{lhs, rhs};
}
axis = (rhs_shape.size() == 0) ? lhs_shape.size() - 1 : axis;
axis = (axis == -1 ? lhs_shape.size() - rhs_shape.size() : axis);
PADDLE_ENFORCE(axis >= 0 && axis < static_cast<int>(lhs_shape.size()),
"Axis should be in range [0, lhs_shape.size())");
paddle::platform::TrimTrailingSingularDims(&rhs_shape);
int pre, n, post;
paddle::platform::GetMidDims(lhs_shape, rhs_shape, axis, &pre, &n, &post);
ngraph::Shape l_shape{};
l_shape.push_back(pre);
l_shape.push_back(n);
l_shape.push_back(post);
std::vector<size_t> rhs_order(rhs->get_shape().size());
std::iota(std::begin(rhs_order), std::end(rhs_order), 0);
ngraph::Shape r_shape{};
r_shape.push_back(n);
auto rhs_reshape = std::make_shared<ngraph::op::Reshape>(
rhs, ngraph::AxisVector(rhs_order), r_shape);
auto rhs_bcast = std::make_shared<ngraph::op::Broadcast>(
rhs_reshape, l_shape, ngraph::AxisSet{0, 2});
std::vector<size_t> bcast_order(rhs_bcast->get_shape().size());
std::iota(std::begin(bcast_order), std::end(bcast_order), 0);
std::shared_ptr<ngraph::Node> rhs_bcast_reshape =
std::make_shared<ngraph::op::Reshape>(
rhs_bcast, ngraph::AxisVector(bcast_order), lhs_shape);
return ngraph::NodeVector{lhs, rhs_bcast_reshape};
}
} // namespace ngraphs
} // namespace operators
} // namespace paddle
/*Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "ngraph/ngraph.hpp"
#include "paddle/fluid/operators/ngraph/ops/elementwise_node.h"
#include "paddle/fluid/operators/ngraph/ops/op_bridge.h"
#include "paddle/fluid/platform/ngraph_helper.h"
namespace paddle {
namespace operators {
namespace ngraphs {
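// elementwise_div_grad: dX = dOut / Y and dY = -(Out * dOut) / Y; when Y was
// broadcast in the forward pass, the dY term is first summed over the
// broadcast dimensions, and Y is broadcast back to dOut's shape for the dX term.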
void BuildElementwiseDivGradNode(
const std::shared_ptr<paddle::framework::OperatorBase>& op,
std::shared_ptr<
std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
ngb_node_map) {
auto op_attrs = paddle::framework::AttrReader(op->Attrs());
int axis = op_attrs.Get<int>("axis");
auto dout = paddle::platform::GetInputNode(op, "Out@GRAD", ngb_node_map);
auto y = paddle::platform::GetInputNode(op, "Y", ngb_node_map);
auto out = paddle::platform::GetInputNode(op, "Out", ngb_node_map);
auto dout_shape = dout->get_shape();
auto y_shape = y->get_shape();
if (dout->get_element_type() != y->get_element_type()) {
y = std::make_shared<ngraph::op::Convert>(y, dout->get_element_type());
}
auto dy_hd = std::make_shared<ngraph::op::Multiply>(out, dout);
if (dout_shape == y_shape) {
auto dx = std::make_shared<ngraph::op::Divide>(dout, y);
auto dy = std::make_shared<ngraph::op::Divide>(dy_hd, -y);
paddle::platform::SetOutputNode(op, "X@GRAD", dx, ngb_node_map);
paddle::platform::SetOutputNode(op, "Y@GRAD", dy, ngb_node_map);
} else {
auto dy_hd_shape = dy_hd->get_shape();
axis = (axis == -1 ? dy_hd_shape.size() - y_shape.size() : axis);
paddle::platform::TrimTrailingSingularDims(&y_shape);
axis = (y_shape.size() == 0 ? dy_hd_shape.size() : axis);
int pre, n, post;
paddle::platform::GetMidDims(dy_hd_shape, y_shape, axis, &pre, &n, &post);
ngraph::Shape lhs_shape{};
lhs_shape.push_back(pre);
lhs_shape.push_back(n);
if (post != 1) {
lhs_shape.push_back(post);
}
std::vector<size_t> dy_order(dout_shape.size());
std::iota(std::begin(dy_order), std::end(dy_order), 0);
auto dy_hd_reshape = std::make_shared<ngraph::op::Reshape>(
dy_hd, ngraph::AxisVector(dy_order), lhs_shape);
ngraph::AxisSet axis_set{0};
if (post != 1) {
axis_set.insert(2);
}
auto dy_sum = std::make_shared<ngraph::op::Sum>(dy_hd_reshape, axis_set);
auto dy_sum_yshape = std::make_shared<ngraph::op::Reshape>(
dy_sum, ngraph::AxisVector{0}, y->get_shape());
auto dy_ = std::make_shared<ngraph::op::Divide>(dy_sum_yshape, -y);
paddle::platform::SetOutputNode(op, "Y@GRAD", dy_, ngb_node_map);
y_shape = y->get_shape();
std::vector<size_t> y_order(y_shape.size() == 0 ? 1 : y_shape.size());
std::iota(std::begin(y_order), std::end(y_order), 0);
auto y_reshape = std::make_shared<ngraph::op::Reshape>(
y, ngraph::AxisVector(y_order), ngraph::Shape{(size_t)n});
auto y_broadcast =
std::make_shared<ngraph::op::Broadcast>(y_reshape, lhs_shape, axis_set);
std::vector<size_t> lhs_order(lhs_shape.size());
std::iota(std::begin(lhs_order), std::end(lhs_order), 0);
auto y_broadcast_reshape = std::make_shared<ngraph::op::Reshape>(
y_broadcast, ngraph::AxisVector(lhs_order), dout_shape);
auto dx = std::make_shared<ngraph::op::Divide>(dout, y_broadcast_reshape);
paddle::platform::SetOutputNode(op, "X@GRAD", dx, ngb_node_map);
}
}
} // namespace ngraphs
} // namespace operators
} // namespace paddle
REGISTER_NG_OP(elementwise_div_grad, BuildElementwiseDivGradNode);
/*Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "ngraph/ngraph.hpp"
#include "paddle/fluid/operators/ngraph/ops/elementwise_node.h"
#include "paddle/fluid/operators/ngraph/ops/op_bridge.h"
#include "paddle/fluid/platform/ngraph_helper.h"
namespace paddle {
namespace operators {
namespace ngraphs {
void BuildElementwiseMulNode(
const std::shared_ptr<paddle::framework::OperatorBase>& op,
std::shared_ptr<
std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
ngb_node_map) {
BuildElementwiseBinaryNode<ngraph::op::Multiply>(op, ngb_node_map);
}
void BuildElementwiseMulGradNode(
const std::shared_ptr<paddle::framework::OperatorBase>& op,
std::shared_ptr<
std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
ngb_node_map) {
auto op_attrs = paddle::framework::AttrReader(op->Attrs());
int axis = op_attrs.Get<int>("axis");
auto dout = paddle::platform::GetInputNode(op, "Out@GRAD", ngb_node_map);
auto y = paddle::platform::GetInputNode(op, "Y", ngb_node_map);
auto x = paddle::platform::GetInputNode(op, "X", ngb_node_map);
auto dout_shape = dout->get_shape();
auto y_shape = y->get_shape();
auto x_shape = x->get_shape();
if (dout->get_element_type() != y->get_element_type()) {
y = std::make_shared<ngraph::op::Convert>(y, dout->get_element_type());
}
if (dout_shape == y_shape) {
auto dx = std::make_shared<ngraph::op::Multiply>(dout, y);
auto dy = std::make_shared<ngraph::op::Multiply>(dout, x);
paddle::platform::SetOutputNode(op, "X@GRAD", dx, ngb_node_map);
paddle::platform::SetOutputNode(op, "Y@GRAD", dy, ngb_node_map);
} else {
auto dy_hd = std::make_shared<ngraph::op::Multiply>(dout, x);
auto dy_hd_shape = dy_hd->get_shape();
axis = (axis == -1 ? dy_hd_shape.size() - y_shape.size() : axis);
paddle::platform::TrimTrailingSingularDims(&y_shape);
axis = (y_shape.size() == 0 ? dy_hd_shape.size() : axis);
int pre, n, post;
paddle::platform::GetMidDims(dy_hd_shape, y_shape, axis, &pre, &n, &post);
ngraph::Shape lhs_shape{};
lhs_shape.push_back(pre);
lhs_shape.push_back(n);
if (post != 1) {
lhs_shape.push_back(post);
}
std::vector<size_t> dy_order(dout_shape.size());
std::iota(std::begin(dy_order), std::end(dy_order), 0);
auto dy_hd_reshape = std::make_shared<ngraph::op::Reshape>(
dy_hd, ngraph::AxisVector(dy_order), lhs_shape);
ngraph::AxisSet axis_set{0};
if (post != 1) {
axis_set.insert(2);
}
auto dy_sum = std::make_shared<ngraph::op::Sum>(dy_hd_reshape, axis_set);
auto dy_sum_yshape = std::make_shared<ngraph::op::Reshape>(
dy_sum, ngraph::AxisVector{0}, y->get_shape());
paddle::platform::SetOutputNode(op, "Y@GRAD", dy_sum_yshape, ngb_node_map);
y_shape = y->get_shape();
std::vector<size_t> y_order(y_shape.size() == 0 ? 1 : y_shape.size());
std::iota(std::begin(y_order), std::end(y_order), 0);
auto y_reshape = std::make_shared<ngraph::op::Reshape>(
y, ngraph::AxisVector(y_order), ngraph::Shape{(size_t)n});
auto y_broadcast =
std::make_shared<ngraph::op::Broadcast>(y_reshape, lhs_shape, axis_set);
std::vector<size_t> lhs_order(lhs_shape.size());
std::iota(std::begin(lhs_order), std::end(lhs_order), 0);
auto y_broadcast_reshape = std::make_shared<ngraph::op::Reshape>(
y_broadcast, ngraph::AxisVector(lhs_order), dout_shape);
auto dx = std::make_shared<ngraph::op::Multiply>(y_broadcast_reshape, dout);
paddle::platform::SetOutputNode(op, "X@GRAD", dx, ngb_node_map);
}
}
} // namespace ngraphs
} // namespace operators
} // namespace paddle
REGISTER_NG_OP(elementwise_mul, BuildElementwiseMulNode);
REGISTER_NG_OP(elementwise_mul_grad, BuildElementwiseMulGradNode);
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <memory>
#include <string>
#include <unordered_map>
#include "ngraph/ngraph.hpp"
#include "paddle/fluid/operators/ngraph/ops/elementwise_binary_prepare_node.h"
#include "paddle/fluid/operators/ngraph/ops/op_bridge.h"
#include "paddle/fluid/platform/ngraph_helper.h"
namespace paddle {
namespace operators {
namespace ngraphs {
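// BuildElementwiseBinaryNode broadcasts Y to X via ElementwiseBinaryNodePrepare,
// converts Y to X's element type and emits the requested nGraph op;
// BuildElementwiseCompareNode instead promotes both operands to f64 when their
// element types differ before comparing.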
template <typename T>
void BuildElementwiseBinaryNode(
const std::shared_ptr<paddle::framework::OperatorBase>& op,
std::shared_ptr<
std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
ngb_node_map) {
auto nodes = ElementwiseBinaryNodePrepare(op, ngb_node_map);
std::shared_ptr<ngraph::Node>& x = nodes.at(0);
std::shared_ptr<ngraph::Node>& y = nodes.at(1);
y = std::make_shared<ngraph::op::Convert>(y, x->get_element_type());
auto out = std::make_shared<T>(x, y);
paddle::platform::SetOutputNode(op, "Out", out, ngb_node_map);
}
template <typename T>
void BuildElementwiseCompareNode(
const std::shared_ptr<paddle::framework::OperatorBase>& op,
std::shared_ptr<
std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
ngb_node_map) {
auto nodes = ElementwiseBinaryNodePrepare(op, ngb_node_map);
std::shared_ptr<ngraph::Node>& x = nodes.at(0);
std::shared_ptr<ngraph::Node>& y = nodes.at(1);
if (x->get_element_type() != y->get_element_type()) {
x = std::make_shared<ngraph::op::Convert>(x, ngraph::element::f64);
y = std::make_shared<ngraph::op::Convert>(y, ngraph::element::f64);
}
auto out = std::make_shared<T>(x, y);
paddle::platform::SetOutputNode(op, "Out", out, ngb_node_map);
}
} // namespace ngraphs
} // namespace operators
} // namespace paddle
REGISTER_NG_OP(elementwise_max,
BuildElementwiseBinaryNode<ngraph::op::Maximum>);
REGISTER_NG_OP(elementwise_pow, BuildElementwiseBinaryNode<ngraph::op::Power>);
REGISTER_NG_OP(elementwise_sub,
BuildElementwiseBinaryNode<ngraph::op::Subtract>);
REGISTER_NG_OP(elementwise_min,
BuildElementwiseBinaryNode<ngraph::op::Minimum>);
REGISTER_NG_OP(less_than, BuildElementwiseCompareNode<ngraph::op::Less>);
REGISTER_NG_OP(elementwise_div, BuildElementwiseBinaryNode<ngraph::op::Divide>);
/*Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <string>
#include "ngraph/ngraph.hpp"
#include "paddle/fluid/platform/ngraph_helper.h"
namespace paddle {
namespace operators {
namespace ngraphs {
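// ElementwiseScalar applies a binary nGraph op between a node and a scalar:
// the first overload builds a constant of the node's shape from a float, the
// second broadcasts a 1-d scale node of shape {1} to the node's shape.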
template <typename T>
std::shared_ptr<ngraph::Node> ElementwiseScalar(
float scale, std::shared_ptr<ngraph::Node> node) {
auto node_shape = node->get_shape();
auto scale_const = ngraph::op::Constant::create(node->get_element_type(),
node_shape, {scale});
return std::make_shared<T>(scale_const, node);
}
template <typename T>
std::shared_ptr<ngraph::Node> ElementwiseScalar(
std::shared_ptr<ngraph::Node> scale_1d,
std::shared_ptr<ngraph::Node> node) {
auto scale_shape = scale_1d->get_shape();
PADDLE_ENFORCE_EQ(scale_shape.size(), 1, "the scale node must be 1-d");
PADDLE_ENFORCE_EQ(scale_shape.at(0), 1, "the 1-d scale node must have shape {1}");
auto node_shape = node->get_shape();
ngraph::AxisSet axis_set;
for (size_t i = 0; i < node_shape.size(); ++i) {
axis_set.insert(i);
}
node_shape.push_back(1);
auto scale_bcast =
std::make_shared<ngraph::op::Broadcast>(scale_1d, node_shape, axis_set);
auto scale_reshape =
paddle::platform::NgReshaper(scale_bcast, node->get_shape());
return std::make_shared<T>(scale_reshape, node);
}
} // namespace ngraphs
} // namespace operators
} // namespace paddle
/*Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "ngraph/ngraph.hpp"
#include "paddle/fluid/operators/ngraph/ops/op_bridge.h"
#include "paddle/fluid/platform/ngraph_helper.h"
namespace paddle {
namespace operators {
namespace ngraphs {
void BuildFillConstantNode(
const std::shared_ptr<paddle::framework::OperatorBase>& op,
std::shared_ptr<
std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
ngb_node_map) {
auto op_attrs = paddle::framework::AttrReader(op->Attrs());
auto vsp = op_attrs.Get<std::vector<int64_t>>("shape");
ngraph::Shape shape;
for (auto& sp : vsp) {
shape.push_back(sp);
}
float value = op_attrs.Get<float>("value");
auto ng_dtype =
platform::GetNgType(static_cast<paddle::framework::proto::VarType::Type>(
op_attrs.Get<int>("dtype")));
auto out = ngraph::op::Constant::create(ng_dtype, shape, {value});
paddle::platform::SetOutputNode(op, "Out", out, ngb_node_map);
}
} // namespace ngraphs
} // namespace operators
} // namespace paddle
REGISTER_NG_OP(fill_constant, BuildFillConstantNode);
/*Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <functional>
#include <memory>
#include <string>
#include <unordered_map>
#include "ngraph/ngraph.hpp"
#include "paddle/fluid/operators/ngraph/ops/op_bridge.h"
#include "paddle/fluid/platform/ngraph_helper.h"
namespace paddle {
namespace operators {
namespace ngraphs {
static void BuildFillZerosLikeNode(
const std::shared_ptr<framework::OperatorBase>& op,
std::shared_ptr<
std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
ngb_node_map) {
auto x = platform::GetInputNode(op, "X", ngb_node_map);
auto out = paddle::platform::CreateConstant(x->get_element_type(),
x->get_shape(), {0});
platform::SetOutputNode(op, "Out", out, ngb_node_map);
}
} // namespace ngraphs
} // namespace operators
} // namespace paddle
REGISTER_NG_OP(fill_zeros_like, BuildFillZerosLikeNode);
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <functional>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "ngraph/ngraph.hpp"
#include "paddle/fluid/operators/ngraph/ops/op_bridge.h"
#include "paddle/fluid/platform/ngraph_helper.h"
namespace paddle {
namespace operators {
namespace ngraphs {
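// gather: selects rows of X given by Index (a 1-d tensor, or an {n, 1} tensor
// that is squeezed first). The gradient scatter-adds Out@GRAD into a zero
// tensor of X's shape at the same indices.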
void BuildGatherNode(
const std::shared_ptr<paddle::framework::OperatorBase>& op,
std::shared_ptr<
std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
ngb_node_map) {
auto x = platform::GetInputNode(op, "X", ngb_node_map);
PADDLE_ENFORCE_NOT_NULL(x);
auto index = platform::GetInputNode(op, "Index", ngb_node_map);
auto& index_shape = index->get_shape();
PADDLE_ENFORCE(index_shape.size() == 1 ||
(index_shape.size() == 2 && index_shape[1] == 1));
if (index_shape.size() == 2) {
index = platform::NgReshaper(index, ngraph::Shape{index_shape[0]});
}
auto out = std::make_shared<ngraph::op::Gather>(x, index);
paddle::platform::SetOutputNode(op, "Out", out, ngb_node_map);
}
void BuildGatherGradNode(
const std::shared_ptr<paddle::framework::OperatorBase>& op,
std::shared_ptr<
std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
ngb_node_map) {
auto dout = platform::GetInputNode(op, "Out@GRAD", ngb_node_map);
PADDLE_ENFORCE_NOT_NULL(dout);
auto x = platform::GetInputNode(op, "X", ngb_node_map);
auto index = platform::GetInputNode(op, "Index", ngb_node_map);
auto& index_shape = index->get_shape();
PADDLE_ENFORCE(index_shape.size() == 1 ||
(index_shape.size() == 2 && index_shape[1] == 1));
if (index_shape.size() == 2) {
index = platform::NgReshaper(index, ngraph::Shape{index_shape[0]});
}
std::shared_ptr<ngraph::Node> x0 = paddle::platform::CreateConstant(
dout->get_element_type(), x->get_shape(), {0});
auto dx = std::make_shared<ngraph::op::ScatterAdd>(x0, index, dout);
paddle::platform::SetOutputNode(op, "X@GRAD", dx, ngb_node_map);
}
} // namespace ngraphs
} // namespace operators
} // namespace paddle
REGISTER_NG_OP(gather, BuildGatherNode);
REGISTER_NG_OP(gather_grad, BuildGatherGradNode);
/*Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "ngraph/ngraph.hpp"
#include "paddle/fluid/operators/ngraph/ops/elementwise_node.h"
#include "paddle/fluid/platform/ngraph_helper.h"
namespace paddle {
namespace operators {
namespace ngraphs {
void BuildIncrementNode(
const std::shared_ptr<paddle::framework::OperatorBase>& op,
std::shared_ptr<
std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
ngb_node_map) {
auto x = paddle::platform::GetInputNode(op, "X", ngb_node_map);
auto op_attrs = paddle::framework::AttrReader(op->Attrs());
float step = op_attrs.Get<float>("step");
auto step_op = std::make_shared<ngraph::op::Constant>(
x->get_element_type(), x->get_shape(), std::vector<float>{step});
std::shared_ptr<ngraph::Node> out =
std::make_shared<ngraph::op::Add>(x, step_op);
paddle::platform::SetOutputNode(op, "Out", out, ngb_node_map);
}
} // namespace ngraphs
} // namespace operators
} // namespace paddle
REGISTER_NG_OP(increment, BuildIncrementNode);
/*Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <memory>
#include <numeric>
#include <string>
#include <unordered_map>
#include <vector>
#include "ngraph/ngraph.hpp"
#include "paddle/fluid/operators/ngraph/ops/op_bridge.h"
#include "paddle/fluid/platform/ngraph_helper.h"
namespace paddle {
namespace operators {
namespace ngraphs {
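// The helpers below reshape the per-row statistics (mean, variance, stddev)
// and the scale/bias vectors so they broadcast against the full input shape,
// splitting the axes at begin_norm_axis.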
std::shared_ptr<ngraph::Node> reshape_reduction(
std::shared_ptr<ngraph::Node> node, const ngraph::Shape shape,
int begin_norm_axis) {
ngraph::Shape keepdims_shape(shape.begin(), shape.begin() + begin_norm_axis);
return paddle::platform::NgReshaper(node, keepdims_shape);
}
std::shared_ptr<ngraph::Node> broadcast_reduction(
std::shared_ptr<ngraph::Node> node, const ngraph::Shape shape,
int begin_norm_axis) {
ngraph::AxisSet axis_set;
for (size_t i = begin_norm_axis; i < shape.size(); ++i) axis_set.insert(i);
auto reshape = reshape_reduction(node, shape, begin_norm_axis);
return std::make_shared<ngraph::op::Broadcast>(reshape, shape, axis_set);
}
std::shared_ptr<ngraph::Node> reshape_bias_scale(
std::shared_ptr<ngraph::Node> node, const ngraph::Shape shape,
int begin_norm_axis) {
ngraph::Shape keepdims_shape(shape.begin() + begin_norm_axis, shape.end());
return paddle::platform::NgReshaper(node, keepdims_shape);
}
std::shared_ptr<ngraph::Node> broadcast_bias_scale(
std::shared_ptr<ngraph::Node> node, const ngraph::Shape shape,
int begin_norm_axis) {
auto reshape = reshape_bias_scale(node, shape, begin_norm_axis);
ngraph::AxisSet axis_set;
for (int i = 0; i < begin_norm_axis; ++i) axis_set.insert(i);
return std::make_shared<ngraph::op::Broadcast>(reshape, shape, axis_set);
}
std::shared_ptr<ngraph::Node> flatten(const std::shared_ptr<ngraph::Node>& node,
bool insert_leading_one = false) {
size_t out = 1;
for (auto s : node->get_shape()) out *= s;
if (insert_leading_one) {
return paddle::platform::NgReshaper(node, ngraph::Shape{1, out});
} else {
return paddle::platform::NgReshaper(node, ngraph::Shape{out});
}
}
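// Forward layer_norm: Y = scale * (X - mean) / sqrt(variance + epsilon) + bias,
// with mean and variance reduced over the axes at and after begin_norm_axis.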
static void BuildLayerNormNode(
const std::shared_ptr<paddle::framework::OperatorBase>& op,
std::shared_ptr<
std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
ngb_node_map) {
auto op_attrs = paddle::framework::AttrReader(op->Attrs());
const auto begin_norm_axis = op_attrs.Get<int>("begin_norm_axis");
const auto epsilon = op_attrs.Get<float>("epsilon");
auto x = paddle::platform::GetInputNode(op, "X", ngb_node_map);
auto scale = paddle::platform::GetInputNode(op, "Scale", ngb_node_map);
auto bias = paddle::platform::GetInputNode(op, "Bias", ngb_node_map);
auto shape = x->get_shape();
std::vector<size_t> reduction_axes(shape.size() - begin_norm_axis);
std::iota(reduction_axes.begin(), reduction_axes.end(), begin_norm_axis);
auto mean = ngraph::builder::mean(x, reduction_axes);
auto broadcast_mean = broadcast_reduction(mean, shape, begin_norm_axis);
auto delta = x - broadcast_mean;
auto variance = ngraph::builder::mean(delta * delta, reduction_axes);
auto eps = paddle::platform::CreateConstant(variance->get_element_type(),
variance->get_shape(), {epsilon});
auto stddev = std::make_shared<ngraph::op::Sqrt>(variance + eps);
auto broadcast_stddev = broadcast_reduction(stddev, shape, begin_norm_axis);
auto norm = delta / broadcast_stddev;
if (scale) {
auto broadcast_scale = broadcast_bias_scale(scale, shape, begin_norm_axis);
norm = norm * broadcast_scale;
}
if (bias) {
auto broadcast_bias = broadcast_bias_scale(bias, shape, begin_norm_axis);
norm = norm + broadcast_bias;
}
mean = flatten(mean);
variance = flatten(variance);
paddle::platform::SetOutputNode(op, "Y", norm, ngb_node_map);
paddle::platform::SetOutputNode(op, "Mean", mean, ngb_node_map);
paddle::platform::SetOutputNode(op, "Variance", variance, ngb_node_map);
}
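// Backward layer_norm recomputes the normalized value from the saved Mean and
// Variance inputs and emits the Bias, Scale and X gradients that are requested.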
static void BuildLayerNormGradNode(
const std::shared_ptr<paddle::framework::OperatorBase>& op,
std::shared_ptr<
std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
ngb_node_map) {
auto op_attrs = paddle::framework::AttrReader(op->Attrs());
const auto begin_norm_axis = op_attrs.Get<int>("begin_norm_axis");
const auto epsilon = op_attrs.Get<float>("epsilon");
auto x = paddle::platform::GetInputNode(op, "X", ngb_node_map);
auto mean = paddle::platform::GetInputNode(op, "Mean", ngb_node_map);
auto variance = paddle::platform::GetInputNode(op, "Variance", ngb_node_map);
auto scale = paddle::platform::GetInputNode(op, "Scale", ngb_node_map);
auto dy = paddle::platform::GetInputNode(op, framework::GradVarName("Y"),
ngb_node_map);
auto dx = paddle::platform::GetOutputNode(op, framework::GradVarName("X"),
ngb_node_map);
auto dscale = paddle::platform::GetOutputNode(
op, framework::GradVarName("Scale"), ngb_node_map);
auto dbias = paddle::platform::GetOutputNode(
op, framework::GradVarName("Bias"), ngb_node_map);
auto shape = x->get_shape();
auto broadcast_mean = broadcast_reduction(mean, shape, begin_norm_axis);
auto delta = x - broadcast_mean;
auto eps = paddle::platform::CreateConstant(variance->get_element_type(),
variance->get_shape(), {epsilon});
auto stddev = std::make_shared<ngraph::op::Sqrt>(variance + eps);
auto broadcast_stddev = broadcast_reduction(stddev, shape, begin_norm_axis);
auto norm = delta / broadcast_stddev;
if (dbias) {
std::vector<size_t> reduction_axes(begin_norm_axis);
std::iota(reduction_axes.begin(), reduction_axes.end(), 0);
auto sum_dy = std::make_shared<ngraph::op::Sum>(dy, reduction_axes);
paddle::platform::SetOutputNode(op, framework::GradVarName("Bias"),
flatten(sum_dy), ngb_node_map);
}
if (dscale) {
std::vector<size_t> reduction_axes(begin_norm_axis);
std::iota(reduction_axes.begin(), reduction_axes.end(), 0);
auto sum_dy = std::make_shared<ngraph::op::Sum>(dy * norm, reduction_axes);
paddle::platform::SetOutputNode(op, framework::GradVarName("Scale"),
flatten(sum_dy), ngb_node_map);
}
if (dx) {
std::shared_ptr<ngraph::Node> dx_end = dy / broadcast_stddev;
if (dscale)
dx_end = dx_end * broadcast_bias_scale(scale, shape, begin_norm_axis);
std::vector<size_t> reduction_axes(shape.size() - begin_norm_axis);
std::iota(reduction_axes.begin(), reduction_axes.end(), begin_norm_axis);
auto dx_mean = broadcast_reduction(
ngraph::builder::mean(-dx_end, reduction_axes), shape, begin_norm_axis);
auto dx_std =
norm * broadcast_reduction(
ngraph::builder::mean(-dx_end * norm, reduction_axes), shape,
begin_norm_axis);
paddle::platform::SetOutputNode(op, framework::GradVarName("X"),
dx_end + dx_mean + dx_std, ngb_node_map);
}
}
}  // namespace ngraphs
}  // namespace operators
}  // namespace paddle
REGISTER_NG_OP(layer_norm, BuildLayerNormNode);
REGISTER_NG_OP(layer_norm_grad, BuildLayerNormGradNode);
/*Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "ngraph/ngraph.hpp"
#include "ngraph/op/embedding_lookup.hpp"
#include "paddle/fluid/operators/lookup_table_op.h"
#include "paddle/fluid/operators/ngraph/ops/op_bridge.h"
#include "paddle/fluid/platform/ngraph_helper.h"
namespace paddle {
namespace operators {
namespace ngraphs {
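// lookup_table is lowered to ngraph::op::Gather over the embedding table W.
// When padding_idx is set, the corresponding row of W is zeroed out first via
// a broadcast 0/1 mask; the sparse path is rejected explicitly.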
void BuildLookupTableNode(
const std::shared_ptr<framework::OperatorBase>& op,
std::shared_ptr<
std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
ngb_node_map) {
auto op_attrs = paddle::framework::AttrReader(op->Attrs());
const bool is_sparse = op_attrs.Get<bool>("is_sparse");
const int64_t padding_idx = op_attrs.Get<int64_t>("padding_idx");
auto ng_ids = paddle::platform::GetInputNode(op, "Ids", ngb_node_map);
PADDLE_ENFORCE_NOT_NULL(ng_ids);
const auto ng_w = paddle::platform::GetInputNode(op, "W", ngb_node_map);
PADDLE_ENFORCE_NOT_NULL(ng_w);
if (is_sparse) {
PADDLE_THROW("Sparsity is not yet supported in nGraph lookup_table op.");
}
auto ng_w_mask = ng_w;
if (padding_idx != kNoPadding) {
auto w_shape = ng_w->get_shape();
std::vector<int> maskV(w_shape[0], 1);
maskV[padding_idx] = 0;
auto maskV_node = std::make_shared<ngraph::op::Constant>(
ng_w->get_element_type(), ngraph::Shape{w_shape[0]}, maskV);
ngraph::AxisSet axis_set;
for (unsigned int i = 1; i < w_shape.size(); ++i) axis_set.insert(i);
auto maskV_bd =
std::make_shared<ngraph::op::Broadcast>(maskV_node, w_shape, axis_set);
ng_w_mask = std::make_shared<ngraph::op::Multiply>(ng_w, maskV_bd);
}
auto shape = ng_ids->get_shape();
if (shape.back() == 1) {
shape.pop_back();
ng_ids = platform::NgReshaper(ng_ids, shape);
}
auto ng_lookup = std::make_shared<ngraph::op::Gather>(ng_w_mask, ng_ids);
platform::SetOutputNode(op, "Out", ng_lookup, ngb_node_map);
}
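// lookup_table_grad scatters dOut rows into a zero tensor shaped like W
// (dense gradient only).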
void BuildLookupTableGradNode(
const std::shared_ptr<framework::OperatorBase>& op,
std::shared_ptr<
std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
ngb_node_map) {
auto op_attrs = paddle::framework::AttrReader(op->Attrs());
const bool is_sparse = op_attrs.Get<bool>("is_sparse");
auto ng_ids = paddle::platform::GetInputNode(op, "Ids", ngb_node_map);
PADDLE_ENFORCE_NOT_NULL(ng_ids);
const auto ng_w = paddle::platform::GetInputNode(op, "W", ngb_node_map);
PADDLE_ENFORCE_NOT_NULL(ng_w);
auto dout = paddle::platform::GetInputNode(op, "Out@GRAD", ngb_node_map);
if (is_sparse) {
PADDLE_THROW("Sparsity is not yet supported in nGraph lookup_table op.");
}
auto shape = ng_ids->get_shape();
if (shape.back() == 1) {
shape.pop_back();
ng_ids = platform::NgReshaper(ng_ids, shape);
}
std::shared_ptr<ngraph::Node> W0 = paddle::platform::CreateConstant(
dout->get_element_type(), ng_w->get_shape(), {0});
auto dW = std::make_shared<ngraph::op::ScatterAdd>(W0, ng_ids, dout);
platform::SetOutputNode(op, "W@GRAD", dW, ngb_node_map);
}
} // namespace ngraphs
} // namespace operators
} // namespace paddle
REGISTER_NG_OP(lookup_table, BuildLookupTableNode);
REGISTER_NG_OP(lookup_table_grad, BuildLookupTableGradNode);
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <functional>
#include <memory>
#include <string>
#include <unordered_map>
#include "ngraph/ngraph.hpp"
#include "paddle/fluid/operators/ngraph/ops/op_bridge.h"
#include "paddle/fluid/platform/ngraph_helper.h"
namespace paddle {
namespace operators {
namespace ngraphs {
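// lrn maps onto ngraph::op::LRN. PaddlePaddle's alpha is pre-scaled by the
// window size n, and MidOut is simply filled with the constant k.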
static void BuildLrnNode(
const std::shared_ptr<framework::OperatorBase>& op,
std::shared_ptr<
std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
ngb_node_map) {
auto input = platform::GetInputNode(op, "X", ngb_node_map);
auto op_attrs = framework::AttrReader(op->Attrs());
const int n = op_attrs.Get<int>("n");
const float alpha = op_attrs.Get<float>("alpha") * static_cast<float>(n);
const float beta = op_attrs.Get<float>("beta");
const float k = op_attrs.Get<float>("k");
auto lrn_out = std::make_shared<ngraph::op::LRN>(input, alpha, beta, k, n);
std::shared_ptr<ngraph::Node> mid_out = paddle::platform::CreateConstant(
input->get_element_type(), input->get_shape(), {k});
platform::SetOutputNode(op, "MidOut", mid_out, ngb_node_map);
platform::SetOutputNode(op, "Out", lrn_out, ngb_node_map);
}
} // namespace ngraphs
} // namespace operators
} // namespace paddle
REGISTER_NG_OP(lrn, BuildLrnNode);
/*Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <memory>
#include <numeric>
#include <string>
#include <unordered_map>
#include <vector>
#include "ngraph/ngraph.hpp"
#include "paddle/fluid/operators/ngraph/ops/elementwise_scalar_op.h"
#include "paddle/fluid/operators/ngraph/ops/op_bridge.h"
#include "paddle/fluid/platform/ngraph_helper.h"
namespace paddle {
namespace operators {
namespace ngraphs {
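// transposeAndFlat3D collapses all leading dimensions of a rank >= 3 tensor
// into a single batch dimension, optionally swapping the last two axes;
// 1-D inputs are promoted to a row (x side) or column (y side) vector.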
std::shared_ptr<ngraph::Node> transposeAndFlat3D(
const std::shared_ptr<ngraph::Node>& input, const bool transpose,
bool x = true) {
auto shape = input->get_shape();
size_t n = shape.size();
std::shared_ptr<ngraph::Node> output;
if (n >= 3) {
std::vector<size_t> order(n);
std::iota(std::begin(order), std::end(order), 0);
size_t outer = 1;
for (size_t i = 0; i < n - 2; i++) {
outer = outer * shape[i];
}
std::vector<size_t> reshape{outer, shape[n - 2], shape[n - 1]};
if (transpose == true) {
order[n - 2] = n - 1;
order[n - 1] = n - 2;
reshape[2] = shape[n - 2];
reshape[1] = shape[n - 1];
}
output = std::make_shared<ngraph::op::Reshape>(
input, ngraph::AxisVector(order), ngraph::Shape(reshape));
} else {
std::shared_ptr<ngraph::Node> temp;
if (n == 1 && x == true) {
temp = std::make_shared<ngraph::op::Reshape>(input, ngraph::AxisVector{0},
ngraph::Shape{1, shape[0]});
} else if (n == 1 && x == false) {
temp = std::make_shared<ngraph::op::Reshape>(input, ngraph::AxisVector{0},
ngraph::Shape{shape[0], 1});
} else {
temp = input;
}
auto temp_shape = temp->get_shape();
if (transpose == true) {
output = std::make_shared<ngraph::op::Reshape>(
temp, ngraph::AxisVector{1, 0},
ngraph::Shape{temp_shape[1], temp_shape[0]});
} else {
output = temp;
}
}
return output;
}
std::shared_ptr<ngraph::Node> broadcast3D(
const std::shared_ptr<ngraph::Node>& input, size_t axis0) {
auto shape = input->get_shape();
size_t n = shape.size();
if (n == 2) {
auto output = std::make_shared<ngraph::op::Broadcast>(
input, ngraph::Shape{axis0, shape[0], shape[1]}, ngraph::AxisSet{0});
return output;
}
return input;
}
std::shared_ptr<ngraph::Node> dotOp(const std::shared_ptr<ngraph::Node>& a,
const std::shared_ptr<ngraph::Node>& b) {
std::shared_ptr<ngraph::Node> out;
auto a_shape = a->get_shape();
auto na = a_shape.size();
auto b_shape = b->get_shape();
auto nb = b_shape.size();
if (na > 2 && nb > 2) {
out = std::make_shared<ngraph::op::BatchMatMul>(a, b);
} else {
out = std::make_shared<ngraph::op::Dot>(a, b);
}
return out;
}
std::shared_ptr<ngraph::Node> reshapeToOriginal(
std::shared_ptr<ngraph::Node> input, const ngraph::Shape& shape) {
auto input_shape = input->get_shape();
std::vector<size_t> axis(input_shape.size());
std::iota(axis.begin(), axis.end(), 0);
auto out = std::make_shared<ngraph::op::Reshape>(input, axis, shape);
return out;
}
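// matmul: when either operand has rank > 2, both are flattened/broadcast to
// 3-D and multiplied with ngraph::op::BatchMatMul; otherwise a plain
// ngraph::op::Dot is used. The result is reshaped back and scaled by alpha.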
void BuildMatMulNode(
const std::shared_ptr<paddle::framework::OperatorBase>& op,
std::shared_ptr<
std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
ngb_node_map) {
auto x = paddle::platform::GetInputNode(op, "X", ngb_node_map);
auto y = paddle::platform::GetInputNode(op, "Y", ngb_node_map);
auto op_attrs = paddle::framework::AttrReader(op->Attrs());
bool transpose_x = op_attrs.Get<bool>("transpose_X");
bool transpose_y = op_attrs.Get<bool>("transpose_Y");
float alpha = op_attrs.Get<float>("alpha");
std::shared_ptr<ngraph::Node> out;
auto x_shape = x->get_shape();
auto y_shape = y->get_shape();
size_t nx = x_shape.size();
size_t ny = y_shape.size();
x = transposeAndFlat3D(x, transpose_x, true);
y = transposeAndFlat3D(y, transpose_y, false);
auto y_shape3 = y->get_shape();
auto x_shape3 = x->get_shape();
if (nx > 2 || ny > 2) {
ngraph::Shape out_shape = x_shape;
if (nx != 3) {
x = broadcast3D(x, y_shape3[0]);
out_shape = y_shape;
}
if (ny != 3) {
y = broadcast3D(y, x_shape3[0]);
out_shape = x_shape;
}
auto nout = out_shape.size();
auto out3 = std::make_shared<ngraph::op::BatchMatMul>(x, y);
auto out3_shape = out3->get_shape();
out_shape[nout - 1] = out3_shape[2];
out_shape[nout - 2] = out3_shape[1];
out = std::make_shared<ngraph::op::Reshape>(
out3, ngraph::AxisVector{0, 1, 2}, out_shape);
} else {
out = std::make_shared<ngraph::op::Dot>(x, y);
}
auto out_shape = out->get_shape();
std::vector<size_t> axis(out_shape.size());
std::iota(axis.begin(), axis.end(), 0);
for (size_t i = out_shape.size() - 1; i > 0; i--) {
if (out_shape[i] == 1) {
out_shape.erase(out_shape.begin() + i);
}
}
auto out_ = std::make_shared<ngraph::op::Reshape>(
out, ngraph::AxisVector(axis), out_shape);
auto out_alpha = ElementwiseScalar<ngraph::op::Multiply>(alpha, out_);
paddle::platform::SetOutputNode(op, "Out", out_alpha, ngb_node_map);
}
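// matmul_grad rebuilds dX = dOut * Y^T and dY = X^T * dOut (batched for 3-D
// operands) and sums over the broadcast batch dimension when one operand had
// rank < 3.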
void BuildMatMulGradNode(
const std::shared_ptr<paddle::framework::OperatorBase>& op,
std::shared_ptr<
std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
ngb_node_map) {
auto op_attrs = paddle::framework::AttrReader(op->Attrs());
auto dout = paddle::platform::GetInputNode(op, "Out@GRAD", ngb_node_map);
auto y = paddle::platform::GetInputNode(op, "Y", ngb_node_map);
auto x = paddle::platform::GetInputNode(op, "X", ngb_node_map);
bool is_dx = paddle::platform::HasOutput(op, "X@GRAD") ? true : false;
bool is_dy = paddle::platform::HasOutput(op, "Y@GRAD") ? true : false;
bool transpose_x = op_attrs.Get<bool>("transpose_X");
bool transpose_y = op_attrs.Get<bool>("transpose_Y");
float alpha = op_attrs.Get<float>("alpha");
auto dout_shape = dout->get_shape();
auto x_shape = x->get_shape();
auto y_shape = y->get_shape();
size_t nx = x_shape.size();
size_t ny = y_shape.size();
size_t ndout = dout_shape.size();
std::shared_ptr<ngraph::Node> x2, y2;
std::shared_ptr<ngraph::Node> dout2;
x2 = transposeAndFlat3D(x, false);
y2 = transposeAndFlat3D(y, false, false);
dout2 = transposeAndFlat3D(dout, false);
if (nx >= 3 || ny >= 3) {
std::shared_ptr<ngraph::Node> dout_temp;
if (ndout == 2) {
dout_temp = std::make_shared<ngraph::op::Reshape>(
dout, ngraph::AxisVector{0, 1},
ngraph::Shape{dout_shape[0], dout_shape[1], 1});
if (ny < 3) {
dout2 = dout_temp;
} else {
dout2 = transposeAndFlat3D(dout_temp, true);
}
}
x2 = broadcast3D(x2, y_shape[0]);
y2 = broadcast3D(y2, x_shape[0]);
} else {
dout2 = transposeAndFlat3D(dout, false, nx == 1 && transpose_x == false);
}
if (transpose_y == false) {
y2 = transposeAndFlat3D(y2, true);
}
if (transpose_x == false) {
x2 = transposeAndFlat3D(x2, true);
}
auto dx = dotOp(dout2, y2);
auto dy = dotOp(x2, dout2);
if (transpose_x == true) {
dx = transposeAndFlat3D(dx, true);
}
if (transpose_y == true) {
dy = transposeAndFlat3D(dy, true);
}
if (nx < 3 && ny >= 3) {
dx = std::make_shared<ngraph::op::Sum>(dx, ngraph::AxisSet{0});
}
if (ny < 3 && nx >= 3) {
dy = std::make_shared<ngraph::op::Sum>(dy, ngraph::AxisSet{0});
}
auto dx_t = reshapeToOriginal(dx, x_shape);
auto dy_t = reshapeToOriginal(dy, y_shape);
  // Out = alpha * (X x Y), so dX and dY carry the same factor of alpha.
  auto dx_scale = ElementwiseScalar<ngraph::op::Multiply>(alpha, dx_t);
  auto dy_scale = ElementwiseScalar<ngraph::op::Multiply>(alpha, dy_t);
if (is_dx)
paddle::platform::SetOutputNode(op, "X@GRAD", dx_scale, ngb_node_map);
if (is_dy)
paddle::platform::SetOutputNode(op, "Y@GRAD", dy_scale, ngb_node_map);
}
} // namespace ngraphs
} // namespace operators
} // namespace paddle
REGISTER_NG_OP(matmul, BuildMatMulNode);
REGISTER_NG_OP(matmul_grad, BuildMatMulGradNode);
/*Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <functional>
#include <memory>
#include <numeric>
#include <string>
#include <unordered_map>
#include "ngraph/ngraph.hpp"
#include "paddle/fluid/operators/ngraph/ops/elementwise_scalar_op.h"
#include "paddle/fluid/operators/ngraph/ops/op_bridge.h"
#include "paddle/fluid/platform/ngraph_helper.h"
namespace paddle {
namespace operators {
namespace ngraphs {
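// mean reduces X over all axes and reshapes the scalar result to shape {1}.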
void BuildMeanNode(
const std::shared_ptr<paddle::framework::OperatorBase>& op,
std::shared_ptr<
std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
ngb_node_map) {
auto input = paddle::platform::GetInputNode(op, "X", ngb_node_map);
ngraph::AxisSet axes;
for (size_t i = 0; i < input->get_shape().size(); ++i) {
axes.insert(i);
}
auto mean = ngraph::builder::mean(input, axes);
auto mean_1d = std::make_shared<ngraph::op::Reshape>(
mean, ngraph::AxisVector{}, ngraph::Shape{1});
paddle::platform::SetOutputNode(op, "Out", mean_1d, ngb_node_map);
}
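// mean_grad broadcasts dOut / numel(X) back to the shape of X.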
void BuildMeanGradNode(
const std::shared_ptr<paddle::framework::OperatorBase>& op,
std::shared_ptr<
std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
ngb_node_map) {
auto x = paddle::platform::GetInputNode(op, "X", ngb_node_map);
auto og = paddle::platform::GetInputNode(op, "Out@GRAD", ngb_node_map);
auto x_shape = x->get_shape();
float x_size = std::accumulate(std::begin(x_shape), std::end(x_shape), 1,
std::multiplies<float>());
auto node_const = ngraph::op::Constant::create(og->get_element_type(),
ngraph::Shape{1}, {x_size});
  auto node_div = std::make_shared<ngraph::op::Divide>(og, node_const);
  auto result = ElementwiseScalar<ngraph::op::Add>(
      node_div,
      ngraph::op::Constant::create(og->get_element_type(), x_shape, {0}));
paddle::platform::SetOutputNode(op, "X@GRAD", result, ngb_node_map);
}
} // namespace ngraphs
} // namespace operators
} // namespace paddle
REGISTER_NG_OP(mean, BuildMeanNode);
REGISTER_NG_OP(mean_grad, BuildMeanGradNode);
/*Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "ngraph/ngraph.hpp"
#include "paddle/fluid/operators/ngraph/ops/op_bridge.h"
#include "paddle/fluid/platform/ngraph_helper.h"
namespace paddle {
namespace operators {
namespace ngraphs {
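// momentum: velocity_out = mu * velocity + grad;
// param_out = param - lr * (grad + mu * velocity_out) with Nesterov,
// param_out = param - lr * velocity_out otherwise.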
void BuildMomentumNode(
const std::shared_ptr<paddle::framework::OperatorBase>& op,
std::shared_ptr<
std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
ngb_node_map) {
auto op_attrs = paddle::framework::AttrReader(op->Attrs());
auto param = paddle::platform::GetInputNode(op, "Param", ngb_node_map);
auto grad = paddle::platform::GetInputNode(op, "Grad", ngb_node_map);
auto velocity = paddle::platform::GetInputNode(op, "Velocity", ngb_node_map);
auto learning_rate =
paddle::platform::GetInputNode(op, "LearningRate", ngb_node_map);
auto mu = op_attrs.Get<float>("mu");
bool use_nesterov = op_attrs.Get<bool>("use_nesterov");
  auto velocity_shape = velocity->get_shape();
  auto shape_velocity = ngraph::Shape{velocity_shape};
auto mu_create =
ngraph::op::Constant::create(ngraph::element::f32, shape_velocity, {mu});
auto vel_mul = std::make_shared<ngraph::op::Multiply>(velocity, mu_create);
auto vel_out = std::make_shared<ngraph::op::Add>(vel_mul, grad);
ngraph::NodeVector result;
if (use_nesterov) {
auto mul_res = std::make_shared<ngraph::op::Multiply>(vel_out, mu_create);
auto add_res = std::make_shared<ngraph::op::Add>(grad, mul_res);
auto add_2d = paddle::platform::FlattenTo2d(add_res->get_shape(), 0);
auto vel_reshape = paddle::platform::NgReshaper(vel_out, add_2d);
auto lr_bcast = std::make_shared<ngraph::op::Broadcast>(
learning_rate, vel_reshape->get_shape(),
ngraph::AxisSet{vel_reshape->get_shape().size() - 1});
auto lr_1d = paddle::platform::FlattenTo1d(lr_bcast->get_shape(), 0);
auto lr_reshape = std::make_shared<ngraph::op::Reshape>(
lr_bcast, ngraph::AxisVector{0, 1}, lr_1d);
lr_reshape = std::make_shared<ngraph::op::Reshape>(
lr_reshape, ngraph::AxisVector{0}, param->get_shape());
auto mul_res1 = std::make_shared<ngraph::op::Multiply>(add_res, lr_reshape);
auto res = std::make_shared<ngraph::op::Subtract>(param, mul_res1);
paddle::platform::SetOutputNode(op, "ParamOut", res, ngb_node_map);
} else {
auto vel_2d = paddle::platform::FlattenTo2d(vel_out->get_shape(), 0);
auto vel_reshape = paddle::platform::NgReshaper(vel_out, vel_2d);
auto lr_bcast = std::make_shared<ngraph::op::Broadcast>(
learning_rate, vel_reshape->get_shape(),
ngraph::AxisSet{vel_reshape->get_shape().size() - 1});
auto lr_1d = paddle::platform::FlattenTo1d(lr_bcast->get_shape(), 0);
auto lr_reshape = std::make_shared<ngraph::op::Reshape>(
lr_bcast, ngraph::AxisVector{0, 1}, lr_1d);
lr_reshape = std::make_shared<ngraph::op::Reshape>(
lr_reshape, ngraph::AxisVector{0}, param->get_shape());
auto mul_result =
std::make_shared<ngraph::op::Multiply>(lr_reshape, vel_out);
auto res = std::make_shared<ngraph::op::Subtract>(param, mul_result);
paddle::platform::SetOutputNode(op, "ParamOut", res, ngb_node_map);
}
paddle::platform::SetOutputNode(op, "VelocityOut", vel_out, ngb_node_map);
}
} // namespace ngraphs
} // namespace operators
} // namespace paddle
REGISTER_NG_OP(momentum, BuildMomentumNode);
/*Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <algorithm>
#include <memory>
#include <string>
#include <unordered_map>
#include "ngraph/ngraph.hpp"
#include "paddle/fluid/operators/ngraph/ops/op_bridge.h"
#include "paddle/fluid/platform/ngraph_helper.h"
namespace paddle {
namespace operators {
namespace ngraphs {
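// mul flattens X and Y to 2-D around x_num_col_dims / y_num_col_dims,
// multiplies them with ngraph::op::Dot and restores the expected output shape.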
static void BuildMulNode(
const std::shared_ptr<paddle::framework::OperatorBase>& op,
std::shared_ptr<
std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
ngb_node_map) {
auto op_attrs = paddle::framework::AttrReader(op->Attrs());
int x_num_col_dims = op_attrs.Get<int>("x_num_col_dims");
int y_num_col_dims = op_attrs.Get<int>("y_num_col_dims");
auto x = paddle::platform::GetInputNode(op, "X", ngb_node_map);
auto y = paddle::platform::GetInputNode(op, "Y", ngb_node_map);
int y_rank = y->get_shape().size();
auto x_reshape = x;
auto y_reshape = y;
if (x->get_shape().size() > 2) {
auto x_2d = paddle::platform::FlattenTo2d(x->get_shape(), x_num_col_dims);
x_reshape = paddle::platform::NgReshaper(x, x_2d);
}
if (y->get_shape().size() > 2) {
auto y_2d = paddle::platform::FlattenTo2d(y->get_shape(), y_num_col_dims);
y_reshape = paddle::platform::NgReshaper(y, y_2d);
}
std::shared_ptr<ngraph::Node> out =
std::make_shared<ngraph::op::Dot>(x_reshape, y_reshape);
ngraph::Shape out_shape;
for (int i = 0; i < x_num_col_dims; ++i) {
out_shape.push_back(x->get_shape()[i]);
}
for (int i = y_num_col_dims; i < y_rank; ++i) {
out_shape.push_back(y->get_shape()[i]);
}
out = paddle::platform::NgReshaper(out, out_shape);
paddle::platform::SetOutputNode(op, "Out", out, ngb_node_map);
}
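// mul_grad computes dX = dOut * Y^T and dY = X^T * dOut on the flattened 2-D
// views and reshapes the results back to the original input shapes.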
static void BuildMulGradNode(
const std::shared_ptr<paddle::framework::OperatorBase>& op,
std::shared_ptr<
std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
ngb_node_map) {
auto op_attrs = paddle::framework::AttrReader(op->Attrs());
int x_num_col_dims = op_attrs.Get<int>("x_num_col_dims");
int y_num_col_dims = op_attrs.Get<int>("y_num_col_dims");
auto x = paddle::platform::GetInputNode(op, "X", ngb_node_map);
auto y = paddle::platform::GetInputNode(op, "Y", ngb_node_map);
auto dout = paddle::platform::GetInputNode(op, "Out@GRAD", ngb_node_map);
bool is_dx = paddle::platform::HasOutput(op, "X@GRAD") ? true : false;
bool is_dy = paddle::platform::HasOutput(op, "Y@GRAD") ? true : false;
auto x_shape = x->get_shape();
auto y_shape = y->get_shape();
auto x_reshape = x;
auto y_reshape = y;
if (x_shape.size() > 2) {
auto x_2d_shape = paddle::platform::FlattenTo2d(x_shape, x_num_col_dims);
x_reshape = paddle::platform::NgReshaper(x, x_2d_shape);
}
if (y_shape.size() > 2) {
auto y_2d_shape = paddle::platform::FlattenTo2d(y_shape, y_num_col_dims);
y_reshape = paddle::platform::NgReshaper(y, y_2d_shape);
}
auto x_reshape_shape = x_reshape->get_shape();
std::reverse(x_reshape_shape.begin(), x_reshape_shape.end());
auto x_transpose = std::make_shared<ngraph::op::Reshape>(
x_reshape, ngraph::AxisVector{1, 0}, x_reshape_shape);
auto y_reshape_shape = y_reshape->get_shape();
std::reverse(y_reshape_shape.begin(), y_reshape_shape.end());
auto y_transpose = std::make_shared<ngraph::op::Reshape>(
y_reshape, ngraph::AxisVector{1, 0}, y_reshape_shape);
if (is_dx) {
if (dout->get_shape().size() > 2) {
auto dout_2d_shape = paddle::platform::FlattenTo2d(dout->get_shape(), 2);
dout = paddle::platform::NgReshaper(dout, dout_2d_shape);
}
auto dx = std::make_shared<ngraph::op::Dot>(dout, y_transpose);
if (dx->get_shape() == x_shape) {
paddle::platform::SetOutputNode(op, "X@GRAD", dx, ngb_node_map);
} else {
auto dx_reshape = paddle::platform::NgReshaper(dx, x_shape);
paddle::platform::SetOutputNode(op, "X@GRAD", dx_reshape, ngb_node_map);
}
}
if (is_dy) {
if (dout->get_shape().size() > 2) {
auto dout_2d_shape = paddle::platform::FlattenTo2d(dout->get_shape(), 2);
dout = paddle::platform::NgReshaper(dout, dout_2d_shape);
}
auto dy = std::make_shared<ngraph::op::Dot>(x_transpose, dout);
if (dy->get_shape() == y_shape) {
paddle::platform::SetOutputNode(op, "Y@GRAD", dy, ngb_node_map);
} else {
auto dy_reshape = paddle::platform::NgReshaper(dy, y_shape);
paddle::platform::SetOutputNode(op, "Y@GRAD", dy_reshape, ngb_node_map);
}
}
}
} // namespace ngraphs
} // namespace operators
} // namespace paddle
REGISTER_NG_OP(mul, BuildMulNode);
REGISTER_NG_OP(mul_grad, BuildMulGradNode);
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <algorithm>
#include <functional>
#include <map>
#include <memory>
#include <string>
#include <unordered_map>
#include "ngraph/node.hpp"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/operators/ngraph/ngraph_bridge.h"
#include "paddle/fluid/platform/enforce.h"
namespace paddle {
namespace operators {
namespace ops {
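// NgraphSingleton keeps a registry from PaddlePaddle op type to the function
// that builds the corresponding ngraph node(s); REGISTER_NG_OP below fills it
// at static-initialization time.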
class NgraphSingleton {
NgraphSingleton() = default;
NgraphSingleton(NgraphSingleton const&) = delete;
void operator=(NgraphSingleton const&) = delete;
~NgraphSingleton() = default;
static std::map<
std::string,
std::function<void(const std::shared_ptr<framework::OperatorBase>&,
std::shared_ptr<std::unordered_map<
std::string, std::shared_ptr<ngraph::Node>>>)>>
ng_node_maps_;
public:
template <typename TF>
static void Register(TF&& tf, const std::string& name) {
ng_node_maps_[name] = tf;
}
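  // Returns true when no converter is registered for `name`.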
static bool Lookup(const std::string& name) {
auto it = ng_node_maps_.find(name);
if (it == ng_node_maps_.end()) {
return true;
}
return false;
}
static void BuildNode(
const std::shared_ptr<std::unordered_map<
std::string, std::shared_ptr<ngraph::Node>>>& ng_maps,
const std::shared_ptr<framework::OperatorBase>& op,
const std::string& name) {
ng_node_maps_[name](op, ng_maps);
}
};
std::map<std::string,
std::function<void(const std::shared_ptr<framework::OperatorBase>&,
std::shared_ptr<std::unordered_map<
std::string, std::shared_ptr<ngraph::Node>>>)>>
NgraphSingleton::ng_node_maps_;
} // namespace ops
} // namespace operators
} // namespace paddle
#define REGISTER_NG_OP(op_type__, Converter__) \
struct ng_##op_type__##_converter { \
ng_##op_type__##_converter() { \
paddle::operators::ops::NgraphSingleton::Register( \
paddle::operators::ngraphs::Converter__, #op_type__); \
} \
}; \
ng_##op_type__##_converter ng_##op_type__##_converter__;
/*Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <memory>
#include <string>
#include <unordered_map>
#include "ngraph/ngraph.hpp"
#include "paddle/fluid/operators/ngraph/ops/elementwise_scalar_op.h"
#include "paddle/fluid/operators/ngraph/ops/op_bridge.h"
#include "paddle/fluid/platform/ngraph_helper.h"
namespace paddle {
namespace operators {
namespace ngraphs {
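// scale multiplies X elementwise by the `scale` attribute.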
void BuildScaleNode(
const std::shared_ptr<paddle::framework::OperatorBase>& op,
std::shared_ptr<
std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
ngb_node_map) {
auto op_attrs = paddle::framework::AttrReader(op->Attrs());
float scale = op_attrs.Get<float>("scale");
auto x = paddle::platform::GetInputNode(op, "X", ngb_node_map);
auto out = ElementwiseScalar<ngraph::op::Multiply>(scale, x);
paddle::platform::SetOutputNode(op, "Out", out, ngb_node_map);
}
} // namespace ngraphs
} // namespace operators
} // namespace paddle
REGISTER_NG_OP(scale, BuildScaleNode);
/*Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <memory>
#include <string>
#include <unordered_map>
#include "ngraph/ngraph.hpp"
#include "paddle/fluid/operators/ngraph/ops/op_bridge.h"
#include "paddle/fluid/platform/ngraph_helper.h"
namespace paddle {
namespace operators {
namespace ngraphs {
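// top_k selects the k largest values along the last axis; output 0 of
// ngraph::op::TopK holds the indices and output 1 holds the values.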
void BuildTopKNode(
const std::shared_ptr<paddle::framework::OperatorBase>& op,
std::shared_ptr<
std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
ngb_node_map) {
auto op_attrs = paddle::framework::AttrReader(op->Attrs());
int k = op_attrs.Get<int>("k");
auto input = paddle::platform::GetInputNode(op, "X", ngb_node_map);
auto top_k = std::make_shared<ngraph::op::TopK>(
input, input->get_shape().size() - 1, ngraph::element::i64, k);
std::shared_ptr<ngraph::Node> indices =
std::make_shared<ngraph::op::GetOutputElement>(top_k, 0);
std::shared_ptr<ngraph::Node> out =
std::make_shared<ngraph::op::GetOutputElement>(top_k, 1);
paddle::platform::SetOutputNode(op, "Indices", indices, ngb_node_map);
paddle::platform::SetOutputNode(op, "Out", out, ngb_node_map);
}
} // namespace ngraphs
} // namespace operators
} // namespace paddle
REGISTER_NG_OP(top_k, BuildTopKNode);