diff --git a/paddle/fluid/eager/.gitignore b/paddle/fluid/eager/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..f0a8b9203cf6a38ebe2d06975ed5640b3b5706c9
--- /dev/null
+++ b/paddle/fluid/eager/.gitignore
@@ -0,0 +1,2 @@
+generated/**
+autocodegen/generated_example/
\ No newline at end of file
diff --git a/paddle/fluid/eager/CMakeLists.txt b/paddle/fluid/eager/CMakeLists.txt
index 9746305c6c66c5182400ab9d14cf01d9372cdd16..e8cb55b7afeb9abaeb12c4f3e49b37a977585d88 100644
--- a/paddle/fluid/eager/CMakeLists.txt
+++ b/paddle/fluid/eager/CMakeLists.txt
@@ -1,9 +1,22 @@
+set(eager_deps pten pten_api hook_utils tensor_utils utils global_utils backward pten_tensor legacy autograd_meta grad_node_info grad_tensor_holder gradient_accumulation accumulation_node)
+set(fluid_deps tracer layer proto_desc operator op_registry variable_helper memcpy)
+set(generated_deps dygraph_function dygraph_node)
+
+if(NOT DEFINED ON_INFER)
+  message("Performing Eager Dygraph Auto Code Generation")
+  add_subdirectory(auto_code_generator)
+endif()
+
 add_subdirectory(api)
 add_subdirectory(accumulation)
-add_subdirectory(tests)
+add_subdirectory(legacy)
 
-cc_library(autograd_meta SRCS autograd_meta.cc DEPS pten pten_api)
 cc_library(grad_node_info SRCS grad_node_info.cc DEPS pten pten_api)
 cc_library(grad_tensor_holder SRCS grad_tensor_holder.cc DEPS grad_node_info gradient_accumulation)
+
+cc_library(autograd_meta SRCS autograd_meta.cc DEPS pten pten_api)
 cc_library(utils SRCS utils.cc DEPS pten pten_api global_utils layer proto_desc operator op_registry variable_helper memcpy scale_op autograd_meta)
+cc_library(legacy SRCS ${DYGRAPH_LEGACY} DEPS global_utils proto_desc operator pten pten_api op_registry variable_helper memcpy)
 cc_library(backward SRCS backward.cc DEPS grad_tensor_holder utils autograd_meta grad_node_info)
+
+add_subdirectory(tests)
diff --git a/paddle/fluid/eager/api/generated/.gitignore b/paddle/fluid/eager/api/generated/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..7b49528feab2b6beea436757e64f10ccd351df10
--- /dev/null
+++ b/paddle/fluid/eager/api/generated/.gitignore
@@ -0,0 +1 @@
+fluid_generated/**
diff --git a/paddle/fluid/eager/api/generated/CMakeLists.txt b/paddle/fluid/eager/api/generated/CMakeLists.txt
index 41fadef153bddca1e75e8a452d598389f0dd99c5..407a8d69e52daecbd04601cb8afe3a1e1b7a5dd5 100644
--- a/paddle/fluid/eager/api/generated/CMakeLists.txt
+++ b/paddle/fluid/eager/api/generated/CMakeLists.txt
@@ -1 +1,5 @@
 add_subdirectory(eager_generated)
+
+if(NOT DEFINED ON_INFER)
+  add_subdirectory(fluid_generated)
+endif()
diff --git a/paddle/fluid/eager/auto_code_generator/CMakeLists.txt b/paddle/fluid/eager/auto_code_generator/CMakeLists.txt
index 8609756effe4b76e4ce36cb6e2039338cef42112..5d31c9139baa81531ce040d1dc0dde56a059cda5 100644
--- a/paddle/fluid/eager/auto_code_generator/CMakeLists.txt
+++ b/paddle/fluid/eager/auto_code_generator/CMakeLists.txt
@@ -6,13 +6,24 @@ target_link_libraries(eager_generator ${EAGER_GENERETOR_DEPS})
 
 get_property (os_dependency_modules GLOBAL PROPERTY OS_DEPENDENCY_MODULES)
 target_link_libraries(eager_generator ${os_dependency_modules})
 
+if(WITH_ROCM)
+  target_link_libraries(eager_generator ${ROCM_HIPRTC_LIB})
+endif()
+
 # Prepare file structure
 message("Generate dygraph file structure at path: ${PADDLE_SOURCE_DIR}/paddle/fluid/eager/generated")
 execute_process(
   COMMAND "${PYTHON_EXECUTABLE}" "${PADDLE_SOURCE_DIR}/paddle/fluid/eager/auto_code_generator/generate_file_structures.py"
"${PADDLE_SOURCE_DIR}/paddle/fluid/eager/" ) -add_custom_target(eager_codegen - COMMAND "${CMAKE_CURRENT_BINARY_DIR}/eager_generator" "${PADDLE_SOURCE_DIR}/paddle/fluid/eager/api/generated/fluid_generated" +if(WIN32) + add_custom_target(eager_codegen + COMMAND "${CMAKE_CURRENT_BINARY_DIR}/eager_generator.exe" "${PADDLE_SOURCE_DIR}/paddle/fluid/eager/api/generated/fluid_generated" DEPENDS eager_generator VERBATIM) +else() + add_custom_target(eager_codegen + COMMAND "${CMAKE_CURRENT_BINARY_DIR}/eager_generator" "${PADDLE_SOURCE_DIR}/paddle/fluid/eager/api/generated/fluid_generated" + DEPENDS eager_generator + VERBATIM) +endif() diff --git a/paddle/fluid/eager/auto_code_generator/eager_generator.cc b/paddle/fluid/eager/auto_code_generator/eager_generator.cc index 2cb8b4b9904b3406b25050761f3a1189c2399261..bc4775f2189162482977f8aac569afbc8aea288e 100644 --- a/paddle/fluid/eager/auto_code_generator/eager_generator.cc +++ b/paddle/fluid/eager/auto_code_generator/eager_generator.cc @@ -577,11 +577,6 @@ static std::string GenerateGradNodeCreationContent( // If single output slotname and not duplicable, // then generate: "egr::AutogradMeta* p_autograd_out = // egr::EagerUtils::autograd_meta("op_proto->outputs()[0].name()")" - - // TODO(zhanlve): in case of multiple slotname but none of which are - // duplicable, - // avoid constructing vector, generate seperate - // AutogradMeta* objects respectively. std::string get_autograd_meta_str = " // Prepare Autograd Meta \n"; for (const proto::OpProto::Var& input : op_proto.inputs()) { const std::string& input_name = input.name(); @@ -607,11 +602,6 @@ static std::string GenerateGradNodeCreationContent( // If single output slotname and not duplicable, // then generate: "egr::AutogradMeta* p_autograd_out = // egr::EagerUtils::autograd_meta("op_proto.outputs()[0].name()")" - - // TODO(zhanlve): in case of multiple slotname but none of which are - // duplicable, - // avoid constructing vector, generate seperate - // AutogradMeta* objects respectively. 
   for (const proto::OpProto::Var& output : op_proto.outputs()) {
     const std::string& output_name = output.name();
     const std::string& output_autograd_name = "p_autograd_" + output_name;
@@ -725,9 +715,9 @@ static std::string GenerateGradNodeCreationContent(
   // [Generation] GradNode Creation
   const char* GRAD_NODE_CREATION_TEMPLATE =
       "  %s"
-      "  bool require_any_grad = egr::ComputeRequireGrad(%s);\n"
+      "  bool require_any_grad = egr::EagerUtils::ComputeRequireGrad(%s);\n"
       "  if(require_any_grad) {\n"
-      "    egr::PassStopGradient(%s);\n"
+      "    egr::EagerUtils::PassStopGradient(%s);\n"
      "%s\n  }";
   std::string grad_node_creation_body_str = paddle::string::Sprintf(
       GRAD_NODE_CREATION_TEMPLATE, prepare_autograd_meta_str,
@@ -793,7 +783,7 @@ static std::pair<std::string, std::string> GenerateForwardFunctionContents(
        Controller.Instance().GetExpectedPlace(), {});
 
   // According to fwd_outputs_names
-  std::vector<egr::EagerTensor> Out0 = GetOutputs(outs["Out0"]);
+  std::vector<egr::EagerTensor> Out0 = egr::EagerUtils::GetOutputs(outs["Out0"]);
   egr::EagerTensor Out1 = GetOutputs(outs["Out1"][0]);
   std::vector<egr::EagerTensor> Out2 = GetOutputs(outs["Out2"]);
@@ -830,7 +820,8 @@ static std::pair<std::string, std::string> GenerateForwardFunctionContents(
       input_args_str_list[input_position] =
           paddle::string::Sprintf(FWD_INS_ARG_TEMPLATE, input_name);
     }
-    const char* FWD_INS_CONTENT_TEMPLATE = "{ \"%s\", egr::SyncToVars(%s) },";
+    const char* FWD_INS_CONTENT_TEMPLATE =
+        "{ \"%s\", egr::EagerUtils::SyncToVars(%s) },";
     ins_contents_str +=
         paddle::string::Sprintf(FWD_INS_CONTENT_TEMPLATE, input_name, input_name);
   }
@@ -925,14 +916,14 @@ static std::pair<std::string, std::string> GenerateForwardFunctionContents(
     if (output.duplicable()) {
       const char* FWD_OUT_TENSORS_TEMPLATE =
           "  std::vector<egr::EagerTensor> %s = "
-          "egr::GetOutputs(outs[\"%s\"]);\n";
+          "egr::EagerUtils::GetOutputs(outs[\"%s\"]);\n";
       out_tensor_str = paddle::string::Sprintf(FWD_OUT_TENSORS_TEMPLATE,
                                                output_name, output_name);
       return_types[return_position] = "std::vector<egr::EagerTensor>";
     } else {
       const char* FWD_OUT_TENSOR_TEMPLATE =
           "  egr::EagerTensor %s = "
-          "egr::GetOutput(outs[\"%s\"][0]);\n";
+          "egr::EagerUtils::GetOutput(outs[\"%s\"][0]);\n";
       out_tensor_str = paddle::string::Sprintf(FWD_OUT_TENSOR_TEMPLATE,
                                                output_name, output_name);
       return_types[return_position] = "egr::EagerTensor";
@@ -1093,7 +1084,8 @@ static std::string GenerateGradNodeCCContents(
         grad_ins_fwd_slotname_map.at(grad_input_name) + "_";
     const char* GRAD_INS_FWD_CONTENT_TEMPLATE =
         "{ \"%s\", "
-        "egr::SyncToVars(egr::EagerUtils::RecoverTensorWrapper(&this->%s, "
+        "egr::EagerUtils::SyncToVars(egr::EagerUtils::RecoverTensorWrapper(&"
+        "this->%s, "
         "nullptr)) },";
     ins_contents_str +=
         paddle::string::Sprintf(GRAD_INS_FWD_CONTENT_TEMPLATE,
@@ -1104,7 +1096,7 @@ static std::string GenerateGradNodeCCContents(
     size_t fwd_output_position = fwd_outputs_name_pos_map.at(
         grad_ins_grad_slotname_map.at(grad_input_name));
     const char* GRAD_INS_GRAD_CONTENT_TEMPLATE =
-        "{ \"%s\", egr::SyncToVars(grads[%d]) },";
+        "{ \"%s\", egr::EagerUtils::SyncToVars(grads[%d]) },";
     ins_contents_str += paddle::string::Sprintf(
         GRAD_INS_GRAD_CONTENT_TEMPLATE, grad_input_name, fwd_output_position);
@@ -1206,7 +1198,7 @@ static std::string GenerateGradNodeCCContents(
         fwd_inputs_name_pos_map.at(grad_outs_slotname_map.at(grad_out_name));
 
     const char* BWD_OUTPUT_TEMPLATE =
-        "  outputs[%d] = GetOutputs(outs[\"%s\"]);\n";
+        "  outputs[%d] = egr::EagerUtils::GetOutputs(outs[\"%s\"]);\n";
     outputs_str += paddle::string::Sprintf(BWD_OUTPUT_TEMPLATE,
                                            fwd_input_position, grad_out_name);
   }
@@ -1526,6 +1518,9 @@ static void DygraphCodeGeneration(const std::string& output_dir) {
   GenerateForwardHFile(output_dir, dygraph_forward_api_str);
 }
 
+}  // namespace framework
+}  // namespace paddle
+
 int main(int argc, char* argv[]) {
   if (argc != 2) {
     std::cerr << "argc must be 2" << std::endl;
@@ -1537,6 +1532,3 @@ int main(int argc, char* argv[]) {
 
   return 0;
 }
-
-}  // namespace framework
-}  // namespace paddle
diff --git a/paddle/fluid/eager/legacy/prepared_operator.cc b/paddle/fluid/eager/legacy/prepared_operator.cc
index a6cc934521774a7ddacc04a890af8c30aecdc1ee..3db154b70a11efe19ac906ac6db460b66566d890 100644
--- a/paddle/fluid/eager/legacy/prepared_operator.cc
+++ b/paddle/fluid/eager/legacy/prepared_operator.cc
@@ -20,7 +20,7 @@
 #include "paddle/fluid/framework/pten_utils.h"
 #include "paddle/utils/small_vector.h"
 #ifdef PADDLE_WITH_XPU
-#include "paddle/fluid/platform/xpu/xpu_op_list.h"
+#include "paddle/fluid/platform/device/xpu/xpu_op_list.h"
 #endif
 DECLARE_bool(check_nan_inf);
 DECLARE_bool(run_pten_kernel);
diff --git a/paddle/fluid/eager/tests/CMakeLists.txt b/paddle/fluid/eager/tests/CMakeLists.txt
index eb5c25b338722f7b94f94b6c200a63934f600794..289f24dfa63675c64a9e6ec07de400d4128eabe7 100644
--- a/paddle/fluid/eager/tests/CMakeLists.txt
+++ b/paddle/fluid/eager/tests/CMakeLists.txt
@@ -1,5 +1,2 @@
-set(eager_deps pten pten_api hook_utils tensor_utils utils global_utils backward pten_tensor autograd_meta grad_node_info grad_tensor_holder gradient_accumulation accumulation_node)
-set(fluid_deps tracer layer proto_desc operator op_registry variable_helper memcpy)
-
 add_subdirectory(data_structure_tests)
 add_subdirectory(task_tests)
diff --git a/paddle/fluid/eager/tests/task_tests/CMakeLists.txt b/paddle/fluid/eager/tests/task_tests/CMakeLists.txt
index 28cffca920425b8531c3e1d003879b009e00f87b..3921ce5b69cd7d55deae91d01f4ad9464a044f93 100644
--- a/paddle/fluid/eager/tests/task_tests/CMakeLists.txt
+++ b/paddle/fluid/eager/tests/task_tests/CMakeLists.txt
@@ -5,3 +5,7 @@ cc_test(test_egr_task_backward SRCS backward_test.cc DEPS ${eager_deps} ${fluid_
 cc_test(test_egr_task_hook SRCS hook_test.cc DEPS ${eager_deps} ${fluid_deps} eager_scale scale_node)
 cc_test(test_egr_task_cross_batch SRCS cross_batch_accumulation_test.cc DEPS ${eager_deps} ${fluid_deps} eager_scale scale_node)
 cc_test(test_egr_task_fwd_bwd_joint SRCS fwd_bwd_joint_test.cc DEPS ${eager_deps} ${fluid_deps} eager_scale scale_node)
+
+if(NOT DEFINED ON_INFER)
+  cc_test(test_egr_task_autocodegen SRCS generated_test.cc DEPS ${eager_deps} ${fluid_deps} ${generated_deps})
+endif()
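The new `generated_test.cc` below exercises the generated entry points (`sigmoid_dygraph_function`, `matmul_v2_dygraph_function`) directly. For orientation, here is a rough sketch of what such a generated forward function looks like, assembled from the `FWD_*` templates in `eager_generator.cc` above; the container types, the attribute-map signature, and the elided tracer call are assumptions for illustration, not the generator's verbatim output:

```cpp
// Sketch of a generated forward function for a one-input/one-output op.
// Assumes the map-of-shared_ptr layout implied by EagerUtils::SyncToVars
// and EagerUtils::GetOutput in paddle/fluid/eager/utils.h.
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "paddle/fluid/eager/utils.h"
#include "paddle/fluid/framework/type_defs.h"

egr::EagerTensor sigmoid_dygraph_function_sketch(
    const egr::EagerTensor& X,
    const paddle::framework::AttributeMap& attr_map) {
  // FWD_INS_CONTENT_TEMPLATE: { "X", egr::EagerUtils::SyncToVars(X) }
  std::map<std::string, std::vector<std::shared_ptr<egr::EagerTensor>>> ins = {
      {"X", egr::EagerUtils::SyncToVars(X)}};
  std::map<std::string, std::vector<std::shared_ptr<egr::EagerTensor>>> outs = {
      {"Out", {std::make_shared<egr::EagerTensor>()}}};
  (void)attr_map;  // consumed by the elided tracer call

  // ... run "sigmoid" through the legacy op path with ins/outs/attr_map,
  // then wire up the GradNode via egr::EagerUtils::ComputeRequireGrad /
  // egr::EagerUtils::PassStopGradient (GRAD_NODE_CREATION_TEMPLATE) ...

  // FWD_OUT_TENSOR_TEMPLATE: unwrap the single, non-duplicable output.
  return egr::EagerUtils::GetOutput(outs["Out"][0]);
}
```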
diff --git a/paddle/fluid/eager/tests/task_tests/generated_test.cc b/paddle/fluid/eager/tests/task_tests/generated_test.cc
new file mode 100644
index 0000000000000000000000000000000000000000..eb8d1e517eaf3b0c27262b581c749a87cccf8001
--- /dev/null
+++ b/paddle/fluid/eager/tests/task_tests/generated_test.cc
@@ -0,0 +1,95 @@
+// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Eager Dygraph
+
+#include <sstream>
+
+#include "gtest/gtest.h"
+
+#include "paddle/fluid/eager/api/all.h"
+#include "paddle/fluid/eager/api/utils/tensor_utils.h"
+#include "paddle/fluid/eager/autograd_meta.h"
+#include "paddle/fluid/eager/backward.h"
+#include "paddle/fluid/eager/utils.h"
+
+#include "paddle/fluid/eager/tests/test_utils.h"
+#include "paddle/fluid/imperative/tracer.h"
+
+#include "paddle/fluid/eager/api/generated/fluid_generated/dygraph_forward_api.h"
+#include "paddle/pten/core/kernel_registry.h"
+
+// TODO(jiabin): remove nolint here!!!
+using namespace egr;  // NOLINT
+
+namespace eager_test {
+
+TEST(Generated, Sigmoid) {
+  // Prepare Device Contexts
+  InitEnv(paddle::platform::CPUPlace());
+  VLOG(6) << "Init Env";
+  // 1. Prepare Input
+  paddle::framework::DDim ddim = paddle::framework::make_ddim({2, 4, 4, 4});
+  VLOG(6) << "Make Dim";
+  egr::EagerTensor tensor = CreateTensorWithValue(
+      ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
+      pten::DataLayout::NCHW, 0.0, true);
+  VLOG(6) << "Make EagerTensor";
+  RetainGradForTensor(tensor);
+  VLOG(6) << "Retain Grad for Tensor";
+  auto output_tensor = sigmoid_dygraph_function(tensor, {});
+  VLOG(6) << "Run Backward";
+  CompareVariableWithValue<float>(output_tensor, 0.5);
+
+  std::vector<egr::EagerTensor> target_tensors = {output_tensor};
+  VLOG(6) << "Runing Backward";
+  RunBackward(target_tensors, {});
+
+  VLOG(6) << "Finish Backward";
+  CompareGradVariableWithValue<float>(tensor, 0.25);
+}
+
+TEST(Generated, Matmul_v2) {
+  // Prepare Device Contexts
+  InitEnv(paddle::platform::CPUPlace());
+
+  auto tracer = std::make_shared<paddle::imperative::Tracer>();
+  paddle::imperative::SetCurrentTracer(tracer);
+
+  // 1. Prepare Input
+  paddle::framework::DDim ddimX = paddle::framework::make_ddim({4, 16});
+  egr::EagerTensor X = CreateTensorWithValue(
+      ddimX, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
+      pten::DataLayout::NCHW, 3.0, true);
+  RetainGradForTensor(X);
+
+  paddle::framework::DDim ddimY = paddle::framework::make_ddim({16, 20});
+  egr::EagerTensor Y = CreateTensorWithValue(
+      ddimY, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
+      pten::DataLayout::NCHW, 2.0, true);
+  RetainGradForTensor(Y);
+
+  auto output_tensor = matmul_v2_dygraph_function(
+      X, Y, {{"trans_x", false}, {"trans_y", false}});
+
+  CompareVariableWithValue<float>(output_tensor, 96);
+
+  std::vector<egr::EagerTensor> target_tensors = {output_tensor};
+  RunBackward(target_tensors, {});
+
+  CompareGradVariableWithValue<float>(X, 2.0 * 20);
+  CompareGradVariableWithValue<float>(Y, 3.0 * 4);
+}
+
+}  // namespace eager_test
diff --git a/paddle/fluid/eager/utils.cc b/paddle/fluid/eager/utils.cc
index 0b7e6e357dc14a6b56353b32a7ecbbc62659c886..28eefd62c5aa0acea8bfd09171fc8990989c3d6a 100644
--- a/paddle/fluid/eager/utils.cc
+++ b/paddle/fluid/eager/utils.cc
@@ -14,6 +14,7 @@
 
 #include "paddle/fluid/eager/utils.h"
 #include "paddle/fluid/eager/api/utils/global_utils.h"
+#include "paddle/fluid/eager/tensor_wrapper.h"
 
 #include "paddle/pten/api/all.h"
 #include "paddle/pten/common/layout.h"
@@ -188,4 +189,19 @@ egr::EagerTensor EagerUtils::GetOutput(
   return EagerTensor((*(out.get())));
 }
 
+EagerTensor EagerUtils::RecoverTensorWrapper(
+    TensorWrapper* tw, const std::shared_ptr<GradNodeBase>& grad_node) {
+  return tw->recover(grad_node);
+}
+
+std::vector<EagerTensor> EagerUtils::RecoverTensorWrapper(
+    std::vector<TensorWrapper>* tw,
+    const std::shared_ptr<GradNodeBase>& grad_node) {
+  std::vector<EagerTensor> ret;
+  for (auto& t : *tw) {
+    ret.emplace_back(t.recover(grad_node));
+  }
+  return ret;
+}
+
 }  // namespace egr
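For context, a minimal sketch of how the new `RecoverTensorWrapper` helpers are meant to be used inside a generated backward function, mirroring `GRAD_INS_FWD_CONTENT_TEMPLATE` / `GRAD_INS_GRAD_CONTENT_TEMPLATE` above; the wrapper argument and grad-op wiring are hypothetical stand-ins:

```cpp
// Sketch: a generated GradNode recovers the tensor captured at forward time
// from its TensorWrapper, then feeds it (plus the incoming grad) to the
// grad op. `X_wrapper` is a hypothetical captured member.
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "paddle/fluid/eager/tensor_wrapper.h"
#include "paddle/fluid/eager/utils.h"

void backward_sketch(egr::TensorWrapper* X_wrapper,
                     const egr::EagerTensor& out_grad) {
  // GRAD_INS_FWD_CONTENT_TEMPLATE: recover the forward input X.
  egr::EagerTensor X =
      egr::EagerUtils::RecoverTensorWrapper(X_wrapper, /*grad_node=*/nullptr);
  // GRAD_INS_GRAD_CONTENT_TEMPLATE: wrap the incoming gradient.
  std::map<std::string, std::vector<std::shared_ptr<egr::EagerTensor>>> ins = {
      {"X", egr::EagerUtils::SyncToVars(X)},
      {"Out@GRAD", egr::EagerUtils::SyncToVars(out_grad)}};
  // ... run the grad op, then collect "X@GRAD" via
  // egr::EagerUtils::GetOutputs(outs["X@GRAD"]) (BWD_OUTPUT_TEMPLATE) ...
  (void)ins;
}
```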
diff --git a/paddle/fluid/eager/utils.h b/paddle/fluid/eager/utils.h
index 19692df78dd40fecc3734a37b07d24cee85e2f81..f7e226a2aba36c0540ceb2f06d3890d0dfbe6097 100644
--- a/paddle/fluid/eager/utils.h
+++ b/paddle/fluid/eager/utils.h
@@ -22,6 +22,8 @@
 
 namespace egr {
 
+class TensorWrapper;
+
 /**
  * EagerUtils is utils used to do some static conversion or autograd
  * members access, this class is desinged to be a full static functional
@@ -131,6 +133,13 @@ class EagerUtils {
     iter.apply(std::forward<Args>(args)...);
   }
 
+  // TensorWrapper Utils
+  static egr::EagerTensor RecoverTensorWrapper(
+      egr::TensorWrapper* tw, const std::shared_ptr<GradNodeBase>& grad_node);
+  static std::vector<egr::EagerTensor> RecoverTensorWrapper(
+      std::vector<egr::TensorWrapper>* tw,
+      const std::shared_ptr<GradNodeBase>& grad_node);
+
   // Intermidate needed remove this once we don't need legacy
   static std::vector<std::shared_ptr<egr::EagerTensor>> SyncToVars(
       const egr::EagerTensor& tensor);
diff --git a/paddle/fluid/framework/details/nan_inf_utils.h b/paddle/fluid/framework/details/nan_inf_utils.h
index e4fd24f201d7f389ffa11da1c00f466982fe982d..d458e88a5619ac351cdf2bd64683a559af9c3186 100644
--- a/paddle/fluid/framework/details/nan_inf_utils.h
+++ b/paddle/fluid/framework/details/nan_inf_utils.h
@@ -17,6 +17,7 @@
 #include <string>
 #include <vector>
 
+#include "paddle/fluid/eager/legacy/type_def.h"
 #include "paddle/fluid/framework/operator.h"
 #include "paddle/fluid/framework/scope.h"
 #include "paddle/fluid/imperative/type_defs.h"
@@ -53,6 +54,19 @@ void CheckOpHasNanOrInfInDygraph(const std::string& op_type,
   }
 }
 
+template <typename TensorType>
+static void CheckOpHasNanOrInfInEager(const std::string& op_type,
+                                      const egr::NameMap<TensorType>& op_outs,
+                                      platform::Place place) {
+  for (const auto& pair : op_outs) {
+    for (const auto& tensor : pair.second) {
+      auto* var = tensor->MutableVar();
+      if (var == nullptr) continue;
+      CheckVarHasNanOrInf(op_type, tensor->name(), var, place);
+    }
+  }
+}
+
 #ifdef PADDLE_WITH_ASCEND_CL
 void NPUAllocAndClearFloatStatus(const framework::OperatorBase& op,
                                  const framework::ScopeBase& scope,
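To close, a hedged sketch of a call site for the new `CheckOpHasNanOrInfInEager` helper; the real wiring lives in the eager legacy op-execution path (see `prepared_operator.cc` above, which declares `FLAGS_check_nan_inf`), and the function below is illustrative only:

```cpp
// Hypothetical call site: after running an eager op, check its outputs.
// Assumes egr::NameMap<T> is the map-of-vectors-of-shared_ptr alias from
// paddle/fluid/eager/legacy/type_def.h.
#include <string>
#include "gflags/gflags.h"
#include "paddle/fluid/framework/details/nan_inf_utils.h"

DECLARE_bool(check_nan_inf);

void check_outputs_sketch(const std::string& op_type,
                          const egr::NameMap<egr::EagerTensor>& outs,
                          const paddle::platform::Place& place) {
  if (FLAGS_check_nan_inf) {
    paddle::framework::details::CheckOpHasNanOrInfInEager<egr::EagerTensor>(
        op_type, outs, place);
  }
}
```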