Unverified commit 74fdba7c, authored by Zhanlue Yang, committed by GitHub

Refactored eager legacy namespace (#37659)

Parent 46c71f2c
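This commit mechanically moves the helpers under paddle/fluid/eager/legacy (RunOp, PreparedOp, the AMP casting utilities, the execution/shape/var-type contexts, and the NameTensorMap aliases) from the egr namespace into a nested egr::legacy namespace, and requalifies every call site accordingly. A minimal sketch of the pattern, using a hypothetical Foo() as a stand-in for the real symbols:

    // Before: legacy helpers sat directly under egr::
    namespace egr {
    void Foo();  // hypothetical stand-in for RunOp, PreparedOp, ...
    }  // namespace egr

    // After: the same helpers live one level deeper
    namespace egr {
    namespace legacy {
    void Foo();
    }  // namespace legacy
    }  // namespace egr

    // Call sites change from egr::Foo() to egr::legacy::Foo().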
@@ -779,7 +779,7 @@ static std::pair<std::string, std::string> GenerateForwardFunctionContents(
         ,ConstructDuplicableOutput(Out1Num)} };
     // According to op_proto->attrs()
-    egr::RunOp("op_type", ins, outs, attr_map,
+    egr::legacy::RunOp("op_type", ins, outs, attr_map,
                Controller.Instance().GetExpectedPlace(), {});
     // According to fwd_outputs_names
@@ -894,7 +894,7 @@ static std::pair<std::string, std::string> GenerateForwardFunctionContents(
   const char* FWD_TRACE_OP_TEMPLATE =
       "  paddle::framework::AttributeMap attrs = attr_map;\n"
       "  paddle::framework::AttributeMap default_attrs;\n"
-      "  egr::RunOp(\"%s\", ins, outs, attrs, \n"
+      "  egr::legacy::RunOp(\"%s\", ins, outs, attrs, \n"
       "     egr::Controller::Instance().GetExpectedPlace(),\n"
       "     &default_attrs, true, {});\n";
   std::string trace_op_str =
@@ -1052,7 +1052,7 @@ static std::string GenerateGradNodeCCContents(
   // Visit each OpBase
   for(auto iter = "grad_node->begin()"; iter < "grad_node->end()"; iter++) {
     // Simply pass entire attribute map to kernels
-    egr::RunOp("iter->Type()", ins, outs, this->attr_map_,
+    egr::legacy::RunOp("iter->Type()", ins, outs, this->attr_map_,
                egr::Controller::Instance().ExpectedPlace(), false, {});
   }
@@ -1180,7 +1180,7 @@ static std::string GenerateGradNodeCCContents(
       "  // Pass the entire attribute map to TraceOp\n"
       "  // The underlying kernel will pickup whatever attribute they need "
       "at runtime\n"
-      "  egr::RunOp(\"%s\", ins, outs, this->attr_map_,\n"
+      "  egr::legacy::RunOp(\"%s\", ins, outs, this->attr_map_,\n"
       "     egr::Controller::Instance().GetExpectedPlace(),\n"
       "     &this->default_attr_map_, false, {});\n";
   trace_opbase_str = paddle::string::Sprintf(TRACE_OP_TEMPLATE, op_base_type);
......
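For reference, once paddle::string::Sprintf fills in a concrete op type ("matmul_v2" here is an arbitrary example, not taken from this diff), FWD_TRACE_OP_TEMPLATE above expands into generated code along these lines:

    paddle::framework::AttributeMap attrs = attr_map;
    paddle::framework::AttributeMap default_attrs;
    egr::legacy::RunOp("matmul_v2", ins, outs, attrs,
        egr::Controller::Instance().GetExpectedPlace(),
        &default_attrs, true, {});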
@@ -20,6 +20,7 @@
 #include "paddle/fluid/framework/operator.h"

 namespace egr {
+namespace legacy {

 AmpOperators::AmpOperators()
     : allow_ops_(new std::unordered_set<std::string>()),
@@ -85,12 +86,12 @@ std::ostream& operator<<(std::ostream& os, AmpOperators& ops) {
 inline std::string GetDtypeStr(
     const std::shared_ptr<egr::EagerTensor>& tensor) {
   return paddle::framework::DataTypeToString(
-      egr::GetDtypeFromVar(tensor->Var()));
+      egr::legacy::GetDtypeFromVar(tensor->Var()));
 }

 inline bool NeedCast(const std::shared_ptr<egr::EagerTensor>& tensor) {
-  auto place = egr::GetPlaceFromVar(tensor->Var());
-  auto data_type = egr::GetDtypeFromVar(tensor->Var());
+  auto place = egr::legacy::GetPlaceFromVar(tensor->Var());
+  auto data_type = egr::legacy::GetDtypeFromVar(tensor->Var());
   if (paddle::platform::is_gpu_place(place) ||
       paddle::platform::is_cuda_pinned_place(place) ||
       paddle::platform::is_xpu_place(place)) {
@@ -109,7 +110,7 @@ static inline std::shared_ptr<egr::EagerTensor> CastToType(
     const std::shared_ptr<egr::EagerTensor>& tensor,
     const paddle::framework::proto::VarType::Type dst_type) {
   NameTensorMap ins = {{"X", {tensor}}};
-  auto in_data_type = egr::GetDtypeFromVar(tensor->Var());
+  auto in_data_type = egr::legacy::GetDtypeFromVar(tensor->Var());
   paddle::framework::AttributeMap attrs = {{"in_dtype", in_data_type},
                                            {"out_dtype", dst_type}};
   auto out = std::shared_ptr<egr::EagerTensor>(new egr::EagerTensor());
@@ -127,7 +128,8 @@ static inline std::shared_ptr<egr::EagerTensor> CastToType(
 static inline std::shared_ptr<egr::EagerTensor> CastToFP16(
     const std::shared_ptr<egr::EagerTensor>& tensor) {
   auto dst_type = paddle::framework::proto::VarType::FP16;
-  if (NeedCast(tensor) && (egr::GetDtypeFromVar(tensor->Var()) != dst_type)) {
+  if (NeedCast(tensor) &&
+      (egr::legacy::GetDtypeFromVar(tensor->Var()) != dst_type)) {
     return CastToType(tensor, dst_type);
   }
   return tensor;
@@ -136,7 +138,8 @@ static inline std::shared_ptr<egr::EagerTensor> CastToFP16(
 static inline std::shared_ptr<egr::EagerTensor> CastToFP32(
     const std::shared_ptr<egr::EagerTensor>& tensor) {
   auto dst_type = paddle::framework::proto::VarType::FP32;
-  if (NeedCast(tensor) && (egr::GetDtypeFromVar(tensor->Var()) != dst_type)) {
+  if (NeedCast(tensor) &&
+      (egr::legacy::GetDtypeFromVar(tensor->Var()) != dst_type)) {
     return CastToType(tensor, dst_type);
   }
   return tensor;
@@ -147,9 +150,9 @@ static inline paddle::framework::proto::VarType::Type GetPromoteType(
   auto dst_type = paddle::framework::proto::VarType::FP16;
   for (const auto& pair : ins) {
     for (const auto& tensor : pair.second) {
-      if (egr::GetDtypeFromVar(tensor->Var()) ==
+      if (egr::legacy::GetDtypeFromVar(tensor->Var()) ==
           paddle::framework::proto::VarType::FP32) {
-        dst_type = egr::GetDtypeFromVar(tensor->Var());
+        dst_type = egr::legacy::GetDtypeFromVar(tensor->Var());
         break;
       }
     }
@@ -160,7 +163,7 @@ static inline paddle::framework::proto::VarType::Type GetPromoteType(
   if (op_type == "moving_average_abs_max_scale") {
     for (const auto& pair : ins) {
       if (pair.first == "X" &&
-          egr::GetDtypeFromVar(pair.second.front()->Var()) ==
+          egr::legacy::GetDtypeFromVar(pair.second.front()->Var()) ==
               paddle::framework::proto::VarType::FP16) {
         dst_type = paddle::framework::proto::VarType::FP16;
       }
@@ -255,4 +258,5 @@ NameTensorMap CastPureFp16Inputs(const std::string& op_type,
   return new_ins;
 }

+}  // namespace legacy
 }  // namespace egr
@@ -24,6 +24,7 @@
 #include "paddle/fluid/eager/legacy/type_def.h"

 namespace egr {
+namespace legacy {

 // NOTE(zhiqiu): only O1 and O2 are valid now
 enum class AmpLevel {
@@ -92,4 +93,5 @@ NameTensorMap AutoCastInputs(const std::string& op_type,
 NameTensorMap CastPureFp16Inputs(const std::string& op_type,
                                  const NameTensorMap& ins);

+}  // namespace legacy
 }  // namespace egr
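A rough sketch of how a caller might drive these helpers after the rename; the dispatch on AmpLevel is illustrative and not part of this diff (per the NOTE above, only O1 and O2 are valid):

    // Illustrative only: choose a casting strategy by AMP level, assuming the
    // declarations from this header (AutoCastInputs / CastPureFp16Inputs).
    egr::legacy::NameTensorMap PrepareAmpInputs(
        const std::string& op_type, const egr::legacy::NameTensorMap& ins,
        egr::legacy::AmpLevel level) {
      if (level == egr::legacy::AmpLevel::O1) {
        return egr::legacy::AutoCastInputs(op_type, ins);      // mixed precision
      }
      if (level == egr::legacy::AmpLevel::O2) {
        return egr::legacy::CastPureFp16Inputs(op_type, ins);  // pure fp16
      }
      return ins;  // other levels: pass inputs through unchanged
    }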
@@ -22,6 +22,7 @@
 #include "paddle/fluid/framework/type_defs.h"
 #include "paddle/fluid/framework/variable.h"

 namespace egr {
+namespace legacy {

 class EagerExecutionContext : public paddle::framework::ExecutionContext {
   using Variable = paddle::framework::Variable;
@@ -209,4 +210,5 @@ class EagerExecutionContext : public paddle::framework::ExecutionContext {
   const paddle::framework::AttributeMap& default_attrs_;
 };

+}  // namespace legacy
 }  // namespace egr
@@ -25,6 +25,7 @@
 #include "paddle/fluid/framework/type_defs.h"
 #include "paddle/fluid/framework/var_type.h"

 namespace egr {
+namespace legacy {

 class EagerInferShapeContext : public paddle::framework::InferShapeContext {
   using DDim = paddle::framework::DDim;
@@ -401,4 +402,5 @@ class EagerInferShapeContext : public paddle::framework::InferShapeContext {
   const std::string op_type_;
 };

+}  // namespace legacy
 }  // namespace egr
@@ -29,6 +29,7 @@
 #include "paddle/pten/include/core.h"

 namespace egr {
+namespace legacy {

 // infer var type context for imperative mode
 class TensorRuntimeInferVarTypeContext
@@ -255,4 +256,5 @@ class TensorRuntimeInferVarTypeContext
   const paddle::framework::AttributeMap& default_attrs_;
 };

+}  // namespace legacy
 }  // namespace egr
@@ -30,6 +30,7 @@ DECLARE_string(tracer_mkldnn_ops_on);
 DECLARE_string(tracer_mkldnn_ops_off);

 namespace egr {
+namespace legacy {

 void OpRunImpl(const paddle::framework::OperatorBase& op,
                const NameTensorMap& ins, const NameTensorMap& outs,
@@ -43,8 +44,8 @@ void OpRunImpl(const paddle::framework::OperatorBase& op,
           "Only support operator with kernel in Dygraph mode."));
   auto& info = op.Info();
   if (info.infer_var_type_) {
-    egr::TensorRuntimeInferVarTypeContext infer_var_type_ctx(ins, outs, attrs,
-                                                             default_attrs);
+    egr::legacy::TensorRuntimeInferVarTypeContext infer_var_type_ctx(
+        ins, outs, attrs, default_attrs);
     info.infer_var_type_(&infer_var_type_ctx);
   }
@@ -76,10 +77,10 @@ void OpRunImpl(const paddle::framework::OperatorBase& op,
    * after the execution of op, but the original input is directly
    * overwritten in the previous dynamic graph implemention.
    */
-  auto prepared_op = egr::PreparedOp::Prepare(ins, outs, *op_kernel, place,
-                                              attrs, default_attrs);
+  auto prepared_op = egr::legacy::PreparedOp::Prepare(
+      ins, outs, *op_kernel, place, attrs, default_attrs);
   auto tmp_ins_ptr =
-      egr::PrepareData(*op_kernel, ins, prepared_op.kernel_type());
+      egr::legacy::PrepareData(*op_kernel, ins, prepared_op.kernel_type());
   if (tmp_ins_ptr == nullptr) {
     prepared_op.Run(ins, outs, attrs, default_attrs);
   } else {
@@ -188,4 +189,6 @@ void RunOp(const std::string& type, const NameTensorMap& ins,
   //     program_desc_tracer_->InsertOp(type, new_ins, outs, attrs);
   //   }
 }
+
+}  // namespace legacy
 }  // namespace egr
@@ -14,11 +14,11 @@
 #pragma once
 #include "paddle/fluid/eager/legacy/type_def.h"
-// TODO(Jiabin): We should not depends on this header remove it later
 #include "paddle/fluid/imperative/jit/program_desc_tracer.h"
 #include "paddle/pten/core/tensor_meta.h"

 namespace egr {
+namespace legacy {

 void RunOp(const std::string& type, const NameTensorMap& ins,
            const NameTensorMap& outs, paddle::framework::AttributeMap attrs,
@@ -26,4 +26,6 @@ void RunOp(const std::string& type, const NameTensorMap& ins,
            paddle::framework::AttributeMap* default_attrs,
            bool override_default_attr_map,
            const std::map<std::string, std::string>& inplace_map = {});
-}
+
+}  // namespace legacy
+}  // namespace egr
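Reading this declaration together with the generated call sites earlier in the diff, the arguments line up as follows (an annotated sketch, not official documentation):

    egr::legacy::RunOp(
        "op_type",                                  // registered operator type
        ins, outs,                                  // NameTensorMap: slot name -> tensors
        attrs,                                      // attributes for this call
        egr::Controller::Instance().GetExpectedPlace(),  // target device
        &default_attrs,                             // the op's default attributes
        true,                                       // override_default_attr_map
        {});                                        // inplace_map, defaults to {}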
@@ -26,6 +26,7 @@ DECLARE_bool(check_nan_inf);
 DECLARE_bool(run_pten_kernel);

 namespace egr {
+namespace legacy {

 const paddle::framework::Tensor* GetTensorFromVar(
     const paddle::framework::Variable& var) {
@@ -96,9 +97,9 @@ PreparedOp PrepareImpl(const NameTensorMap& ins, const NameTensorMap& outs,
 #endif

   // 1. get expected kernel key
-  auto dygraph_exe_ctx =
-      egr::EagerExecutionContext(op, paddle::framework::Scope(), *dev_ctx, ctx,
-                                 ins, outs, attrs, default_attrs);
+  auto dygraph_exe_ctx = egr::legacy::EagerExecutionContext(
+      op, paddle::framework::Scope(), *dev_ctx, ctx, ins, outs, attrs,
+      default_attrs);
   auto expected_kernel_key = op.GetExpectedKernelType(dygraph_exe_ctx);
   VLOG(3) << "expected_kernel_key:" << expected_kernel_key;
@@ -251,4 +252,6 @@ std::shared_ptr<NameTensorMap> PrepareData(
   }
   return tmp_ins_ptr;
 }
+
+}  // namespace legacy
 }  // namespace egr
@@ -40,6 +40,7 @@ class DeviceContext;
 }  // namespace paddle

 namespace egr {
+namespace legacy {

 const paddle::framework::Tensor* GetTensorFromVar(
     const paddle::framework::Variable& var);
@@ -79,4 +80,5 @@ class PreparedOp {
   paddle::platform::DeviceContext* dev_ctx_;
 };

+}  // namespace legacy
 }  // namespace egr
@@ -25,6 +25,7 @@
 #include "paddle/fluid/platform/place.h"

 namespace egr {
+namespace legacy {

 void InitializeVariable(paddle::framework::Variable *var,
                         paddle::framework::proto::VarType::Type var_type) {
@@ -108,4 +109,6 @@ const paddle::platform::Place &GetPlaceFromVar(
           paddle::framework::ToTypeName(var.Type())));
   }
 }
+
+}  // namespace legacy
 }  // namespace egr
@@ -19,6 +19,8 @@
 #include "paddle/pten/api/all.h"
 #include "paddle/pten/include/core.h"

 namespace egr {
+namespace legacy {
+
 void InitializeVariable(paddle::framework::Variable* var,
                        paddle::framework::proto::VarType::Type var_type);
 paddle::framework::proto::VarType::Type GetDtypeFromVar(
@@ -27,4 +29,6 @@ const paddle::platform::Place& GetPlaceFromVar(
     const paddle::framework::Variable& var);
 void CopyVariable(const paddle::framework::Variable& src_var,
                   paddle::framework::Variable* dst_var);
-}
+
+}  // namespace legacy
+}  // namespace egr
@@ -22,6 +22,9 @@
 namespace egr {
 class EagerTensor;
+
+namespace legacy {
+
 namespace details {
 template <typename T>
 struct NameVarMapTrait {};
@@ -36,4 +39,6 @@ template <typename T>
 using NameMap = typename details::NameVarMapTrait<T>::Type;
 using NameTensorMap = NameMap<EagerTensor>;

+}  // namespace legacy
+
 }  // namespace egr
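The body of NameVarMapTrait is elided in this hunk; by analogy with the imperative-mode NameVarMap (an assumption, not shown in this diff), NameMap<EagerTensor> resolves to a slot-name-to-tensor-list map, roughly:

    // Assumed shape of the trait; the concrete container is inferred from the
    // paddle::imperative equivalent rather than from this commit.
    template <typename T>
    struct NameVarMapTrait {
      using Type = std::map<std::string, std::vector<std::shared_ptr<T>>>;
    };
    // So NameTensorMap is roughly:
    //   std::map<std::string, std::vector<std::shared_ptr<egr::EagerTensor>>>
    // e.g. {{"X", {x}}, {"Out", {out}}}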
@@ -55,9 +55,9 @@ void CheckOpHasNanOrInfInDygraph(const std::string& op_type,
 }

 template <typename TensorType>
-static void CheckOpHasNanOrInfInEager(const std::string& op_type,
-                                      const egr::NameMap<TensorType>& op_outs,
-                                      platform::Place place) {
+static void CheckOpHasNanOrInfInEager(
+    const std::string& op_type, const egr::legacy::NameMap<TensorType>& op_outs,
+    platform::Place place) {
   for (const auto& pair : op_outs) {
     for (const auto& tensor : pair.second) {
       auto* var = tensor->MutableVar();
......