Unverified commit 74fdba7c, authored by Zhanlue Yang, committed by GitHub

Refactored eager legacy namespace (#37659)

Parent 46c71f2c
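In brief: this commit moves the eager-mode legacy helpers (RunOp, PreparedOp, PrepareData, EagerExecutionContext, EagerInferShapeContext, TensorRuntimeInferVarTypeContext, GetDtypeFromVar, GetPlaceFromVar, NameTensorMap, and the AMP auto-cast utilities) from the egr namespace into a nested egr::legacy namespace, updating every call site to match. A minimal sketch of the pattern, with signatures abbreviated (not a compilable excerpt):

    // Before this commit: the helpers sat directly in egr::
    namespace egr {
    void RunOp(/* ... */);
    }  // namespace egr

    // After this commit: the same helpers live one level deeper
    namespace egr {
    namespace legacy {
    void RunOp(/* ... */);
    }  // namespace legacy
    }  // namespace egr

Call sites change accordingly: egr::RunOp(...) becomes egr::legacy::RunOp(...), egr::PreparedOp::Prepare(...) becomes egr::legacy::PreparedOp::Prepare(...), and so on throughout the hunks below.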
@@ -779,7 +779,7 @@ static std::pair<std::string, std::string> GenerateForwardFunctionContents(
,ConstructDuplicableOutput(Out1Num)} };
// According to op_proto->attrs()
egr::RunOp("op_type", ins, outs, attr_map,
egr::legacy::RunOp("op_type", ins, outs, attr_map,
Controller.Instance().GetExpectedPlace(), {});
// According to fwd_outputs_names
@@ -894,7 +894,7 @@ static std::pair<std::string, std::string> GenerateForwardFunctionContents(
const char* FWD_TRACE_OP_TEMPLATE =
" paddle::framework::AttributeMap attrs = attr_map;\n"
" paddle::framework::AttributeMap default_attrs;\n"
" egr::RunOp(\"%s\", ins, outs, attrs, \n"
" egr::legacy::RunOp(\"%s\", ins, outs, attrs, \n"
" egr::Controller::Instance().GetExpectedPlace(),\n"
" &default_attrs, true, {});\n";
std::string trace_op_str =
@@ -1052,7 +1052,7 @@ static std::string GenerateGradNodeCCContents(
// Visit each OpBase
for(auto iter = "grad_node->begin()"; iter < "grad_node->end()"; iter++) {
// Simply pass entire attribute map to kernels
egr::RunOp("iter->Type()", ins, outs, this->attr_map_,
egr::legacy::RunOp("iter->Type()", ins, outs, this->attr_map_,
egr::Controller::Instance().ExpectedPlace(), false, {});
}
@@ -1180,7 +1180,7 @@ static std::string GenerateGradNodeCCContents(
" // Pass the entire attribute map to TraceOp\n"
" // The underlying kernel will pickup whatever attribute they need "
"at runtime\n"
" egr::RunOp(\"%s\", ins, outs, this->attr_map_,\n"
" egr::legacy::RunOp(\"%s\", ins, outs, this->attr_map_,\n"
" egr::Controller::Instance().GetExpectedPlace(),\n"
" &this->default_attr_map_, false, {});\n";
trace_opbase_str = paddle::string::Sprintf(TRACE_OP_TEMPLATE, op_base_type);
......
@@ -20,6 +20,7 @@
#include "paddle/fluid/framework/operator.h"
namespace egr {
+namespace legacy {
AmpOperators::AmpOperators()
: allow_ops_(new std::unordered_set<std::string>()),
@@ -85,12 +86,12 @@ std::ostream& operator<<(std::ostream& os, AmpOperators& ops) {
inline std::string GetDtypeStr(
const std::shared_ptr<egr::EagerTensor>& tensor) {
return paddle::framework::DataTypeToString(
-egr::GetDtypeFromVar(tensor->Var()));
+egr::legacy::GetDtypeFromVar(tensor->Var()));
}
inline bool NeedCast(const std::shared_ptr<egr::EagerTensor>& tensor) {
-auto place = egr::GetPlaceFromVar(tensor->Var());
-auto data_type = egr::GetDtypeFromVar(tensor->Var());
+auto place = egr::legacy::GetPlaceFromVar(tensor->Var());
+auto data_type = egr::legacy::GetDtypeFromVar(tensor->Var());
if (paddle::platform::is_gpu_place(place) ||
paddle::platform::is_cuda_pinned_place(place) ||
paddle::platform::is_xpu_place(place)) {
@@ -109,7 +110,7 @@ static inline std::shared_ptr<egr::EagerTensor> CastToType(
const std::shared_ptr<egr::EagerTensor>& tensor,
const paddle::framework::proto::VarType::Type dst_type) {
NameTensorMap ins = {{"X", {tensor}}};
-auto in_data_type = egr::GetDtypeFromVar(tensor->Var());
+auto in_data_type = egr::legacy::GetDtypeFromVar(tensor->Var());
paddle::framework::AttributeMap attrs = {{"in_dtype", in_data_type},
{"out_dtype", dst_type}};
auto out = std::shared_ptr<egr::EagerTensor>(new egr::EagerTensor());
@@ -127,7 +128,8 @@ static inline std::shared_ptr<egr::EagerTensor> CastToType(
static inline std::shared_ptr<egr::EagerTensor> CastToFP16(
const std::shared_ptr<egr::EagerTensor>& tensor) {
auto dst_type = paddle::framework::proto::VarType::FP16;
-if (NeedCast(tensor) && (egr::GetDtypeFromVar(tensor->Var()) != dst_type)) {
+if (NeedCast(tensor) &&
+(egr::legacy::GetDtypeFromVar(tensor->Var()) != dst_type)) {
return CastToType(tensor, dst_type);
}
return tensor;
@@ -136,7 +138,8 @@ static inline std::shared_ptr<egr::EagerTensor> CastToFP16(
static inline std::shared_ptr<egr::EagerTensor> CastToFP32(
const std::shared_ptr<egr::EagerTensor>& tensor) {
auto dst_type = paddle::framework::proto::VarType::FP32;
-if (NeedCast(tensor) && (egr::GetDtypeFromVar(tensor->Var()) != dst_type)) {
+if (NeedCast(tensor) &&
+(egr::legacy::GetDtypeFromVar(tensor->Var()) != dst_type)) {
return CastToType(tensor, dst_type);
}
return tensor;
@@ -147,9 +150,9 @@ static inline paddle::framework::proto::VarType::Type GetPromoteType(
auto dst_type = paddle::framework::proto::VarType::FP16;
for (const auto& pair : ins) {
for (const auto& tensor : pair.second) {
-if (egr::GetDtypeFromVar(tensor->Var()) ==
+if (egr::legacy::GetDtypeFromVar(tensor->Var()) ==
paddle::framework::proto::VarType::FP32) {
-dst_type = egr::GetDtypeFromVar(tensor->Var());
+dst_type = egr::legacy::GetDtypeFromVar(tensor->Var());
break;
}
}
@@ -160,7 +163,7 @@ static inline paddle::framework::proto::VarType::Type GetPromoteType(
if (op_type == "moving_average_abs_max_scale") {
for (const auto& pair : ins) {
if (pair.first == "X" &&
-egr::GetDtypeFromVar(pair.second.front()->Var()) ==
+egr::legacy::GetDtypeFromVar(pair.second.front()->Var()) ==
paddle::framework::proto::VarType::FP16) {
dst_type = paddle::framework::proto::VarType::FP16;
}
@@ -255,4 +258,5 @@ NameTensorMap CastPureFp16Inputs(const std::string& op_type,
return new_ins;
}
+} // namespace legacy
} // namespace egr
@@ -24,6 +24,7 @@
#include "paddle/fluid/eager/legacy/type_def.h"
namespace egr {
+namespace legacy {
// NOTE(zhiqiu): only O1 and O2 are valid now
enum class AmpLevel {
@@ -92,4 +93,5 @@ NameTensorMap AutoCastInputs(const std::string& op_type,
NameTensorMap CastPureFp16Inputs(const std::string& op_type,
const NameTensorMap& ins);
+} // namespace legacy
} // namespace egr
@@ -22,6 +22,7 @@
#include "paddle/fluid/framework/type_defs.h"
#include "paddle/fluid/framework/variable.h"
namespace egr {
+namespace legacy {
class EagerExecutionContext : public paddle::framework::ExecutionContext {
using Variable = paddle::framework::Variable;
@@ -209,4 +210,5 @@ class EagerExecutionContext : public paddle::framework::ExecutionContext {
const paddle::framework::AttributeMap& default_attrs_;
};
+} // namespace legacy
} // namespace egr
@@ -25,6 +25,7 @@
#include "paddle/fluid/framework/type_defs.h"
#include "paddle/fluid/framework/var_type.h"
namespace egr {
+namespace legacy {
class EagerInferShapeContext : public paddle::framework::InferShapeContext {
using DDim = paddle::framework::DDim;
@@ -401,4 +402,5 @@ class EagerInferShapeContext : public paddle::framework::InferShapeContext {
const std::string op_type_;
};
+} // namespace legacy
} // namespace egr
@@ -29,6 +29,7 @@
#include "paddle/pten/include/core.h"
namespace egr {
+namespace legacy {
// infer var type context for imperative mode
class TensorRuntimeInferVarTypeContext
@@ -255,4 +256,5 @@ class TensorRuntimeInferVarTypeContext
const paddle::framework::AttributeMap& default_attrs_;
};
+} // namespace legacy
} // namespace egr
@@ -30,6 +30,7 @@ DECLARE_string(tracer_mkldnn_ops_on);
DECLARE_string(tracer_mkldnn_ops_off);
namespace egr {
+namespace legacy {
void OpRunImpl(const paddle::framework::OperatorBase& op,
const NameTensorMap& ins, const NameTensorMap& outs,
@@ -43,8 +44,8 @@ void OpRunImpl(const paddle::framework::OperatorBase& op,
"Only support operator with kernel in Dygraph mode."));
auto& info = op.Info();
if (info.infer_var_type_) {
-egr::TensorRuntimeInferVarTypeContext infer_var_type_ctx(ins, outs, attrs,
-default_attrs);
+egr::legacy::TensorRuntimeInferVarTypeContext infer_var_type_ctx(
+ins, outs, attrs, default_attrs);
info.infer_var_type_(&infer_var_type_ctx);
}
@@ -76,10 +77,10 @@ void OpRunImpl(const paddle::framework::OperatorBase& op,
* after the execution of op, but the original input is directly
* overwritten in the previous dynamic graph implementation.
*/
-auto prepared_op = egr::PreparedOp::Prepare(ins, outs, *op_kernel, place,
-attrs, default_attrs);
+auto prepared_op = egr::legacy::PreparedOp::Prepare(
+ins, outs, *op_kernel, place, attrs, default_attrs);
auto tmp_ins_ptr =
-egr::PrepareData(*op_kernel, ins, prepared_op.kernel_type());
+egr::legacy::PrepareData(*op_kernel, ins, prepared_op.kernel_type());
if (tmp_ins_ptr == nullptr) {
prepared_op.Run(ins, outs, attrs, default_attrs);
} else {
@@ -188,4 +189,6 @@ void RunOp(const std::string& type, const NameTensorMap& ins,
// program_desc_tracer_->InsertOp(type, new_ins, outs, attrs);
// }
}
+} // namespace legacy
} // namespace egr
@@ -14,11 +14,11 @@
#pragma once
#include "paddle/fluid/eager/legacy/type_def.h"
// TODO(Jiabin): We should not depend on this header; remove it later
#include "paddle/fluid/imperative/jit/program_desc_tracer.h"
#include "paddle/pten/core/tensor_meta.h"
namespace egr {
+namespace legacy {
void RunOp(const std::string& type, const NameTensorMap& ins,
const NameTensorMap& outs, paddle::framework::AttributeMap attrs,
@@ -26,4 +26,6 @@ void RunOp(const std::string& type, const NameTensorMap& ins,
paddle::framework::AttributeMap* default_attrs,
bool override_default_attr_map,
const std::map<std::string, std::string>& inplace_map = {});
-}
+} // namespace legacy
+} // namespace egr
@@ -26,6 +26,7 @@ DECLARE_bool(check_nan_inf);
DECLARE_bool(run_pten_kernel);
namespace egr {
+namespace legacy {
const paddle::framework::Tensor* GetTensorFromVar(
const paddle::framework::Variable& var) {
@@ -96,9 +97,9 @@ PreparedOp PrepareImpl(const NameTensorMap& ins, const NameTensorMap& outs,
#endif
// 1. get expected kernel key
-auto dygraph_exe_ctx =
-egr::EagerExecutionContext(op, paddle::framework::Scope(), *dev_ctx, ctx,
-ins, outs, attrs, default_attrs);
+auto dygraph_exe_ctx = egr::legacy::EagerExecutionContext(
+op, paddle::framework::Scope(), *dev_ctx, ctx, ins, outs, attrs,
+default_attrs);
auto expected_kernel_key = op.GetExpectedKernelType(dygraph_exe_ctx);
VLOG(3) << "expected_kernel_key:" << expected_kernel_key;
@@ -251,4 +252,6 @@ std::shared_ptr<NameTensorMap> PrepareData(
}
return tmp_ins_ptr;
}
+} // namespace legacy
} // namespace egr
@@ -40,6 +40,7 @@ class DeviceContext;
} // namespace paddle
namespace egr {
+namespace legacy {
const paddle::framework::Tensor* GetTensorFromVar(
const paddle::framework::Variable& var);
@@ -79,4 +80,5 @@ class PreparedOp {
paddle::platform::DeviceContext* dev_ctx_;
};
+} // namespace legacy
} // namespace egr
@@ -25,6 +25,7 @@
#include "paddle/fluid/platform/place.h"
namespace egr {
+namespace legacy {
void InitializeVariable(paddle::framework::Variable *var,
paddle::framework::proto::VarType::Type var_type) {
@@ -108,4 +109,6 @@ const paddle::platform::Place &GetPlaceFromVar(
paddle::framework::ToTypeName(var.Type())));
}
}
+} // namespace legacy
} // namespace egr
@@ -19,6 +19,8 @@
#include "paddle/pten/api/all.h"
#include "paddle/pten/include/core.h"
namespace egr {
+namespace legacy {
void InitializeVariable(paddle::framework::Variable* var,
paddle::framework::proto::VarType::Type var_type);
paddle::framework::proto::VarType::Type GetDtypeFromVar(
@@ -27,4 +29,6 @@ const paddle::platform::Place& GetPlaceFromVar(
const paddle::framework::Variable& var);
void CopyVariable(const paddle::framework::Variable& src_var,
paddle::framework::Variable* dst_var);
-}
+} // namespace legacy
+} // namespace egr
@@ -22,6 +22,9 @@
namespace egr {
class EagerTensor;
+namespace legacy {
namespace details {
template <typename T>
struct NameVarMapTrait {};
@@ -36,4 +39,6 @@ template <typename T>
using NameMap = typename details::NameVarMapTrait<T>::Type;
using NameTensorMap = NameMap<EagerTensor>;
+} // namespace legacy
} // namespace egr
@@ -55,9 +55,9 @@ void CheckOpHasNanOrInfInDygraph(const std::string& op_type,
}
template <typename TensorType>
-static void CheckOpHasNanOrInfInEager(const std::string& op_type,
-const egr::NameMap<TensorType>& op_outs,
-platform::Place place) {
+static void CheckOpHasNanOrInfInEager(
+const std::string& op_type, const egr::legacy::NameMap<TensorType>& op_outs,
+platform::Place place) {
for (const auto& pair : op_outs) {
for (const auto& tensor : pair.second) {
auto* var = tensor->MutableVar();
......
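Taken together, every hunk applies the same mechanical change. As a hedged illustration of the resulting call path (identifiers copied from the FWD_TRACE_OP_TEMPLATE hunk above; the surrounding ins, outs, and attr_map setup is assumed), the generated forward code now reads roughly:

    // Sketch of the code the generator emits after this commit;
    // mirrors FWD_TRACE_OP_TEMPLATE, not a standalone program.
    paddle::framework::AttributeMap attrs = attr_map;
    paddle::framework::AttributeMap default_attrs;
    egr::legacy::RunOp("op_type", ins, outs, attrs,
                       egr::Controller::Instance().GetExpectedPlace(),
                       &default_attrs, true, {});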