Unverified commit 74fdba7c
Authored on Nov 29, 2021 by Zhanlue Yang; committed by GitHub on Nov 29, 2021
Refactored eager legacy namespace (#37659)
Parent: 46c71f2c
Showing 14 changed files with 61 additions and 27 deletions (+61, -27)
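The same mechanical change runs through every file below: symbols that previously sat directly in namespace egr (RunOp, PreparedOp, PrepareData, the Eager*Context classes, the tensor helpers, and the NameMap/NameTensorMap aliases) are wrapped in a nested egr::legacy namespace, and call sites gain the extra legacy:: qualifier. A minimal before/after sketch of one call site, modeled on the generated-code templates shown in the diff (the snippet itself is illustrative, not part of the commit):

  // Before: the eager tracer entry point lived directly in egr
  egr::RunOp("op_type", ins, outs, attr_map,
             egr::Controller::Instance().GetExpectedPlace(), {});

  // After: the same entry point is reached through the nested legacy namespace
  egr::legacy::RunOp("op_type", ins, outs, attr_map,
                     egr::Controller::Instance().GetExpectedPlace(), {});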
paddle/fluid/eager/auto_code_generator/eager_generator.cc  (+4, -4)
paddle/fluid/eager/legacy/amp_auto_cast.cc  (+13, -9)
paddle/fluid/eager/legacy/amp_auto_cast.h  (+2, -0)
paddle/fluid/eager/legacy/execution_context.h  (+2, -0)
paddle/fluid/eager/legacy/infer_shape_context.h  (+2, -0)
paddle/fluid/eager/legacy/infer_var_type_context.h  (+2, -0)
paddle/fluid/eager/legacy/op_runner.cc  (+8, -5)
paddle/fluid/eager/legacy/op_runner.h  (+4, -2)
paddle/fluid/eager/legacy/prepared_operator.cc  (+6, -3)
paddle/fluid/eager/legacy/prepared_operator.h  (+2, -0)
paddle/fluid/eager/legacy/tensor_helper.cc  (+3, -0)
paddle/fluid/eager/legacy/tensor_helper.h  (+5, -1)
paddle/fluid/eager/legacy/type_def.h  (+5, -0)
paddle/fluid/framework/details/nan_inf_utils.h  (+3, -3)
paddle/fluid/eager/auto_code_generator/eager_generator.cc
@@ -779,7 +779,7 @@ static std::pair<std::string, std::string> GenerateForwardFunctionContents(
       ,ConstructDuplicableOutput(Out1Num)} };
     // According to op_proto->attrs()
-    egr::RunOp("op_type", ins, outs, attr_map,
+    egr::legacy::RunOp("op_type", ins, outs, attr_map,
                Controller.Instance().GetExpectedPlace(), {});
     // According to fwd_outputs_names

@@ -894,7 +894,7 @@ static std::pair<std::string, std::string> GenerateForwardFunctionContents(
   const char* FWD_TRACE_OP_TEMPLATE =
       "  paddle::framework::AttributeMap attrs = attr_map;\n"
       "  paddle::framework::AttributeMap default_attrs;\n"
-      "  egr::RunOp(\"%s\", ins, outs, attrs,\n"
+      "  egr::legacy::RunOp(\"%s\", ins, outs, attrs,\n"
       "     egr::Controller::Instance().GetExpectedPlace(),\n"
       "     &default_attrs, true, {});\n";
   std::string trace_op_str =

@@ -1052,7 +1052,7 @@ static std::string GenerateGradNodeCCContents(
     // Visit each OpBase
     for(auto iter = "grad_node->begin()"; iter < "grad_node->end()"; iter++) {
       // Simply pass entire attribute map to kernels
-      egr::RunOp("iter->Type()", ins, outs, this->attr_map_,
+      egr::legacy::RunOp("iter->Type()", ins, outs, this->attr_map_,
                  egr::Controller::Instance().ExpectedPlace(), false, {});
     }

@@ -1180,7 +1180,7 @@ static std::string GenerateGradNodeCCContents(
       "  // Pass the entire attribute map to TraceOp\n"
       "  // The underlying kernel will pickup whatever attribute they need "
       "at runtime\n"
-      "  egr::RunOp(\"%s\", ins, outs, this->attr_map_,\n"
+      "  egr::legacy::RunOp(\"%s\", ins, outs, this->attr_map_,\n"
       "      egr::Controller::Instance().GetExpectedPlace(),\n"
      "      &this->default_attr_map_, false, {});\n";
   trace_opbase_str = paddle::string::Sprintf(TRACE_OP_TEMPLATE, op_base_type);
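The generator fills these %s placeholders with paddle::string::Sprintf, so every auto-generated forward function traces through the relocated entry point. Roughly, for a hypothetical operator name "my_op" (an illustrative expansion of FWD_TRACE_OP_TEMPLATE above, not text from the commit), the emitted code would read:

  paddle::framework::AttributeMap attrs = attr_map;
  paddle::framework::AttributeMap default_attrs;
  egr::legacy::RunOp("my_op", ins, outs, attrs,
     egr::Controller::Instance().GetExpectedPlace(),
     &default_attrs, true, {});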
paddle/fluid/eager/legacy/amp_auto_cast.cc
@@ -20,6 +20,7 @@
 #include "paddle/fluid/framework/operator.h"
 namespace egr {
+namespace legacy {
 AmpOperators::AmpOperators()
     : allow_ops_(new std::unordered_set<std::string>()),

@@ -85,12 +86,12 @@ std::ostream& operator<<(std::ostream& os, AmpOperators& ops) {
 inline std::string GetDtypeStr(
     const std::shared_ptr<egr::EagerTensor>& tensor) {
   return paddle::framework::DataTypeToString(
-      egr::GetDtypeFromVar(tensor->Var()));
+      egr::legacy::GetDtypeFromVar(tensor->Var()));
 }
 inline bool NeedCast(const std::shared_ptr<egr::EagerTensor>& tensor) {
-  auto place = egr::GetPlaceFromVar(tensor->Var());
-  auto data_type = egr::GetDtypeFromVar(tensor->Var());
+  auto place = egr::legacy::GetPlaceFromVar(tensor->Var());
+  auto data_type = egr::legacy::GetDtypeFromVar(tensor->Var());
   if (paddle::platform::is_gpu_place(place) ||
       paddle::platform::is_cuda_pinned_place(place) ||
       paddle::platform::is_xpu_place(place)) {

@@ -109,7 +110,7 @@ static inline std::shared_ptr<egr::EagerTensor> CastToType(
     const std::shared_ptr<egr::EagerTensor>& tensor,
     const paddle::framework::proto::VarType::Type dst_type) {
   NameTensorMap ins = {{"X", {tensor}}};
-  auto in_data_type = egr::GetDtypeFromVar(tensor->Var());
+  auto in_data_type = egr::legacy::GetDtypeFromVar(tensor->Var());
   paddle::framework::AttributeMap attrs = {{"in_dtype", in_data_type},
                                            {"out_dtype", dst_type}};
   auto out = std::shared_ptr<egr::EagerTensor>(new egr::EagerTensor());

@@ -127,7 +128,8 @@ static inline std::shared_ptr<egr::EagerTensor> CastToType(
 static inline std::shared_ptr<egr::EagerTensor> CastToFP16(
     const std::shared_ptr<egr::EagerTensor>& tensor) {
   auto dst_type = paddle::framework::proto::VarType::FP16;
-  if (NeedCast(tensor) && (egr::GetDtypeFromVar(tensor->Var()) != dst_type)) {
+  if (NeedCast(tensor) &&
+      (egr::legacy::GetDtypeFromVar(tensor->Var()) != dst_type)) {
     return CastToType(tensor, dst_type);
   }
   return tensor;

@@ -136,7 +138,8 @@ static inline std::shared_ptr<egr::EagerTensor> CastToFP16(
 static inline std::shared_ptr<egr::EagerTensor> CastToFP32(
     const std::shared_ptr<egr::EagerTensor>& tensor) {
   auto dst_type = paddle::framework::proto::VarType::FP32;
-  if (NeedCast(tensor) && (egr::GetDtypeFromVar(tensor->Var()) != dst_type)) {
+  if (NeedCast(tensor) &&
+      (egr::legacy::GetDtypeFromVar(tensor->Var()) != dst_type)) {
     return CastToType(tensor, dst_type);
   }
   return tensor;

@@ -147,9 +150,9 @@ static inline paddle::framework::proto::VarType::Type GetPromoteType(
   auto dst_type = paddle::framework::proto::VarType::FP16;
   for (const auto& pair : ins) {
     for (const auto& tensor : pair.second) {
-      if (egr::GetDtypeFromVar(tensor->Var()) ==
+      if (egr::legacy::GetDtypeFromVar(tensor->Var()) ==
           paddle::framework::proto::VarType::FP32) {
-        dst_type = egr::GetDtypeFromVar(tensor->Var());
+        dst_type = egr::legacy::GetDtypeFromVar(tensor->Var());
         break;
       }
     }

@@ -160,7 +163,7 @@ static inline paddle::framework::proto::VarType::Type GetPromoteType(
   if (op_type == "moving_average_abs_max_scale") {
     for (const auto& pair : ins) {
       if (pair.first == "X" &&
-          egr::GetDtypeFromVar(pair.second.front()->Var()) ==
+          egr::legacy::GetDtypeFromVar(pair.second.front()->Var()) ==
               paddle::framework::proto::VarType::FP16) {
         dst_type = paddle::framework::proto::VarType::FP16;
       }

@@ -255,4 +258,5 @@ NameTensorMap CastPureFp16Inputs(const std::string& op_type,
   return new_ins;
 }
+}  // namespace legacy
 }  // namespace egr
paddle/fluid/eager/legacy/amp_auto_cast.h
@@ -24,6 +24,7 @@
 #include "paddle/fluid/eager/legacy/type_def.h"
 namespace egr {
+namespace legacy {
 // NOTE(zhiqiu): only O1 and O2 are valid now
 enum class AmpLevel {

@@ -92,4 +93,5 @@ NameTensorMap AutoCastInputs(const std::string& op_type,
 NameTensorMap CastPureFp16Inputs(const std::string& op_type,
                                  const NameTensorMap& ins);
+}  // namespace legacy
 }  // namespace egr
paddle/fluid/eager/legacy/execution_context.h
@@ -22,6 +22,7 @@
 #include "paddle/fluid/framework/type_defs.h"
 #include "paddle/fluid/framework/variable.h"
 namespace egr {
+namespace legacy {
 class EagerExecutionContext : public paddle::framework::ExecutionContext {
   using Variable = paddle::framework::Variable;

@@ -209,4 +210,5 @@ class EagerExecutionContext : public paddle::framework::ExecutionContext {
   const paddle::framework::AttributeMap& default_attrs_;
 };
+}  // namespace legacy
 }  // namespace egr
paddle/fluid/eager/legacy/infer_shape_context.h
@@ -25,6 +25,7 @@
 #include "paddle/fluid/framework/type_defs.h"
 #include "paddle/fluid/framework/var_type.h"
 namespace egr {
+namespace legacy {
 class EagerInferShapeContext : public paddle::framework::InferShapeContext {
   using DDim = paddle::framework::DDim;

@@ -401,4 +402,5 @@ class EagerInferShapeContext : public paddle::framework::InferShapeContext {
   const std::string op_type_;
 };
+}  // namespace legacy
 }  // namespace egr
paddle/fluid/eager/legacy/infer_var_type_context.h
@@ -29,6 +29,7 @@
 #include "paddle/pten/include/core.h"
 namespace egr {
+namespace legacy {
 // infer var type context for imperative mode
 class TensorRuntimeInferVarTypeContext

@@ -255,4 +256,5 @@ class TensorRuntimeInferVarTypeContext
   const paddle::framework::AttributeMap& default_attrs_;
 };
+}  // namespace legacy
 }  // namespace egr
paddle/fluid/eager/legacy/op_runner.cc
@@ -30,6 +30,7 @@ DECLARE_string(tracer_mkldnn_ops_on);
 DECLARE_string(tracer_mkldnn_ops_off);
 namespace egr {
+namespace legacy {
 void OpRunImpl(const paddle::framework::OperatorBase& op,
                const NameTensorMap& ins, const NameTensorMap& outs,

@@ -43,8 +44,8 @@ void OpRunImpl(const paddle::framework::OperatorBase& op,
          "Only support operator with kernel in Dygraph mode."));
   auto& info = op.Info();
   if (info.infer_var_type_) {
-    egr::TensorRuntimeInferVarTypeContext infer_var_type_ctx(
-        ins, outs, attrs, default_attrs);
+    egr::legacy::TensorRuntimeInferVarTypeContext infer_var_type_ctx(
+        ins, outs, attrs, default_attrs);
     info.infer_var_type_(&infer_var_type_ctx);
   }

@@ -76,10 +77,10 @@ void OpRunImpl(const paddle::framework::OperatorBase& op,
    * after the execution of op, but the original input is directly
    * overwritten in the previous dynamic graph implemention.
    */
-  auto prepared_op = egr::PreparedOp::Prepare(ins, outs, *op_kernel, place,
-                                              attrs, default_attrs);
-  auto tmp_ins_ptr =
-      egr::PrepareData(*op_kernel, ins, prepared_op.kernel_type());
+  auto prepared_op = egr::legacy::PreparedOp::Prepare(
+      ins, outs, *op_kernel, place, attrs, default_attrs);
+  auto tmp_ins_ptr =
+      egr::legacy::PrepareData(*op_kernel, ins, prepared_op.kernel_type());
   if (tmp_ins_ptr == nullptr) {
     prepared_op.Run(ins, outs, attrs, default_attrs);
   } else {

@@ -188,4 +189,6 @@ void RunOp(const std::string& type, const NameTensorMap& ins,
   //   program_desc_tracer_->InsertOp(type, new_ins, outs, attrs);
   // }
 }
+}  // namespace legacy
 }  // namespace egr
paddle/fluid/eager/legacy/op_runner.h
@@ -14,11 +14,11 @@
 #pragma once
 #include "paddle/fluid/eager/legacy/type_def.h"
 // TODO(Jiabin): We should not depends on this header remove it later
 #include "paddle/fluid/imperative/jit/program_desc_tracer.h"
 #include "paddle/pten/core/tensor_meta.h"
 namespace egr {
+namespace legacy {
 void RunOp(const std::string& type, const NameTensorMap& ins,
            const NameTensorMap& outs, paddle::framework::AttributeMap attrs,

@@ -26,4 +26,6 @@ void RunOp(const std::string& type, const NameTensorMap& ins,
            paddle::framework::AttributeMap* default_attrs,
            bool override_default_attr_map,
            const std::map<std::string, std::string>& inplace_map = {});
+}  // namespace legacy
 }  // namespace egr
paddle/fluid/eager/legacy/prepared_operator.cc
@@ -26,6 +26,7 @@ DECLARE_bool(check_nan_inf);
 DECLARE_bool(run_pten_kernel);
 namespace egr {
+namespace legacy {
 const paddle::framework::Tensor* GetTensorFromVar(
     const paddle::framework::Variable& var) {

@@ -96,9 +97,9 @@ PreparedOp PrepareImpl(const NameTensorMap& ins, const NameTensorMap& outs,
 #endif
   // 1. get expected kernel key
-  auto dygraph_exe_ctx = egr::EagerExecutionContext(
-      op, paddle::framework::Scope(), *dev_ctx, ctx, ins, outs, attrs,
-      default_attrs);
+  auto dygraph_exe_ctx = egr::legacy::EagerExecutionContext(
+      op, paddle::framework::Scope(), *dev_ctx, ctx, ins, outs, attrs,
+      default_attrs);
   auto expected_kernel_key = op.GetExpectedKernelType(dygraph_exe_ctx);
   VLOG(3) << "expected_kernel_key:" << expected_kernel_key;

@@ -251,4 +252,6 @@ std::shared_ptr<NameTensorMap> PrepareData(
   }
   return tmp_ins_ptr;
 }
+}  // namespace legacy
 }  // namespace egr
paddle/fluid/eager/legacy/prepared_operator.h
@@ -40,6 +40,7 @@ class DeviceContext;
 }  // namespace paddle
 namespace egr {
+namespace legacy {
 const paddle::framework::Tensor* GetTensorFromVar(
     const paddle::framework::Variable& var);

@@ -79,4 +80,5 @@ class PreparedOp {
   paddle::platform::DeviceContext* dev_ctx_;
 };
+}  // namespace legacy
 }  // namespace egr
paddle/fluid/eager/legacy/tensor_helper.cc
@@ -25,6 +25,7 @@
 #include "paddle/fluid/platform/place.h"
 namespace egr {
+namespace legacy {
 void InitializeVariable(paddle::framework::Variable* var,
                         paddle::framework::proto::VarType::Type var_type) {

@@ -108,4 +109,6 @@ const paddle::platform::Place &GetPlaceFromVar(
         paddle::framework::ToTypeName(var.Type())));
   }
 }
+}  // namespace legacy
 }  // namespace egr
paddle/fluid/eager/legacy/tensor_helper.h
@@ -19,6 +19,8 @@
 #include "paddle/pten/api/all.h"
 #include "paddle/pten/include/core.h"
 namespace egr {
+namespace legacy {
 void InitializeVariable(paddle::framework::Variable* var,
                         paddle::framework::proto::VarType::Type var_type);
 paddle::framework::proto::VarType::Type GetDtypeFromVar(

@@ -27,4 +29,6 @@ const paddle::platform::Place& GetPlaceFromVar(
     const paddle::framework::Variable& var);
 void CopyVariable(const paddle::framework::Variable& src_var,
                   paddle::framework::Variable* dst_var);
+}  // namespace legacy
 }  // namespace egr
paddle/fluid/eager/legacy/type_def.h
@@ -22,6 +22,9 @@
 namespace egr {
 class EagerTensor;
+namespace legacy {
 namespace details {
 template <typename T>
 struct NameVarMapTrait {};

@@ -36,4 +39,6 @@ template <typename T>
 using NameMap = typename details::NameVarMapTrait<T>::Type;
 using NameTensorMap = NameMap<EagerTensor>;
+}  // namespace legacy
 }  // namespace egr
paddle/fluid/framework/details/nan_inf_utils.h
@@ -55,9 +55,9 @@ void CheckOpHasNanOrInfInDygraph(const std::string& op_type,
 }
 template <typename TensorType>
-static void CheckOpHasNanOrInfInEager(const std::string& op_type,
-                                      const egr::NameMap<TensorType>& op_outs,
-                                      platform::Place place) {
+static void CheckOpHasNanOrInfInEager(
+    const std::string& op_type,
+    const egr::legacy::NameMap<TensorType>& op_outs, platform::Place place) {
   for (const auto& pair : op_outs) {
     for (const auto& tensor : pair.second) {
       auto* var = tensor->MutableVar();
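Because the NameMap/NameTensorMap aliases from type_def.h now also live in egr::legacy, code outside the eager module has to spell the nested namespace out, exactly as nan_inf_utils.h does above. A minimal sketch of such a caller (a hypothetical helper, assuming only the aliases declared in type_def.h):

  #include <cstddef>

  #include "paddle/fluid/eager/legacy/type_def.h"

  // NameTensorMap is egr::legacy::NameMap<egr::EagerTensor> after this commit.
  static std::size_t CountOutputTensors(const egr::legacy::NameTensorMap& outs) {
    std::size_t total = 0;
    for (const auto& pair : outs) {  // pair.first: slot name, pair.second: tensors
      total += pair.second.size();
    }
    return total;
  }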