BaiXuePrincess / Paddle (fork of PaddlePaddle / Paddle, in sync with the fork source)
Commit a9608f60 (unverified)
Authored Nov 26, 2021 by Zhanlue Yang; committed via GitHub on Nov 26, 2021.
Added fluid dependencies to Eager Dygraph (#37555)
Parent: a68eeb0c
Showing 5 changed files with 971 additions and 0 deletions (+971, -0).
paddle/fluid/eager/legacy/CMakeLists.txt        +2,   -0
paddle/fluid/eager/legacy/amp_auto_cast.cc      +258, -0
paddle/fluid/eager/legacy/amp_auto_cast.h       +95,  -0
paddle/fluid/eager/legacy/execution_context.h   +212, -0
paddle/fluid/eager/legacy/infer_shape_context.h +404, -0
paddle/fluid/eager/legacy/CMakeLists.txt (new file, mode 100644)
file(GLOB DYGRAPH_LEGACY "*.cpp" "*.cc")
set(DYGRAPH_LEGACY ${DYGRAPH_LEGACY} PARENT_SCOPE)
paddle/fluid/eager/legacy/amp_auto_cast.cc (new file, mode 100644)
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/eager/legacy/amp_auto_cast.h"
#include <memory>
#include <string>
#include "paddle/fluid/eager/legacy/op_runner.h"
#include "paddle/fluid/eager/legacy/tensor_helper.h"
#include "paddle/fluid/framework/operator.h"
namespace egr {

AmpOperators::AmpOperators()
    : allow_ops_(new std::unordered_set<std::string>()),
      block_ops_(new std::unordered_set<std::string>()),
      unsupported_fp16_ops_(new std::unordered_set<std::string>()) {
  auto& all_kernels = paddle::framework::OperatorWithKernel::AllOpKernels();
  auto fp16_dtype = paddle::framework::proto::VarType::FP16;
  for (auto it = all_kernels.begin(); it != all_kernels.end(); it++) {
    bool supported = false;
    for (auto& kernel_type : it->second) {
      if ((paddle::platform::is_gpu_place(kernel_type.first.place_) ||
           paddle::platform::is_xpu_place(kernel_type.first.place_)) &&
          kernel_type.first.data_type_ == fp16_dtype) {
        supported = true;
      }
    }
    if (!supported) {
      unsupported_fp16_ops_->insert(it->first);
    }
  }
}

AmpOperators::~AmpOperators() {}

AmpOperators& AmpOperators::Instance() {
  static AmpOperators instance;
  return instance;
}

std::shared_ptr<std::unordered_set<std::string>>
AmpOperators::GetMutableAllowOps() {
  return allow_ops_;
}

std::shared_ptr<std::unordered_set<std::string>>
AmpOperators::GetMutableBlockOps() {
  return block_ops_;
}

std::shared_ptr<std::unordered_set<std::string>>
AmpOperators::GetMutableUnsupportedFp16Ops() {
  return unsupported_fp16_ops_;
}

std::ostream& operator<<(std::ostream& os, AmpOperators& ops) {
  os << "allow ops: ";
  auto allow_ops = ops.GetMutableAllowOps();
  std::copy((*allow_ops).begin(), (*allow_ops).end(),
            std::ostream_iterator<std::string>(os, " "));
  os << "\n";
  os << "block ops: ";
  auto block_ops = ops.GetMutableBlockOps();
  std::copy((*block_ops).begin(), (*block_ops).end(),
            std::ostream_iterator<std::string>(os, " "));
  os << "\n";
  os << "unsupported fp16 ops: ";
  auto unsupported_fp16_ops = ops.GetMutableUnsupportedFp16Ops();
  std::copy((*unsupported_fp16_ops).begin(), (*unsupported_fp16_ops).end(),
            std::ostream_iterator<std::string>(os, " "));
  return os;
}

inline std::string GetDtypeStr(
    const std::shared_ptr<egr::EagerTensor>& tensor) {
  return paddle::framework::DataTypeToString(
      egr::GetDtypeFromVar(tensor->Var()));
}

inline bool NeedCast(const std::shared_ptr<egr::EagerTensor>& tensor) {
  auto place = egr::GetPlaceFromVar(tensor->Var());
  auto data_type = egr::GetDtypeFromVar(tensor->Var());
  if (paddle::platform::is_gpu_place(place) ||
      paddle::platform::is_cuda_pinned_place(place) ||
      paddle::platform::is_xpu_place(place)) {
    // CudaPinnedPlace is added for varbase created by dataloader
    if (data_type == paddle::framework::proto::VarType::FP32 ||
        data_type == paddle::framework::proto::VarType::FP16) {
      return true;
    }
  }
  return false;
}

// NOTE: Trace a cast op, so if a var is casted from fp32 to fp16, then the
// grad var will be cast back from fp16 to fp32 during backward phase.
static inline std::shared_ptr<egr::EagerTensor> CastToType(
    const std::shared_ptr<egr::EagerTensor>& tensor,
    const paddle::framework::proto::VarType::Type dst_type) {
  NameTensorMap ins = {{"X", {tensor}}};
  auto in_data_type = egr::GetDtypeFromVar(tensor->Var());
  paddle::framework::AttributeMap attrs = {{"in_dtype", in_data_type},
                                           {"out_dtype", dst_type}};
  auto out = std::shared_ptr<egr::EagerTensor>(new egr::EagerTensor());
  NameTensorMap outs = {{"Out", {out}}};

  {
    AutoCastGuard guard(0);
    paddle::framework::AttributeMap default_attrs;
    RunOp("cast", ins, outs, std::move(attrs), {}, &default_attrs, true);
  }

  return out;
}

static inline std::shared_ptr<egr::EagerTensor> CastToFP16(
    const std::shared_ptr<egr::EagerTensor>& tensor) {
  auto dst_type = paddle::framework::proto::VarType::FP16;
  if (NeedCast(tensor) && (egr::GetDtypeFromVar(tensor->Var()) != dst_type)) {
    return CastToType(tensor, dst_type);
  }
  return tensor;
}

static inline std::shared_ptr<egr::EagerTensor> CastToFP32(
    const std::shared_ptr<egr::EagerTensor>& tensor) {
  auto dst_type = paddle::framework::proto::VarType::FP32;
  if (NeedCast(tensor) && (egr::GetDtypeFromVar(tensor->Var()) != dst_type)) {
    return CastToType(tensor, dst_type);
  }
  return tensor;
}

static inline paddle::framework::proto::VarType::Type GetPromoteType(
    const std::string& op_type, const NameTensorMap& ins) {
  auto dst_type = paddle::framework::proto::VarType::FP16;
  for (const auto& pair : ins) {
    for (const auto& tensor : pair.second) {
      if (egr::GetDtypeFromVar(tensor->Var()) ==
          paddle::framework::proto::VarType::FP32) {
        dst_type = egr::GetDtypeFromVar(tensor->Var());
        break;
      }
    }
  }

  // NOTE(juncai): moving_average_abs_max_scale only considers the
  // dtype of input(X)
  if (op_type == "moving_average_abs_max_scale") {
    for (const auto& pair : ins) {
      if (pair.first == "X" &&
          egr::GetDtypeFromVar(pair.second.front()->Var()) ==
              paddle::framework::proto::VarType::FP16) {
        dst_type = paddle::framework::proto::VarType::FP16;
      }
    }
  }

  return dst_type;
}

NameTensorMap AutoCastInputs(const std::string& op_type,
                             const NameTensorMap& ins) {
  NameTensorMap new_ins(ins);
  if (AmpOperators::Instance().GetMutableAllowOps()->count(op_type)) {
    for (auto& pair : new_ins) {
      // NOTE(zhiqiu): batch_norm and layer_norm support only input x is fp16.
      if ((op_type == "batch_norm" || op_type == "layer_norm" ||
           op_type == "sync_batch_norm") &&
          pair.first != "X") {
        continue;
      }
      VLOG(5) << "Op(" << op_type << "): Cast " << pair.first << " from "
              << GetDtypeStr(*pair.second.cbegin()) << " to float16";
      for (auto& var : pair.second) {
        var = CastToFP16(var);
      }
    }
    return new_ins;
  } else if (AmpOperators::Instance().GetMutableBlockOps()->count(op_type)) {
    for (auto& pair : new_ins) {
      VLOG(5) << "Op(" << op_type << "): Cast " << pair.first << " from "
              << GetDtypeStr(*pair.second.cbegin()) << " to float";
      for (auto& var : pair.second) {
        var = CastToFP32(var);
      }
    }
    return new_ins;
  } else {
    auto dst_type = GetPromoteType(op_type, ins);

    // NOTE(zhiqiu): if the op has no fp16 kernel, fall back to fp32.
    if (dst_type == paddle::framework::proto::VarType::FP16 &&
        AmpOperators::Instance().GetMutableUnsupportedFp16Ops()->count(
            op_type)) {
      dst_type = paddle::framework::proto::VarType::FP32;
    }
    for (auto& pair : new_ins) {
      // NOTE(zhiqiu): batch_norm and layer_norm support only input x is fp16.
      if ((op_type == "batch_norm" || op_type == "layer_norm" ||
           op_type == "sync_batch_norm") &&
          pair.first == "X" &&
          dst_type == paddle::framework::proto::VarType::FP32) {
        continue;
      }
      VLOG(5) << "Op(" << op_type << "): Cast " << pair.first << " from "
              << GetDtypeStr(*pair.second.cbegin()) << " to "
              << paddle::framework::DataTypeToString(dst_type);
      for (auto& var : pair.second) {
        var = (dst_type == paddle::framework::proto::VarType::FP32
                   ? CastToFP32(var)
                   : CastToFP16(var));
      }
    }
    return new_ins;
  }
  return new_ins;
}

NameTensorMap CastPureFp16Inputs(const std::string& op_type,
                                 const NameTensorMap& ins) {
  NameTensorMap new_ins(ins);
  auto dst_type = paddle::framework::proto::VarType::FP16;
  if (AmpOperators::Instance().GetMutableUnsupportedFp16Ops()->count(
          op_type) ||
      AmpOperators::Instance().GetMutableBlockOps()->count(op_type)) {
    dst_type = paddle::framework::proto::VarType::FP32;
  }
  for (auto& pair : new_ins) {
    if ((op_type == "batch_norm" || op_type == "layer_norm" ||
         op_type == "sync_batch_norm") &&
        pair.first != "X") {
      continue;
    }
    VLOG(5) << "Op(" << op_type << "): Cast " << pair.first << " from "
            << GetDtypeStr(*pair.second.cbegin()) << " to "
            << paddle::framework::DataTypeToString(dst_type);
    for (auto& var : pair.second) {
      var = (dst_type == paddle::framework::proto::VarType::FP32
                 ? CastToFP32(var)
                 : CastToFP16(var));
    }
  }
  return new_ins;
}

}  // namespace egr
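A note for readers tracing the control flow: AutoCastInputs above routes each op one of three ways. Ops on the allow list get their inputs cast to fp16, ops on the block list get fp32, and every other op is promoted from its input dtypes, falling back to fp32 when the op has no fp16 kernel. The self-contained sketch below models only that top-level dispatch with plain standard-library sets; RouteOp and the set contents are hypothetical illustrations, not Paddle APIs, and the per-slot exceptions (e.g. batch_norm casting only input "X") are omitted.

// A minimal standalone sketch (not part of the commit) mirroring the
// three-way routing AutoCastInputs performs. The real lists live in
// the AmpOperators singleton; these sets are hypothetical examples.
#include <iostream>
#include <string>
#include <unordered_set>

enum class CastTarget { FP16, FP32, Promote };

// Mirrors the dispatch order: allow list first, then block list,
// then dtype promotion for everything else.
CastTarget RouteOp(const std::string& op_type,
                   const std::unordered_set<std::string>& allow_ops,
                   const std::unordered_set<std::string>& block_ops) {
  if (allow_ops.count(op_type)) return CastTarget::FP16;
  if (block_ops.count(op_type)) return CastTarget::FP32;
  return CastTarget::Promote;
}

int main() {
  std::unordered_set<std::string> allow = {"conv2d", "matmul"};  // hypothetical
  std::unordered_set<std::string> block = {"softmax"};           // hypothetical
  for (const auto& op : {"conv2d", "softmax", "relu"}) {
    switch (RouteOp(op, allow, block)) {
      case CastTarget::FP16:
        std::cout << op << " -> fp16\n";
        break;
      case CastTarget::FP32:
        std::cout << op << " -> fp32\n";
        break;
      case CastTarget::Promote:
        std::cout << op << " -> promote by input dtypes\n";
        break;
    }
  }
}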
paddle/fluid/eager/legacy/amp_auto_cast.h (new file, mode 100644)
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <memory>
#include <set>
#include <string>
#include <tuple>
#include <unordered_set>
#include "paddle/fluid/eager/api/utils/global_utils.h"
#include "paddle/fluid/eager/eager_tensor.h"
#include "paddle/fluid/eager/legacy/type_def.h"
namespace egr {

// NOTE(zhiqiu): only O1 and O2 are valid now
enum class AmpLevel {
  O0 = 0,  // fp32
  O1,      // amp, mixed fp32-fp16
  O2,      // almost fp16
  O3,      // fp16
};

class AmpOperators {
 public:
  ~AmpOperators();
  AmpOperators(const AmpOperators& o) = delete;
  const AmpOperators& operator=(const AmpOperators& o) = delete;

  static AmpOperators& Instance();

  std::shared_ptr<std::unordered_set<std::string>> GetMutableAllowOps();

  std::shared_ptr<std::unordered_set<std::string>> GetMutableBlockOps();

  std::shared_ptr<std::unordered_set<std::string>>
  GetMutableUnsupportedFp16Ops();

 private:
  AmpOperators();  // forbid calling default constructor

  // The set of ops that support fp16 calculation and are considered
  // numerically safe and performance critical. These ops are always
  // converted to fp16.
  std::shared_ptr<std::unordered_set<std::string>> allow_ops_;

  // The set of ops that support fp16 calculation and are considered
  // numerically dangerous and whose effects may also be observed in
  // downstream ops.
  std::shared_ptr<std::unordered_set<std::string>> block_ops_;

  // The set of ops that have no fp16 CUDA kernel.
  std::shared_ptr<std::unordered_set<std::string>> unsupported_fp16_ops_;
};

std::ostream& operator<<(std::ostream& os, AmpOperators& ops);

// NOTE(zhiqiu): AutoCastGuard is used for RAII.
class AutoCastGuard {
 public:
  explicit AutoCastGuard(int guard_level) {
    pre_amp_level_ = Controller::Instance().GetAMPLevel();
    if (pre_amp_level_ != guard_level) {
      Controller::Instance().SetAMPLevel(guard_level);
    }
  }

  ~AutoCastGuard() { Controller::Instance().SetAMPLevel(pre_amp_level_); }

  // forbid copy and operator=
  AutoCastGuard(const AutoCastGuard& guard) = delete;
  AutoCastGuard& operator=(const AutoCastGuard& guard) = delete;

 private:
  int pre_amp_level_;
};

NameTensorMap AutoCastInputs(const std::string& op_type,
                             const NameTensorMap& ins);

NameTensorMap CastPureFp16Inputs(const std::string& op_type,
                                 const NameTensorMap& ins);

}  // namespace egr
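AutoCastGuard is a plain RAII save/restore of the controller's AMP level; CastToType in the .cc file uses it to run the traced cast op at level 0 so the cast itself is not autocast again. Below is a minimal standalone model of the same pattern; FakeController and LevelGuard are stand-ins written for illustration, not the real egr::Controller.

// A self-contained sketch (not part of the commit) of the save/restore
// pattern AutoCastGuard implements.
#include <cassert>

struct FakeController {
  static FakeController& Instance() {
    static FakeController inst;
    return inst;
  }
  int GetAMPLevel() const { return level_; }
  void SetAMPLevel(int level) { level_ = level; }

 private:
  int level_ = 1;  // pretend AMP O1 is active
};

class LevelGuard {
 public:
  explicit LevelGuard(int guard_level)
      : pre_level_(FakeController::Instance().GetAMPLevel()) {
    if (pre_level_ != guard_level) {
      FakeController::Instance().SetAMPLevel(guard_level);
    }
  }
  ~LevelGuard() { FakeController::Instance().SetAMPLevel(pre_level_); }
  LevelGuard(const LevelGuard&) = delete;
  LevelGuard& operator=(const LevelGuard&) = delete;

 private:
  int pre_level_;
};

int main() {
  assert(FakeController::Instance().GetAMPLevel() == 1);
  {
    LevelGuard guard(0);  // disable AMP inside this scope, as CastToType does
    assert(FakeController::Instance().GetAMPLevel() == 0);
  }
  assert(FakeController::Instance().GetAMPLevel() == 1);  // restored on exit
}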
paddle/fluid/eager/legacy/execution_context.h (new file, mode 100644)
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <string>
#include <vector>
#include "paddle/fluid/eager/eager_tensor.h"
#include "paddle/fluid/eager/legacy/type_def.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/framework/type_defs.h"
#include "paddle/fluid/framework/variable.h"
namespace egr {

class EagerExecutionContext : public paddle::framework::ExecutionContext {
  using Variable = paddle::framework::Variable;

 public:
  EagerExecutionContext(const paddle::framework::OperatorBase& op,
                        const paddle::framework::Scope& scope,
                        const paddle::platform::DeviceContext& device_context,
                        const paddle::framework::RuntimeContext& ctx,
                        const NameTensorMap& tensor_map_in,
                        const NameTensorMap& tensor_map_out,
                        const paddle::framework::AttributeMap& attrs,
                        const paddle::framework::AttributeMap& default_attrs)
      : ExecutionContext(op, scope, device_context, ctx),
        tensor_map_in_(tensor_map_in),
        tensor_map_out_(tensor_map_out),
        attrs_(attrs),
        default_attrs_(default_attrs) {}

  std::string InputName(const std::string& name) const override {
    auto it = tensor_map_in_.find(name);
    PADDLE_ENFORCE_NE(it, tensor_map_in_.end(),
                      paddle::platform::errors::PreconditionNotMet(
                          "Can not find [%s] in Input", name));
    // TODO(jiabin): This is used for egr::EagerTensor temporarily,
    // once we have name, remove it.
    return it->second[0] ? it->second[0]->name()
                         : paddle::framework::kEmptyVarName;
  }

  std::vector<std::string> InputNames(const std::string& name) const override {
    auto it = tensor_map_in_.find(name);
    PADDLE_ENFORCE_NE(
        it, tensor_map_in_.end(),
        paddle::platform::errors::NotFound("Can not find [%s] in Input", name));
    std::vector<std::string> vec_res;
    vec_res.reserve(it->second.size());
    for (size_t i = 0; i < it->second.size(); ++i) {
      if (it->second[i]) {
        // TODO(jiabin): This is used for egr::EagerTensor
        // temporarily, once we have name, remove it.
        vec_res.push_back(it->second[i]->name());
      } else {
        vec_res.push_back(paddle::framework::kEmptyVarName);
      }
    }
    return vec_res;
  }

  std::string OutputName(const std::string& name) const override {
    auto it = tensor_map_out_.find(name);
    PADDLE_ENFORCE_NE(it, tensor_map_out_.end(),
                      paddle::platform::errors::NotFound(
                          "Can not find [%s] in Output", name));
    return it->second[0] ? it->second[0]->name()
                         : paddle::framework::kEmptyVarName;
  }

  std::vector<std::string> OutputNames(
      const std::string& name) const override {
    auto it = tensor_map_out_.find(name);
    PADDLE_ENFORCE_NE(it, tensor_map_out_.end(),
                      paddle::platform::errors::NotFound(
                          "Can not find [%s] in Output", name));
    std::vector<std::string> vec_res;
    vec_res.reserve(it->second.size());
    for (size_t i = 0; i < it->second.size(); ++i) {
      if (it->second[i]) {
        vec_res.push_back(it->second[i]->name());
      } else {
        vec_res.push_back(paddle::framework::kEmptyVarName);
      }
    }
    return vec_res;
  }

  bool HasAttr(const std::string& name) const override {
    return attrs_.count(name) != 0 || default_attrs_.count(name) != 0;
  }

  const paddle::framework::AttributeMap& Attrs() const override {
    return attrs_;
  }

  const paddle::framework::Attribute& GetAttr(
      const std::string& name) const override {
    auto it = attrs_.find(name);
    if (it == attrs_.end()) {
      it = default_attrs_.find(name);
      if (it == default_attrs_.end()) {
        PADDLE_THROW(paddle::platform::errors::NotFound(
            "Can not find [%s] in attributes of op %s.", name,
            this->GetOp().Type()));
      }
    }
    return it->second;
  }

  std::vector<std::string> InNameList() const override {
    std::vector<std::string> vec_temp;
    vec_temp.reserve(tensor_map_in_.size());
    for (auto& v : tensor_map_in_) {
      vec_temp.push_back(v.first);
    }
    return vec_temp;
  }

  bool HasInput(const std::string& name) const override {
    auto it = tensor_map_in_.find(name);
    return (it != tensor_map_in_.end() && it->second.size() > 0);
  }

  bool HasOutput(const std::string& name) const override {
    auto it = tensor_map_out_.find(name);
    return (it != tensor_map_out_.end() && it->second.size() > 0);
  }

  size_t InputSize(const std::string& name) const override {
    return InputNames(name).size();
  }

  size_t OutputSize(const std::string& name) const override {
    return OutputNames(name).size();
  }

  const Variable* InputVar(const std::string& name) const override {
    auto it = tensor_map_in_.find(name);
    if (it == tensor_map_in_.end()) {
      return nullptr;
    }
    return it->second.empty() || it->second[0] == nullptr
               ? nullptr
               : it->second[0]->MutableVar();
  }

  Variable* OutputVar(const std::string& name) const override {
    auto it = tensor_map_out_.find(name);
    if (it == tensor_map_out_.end()) {
      return nullptr;
    }
    return it->second.empty() || it->second[0] == nullptr
               ? nullptr
               : it->second[0]->MutableVar();
  }

  const std::vector<Variable*> MultiInputVar(
      const std::string& name) const override {
    auto it = tensor_map_in_.find(name);
    if (it == tensor_map_in_.end()) {
      return {};
    }
    std::vector<Variable*> vec_res;
    vec_res.reserve(it->second.size());
    for (size_t i = 0; i < it->second.size(); ++i) {
      vec_res.push_back(it->second[i] ? it->second[i]->MutableVar() : nullptr);
    }
    return vec_res;
  }

  std::vector<Variable*> MultiOutputVar(
      const std::string& name) const override {
    auto it = tensor_map_out_.find(name);
    if (it == tensor_map_out_.end()) {
      return {};
    }
    std::vector<Variable*> vec_res;
    vec_res.reserve(it->second.size());
    for (size_t i = 0; i < it->second.size(); ++i) {
      vec_res.push_back(it->second[i] ? it->second[i]->MutableVar() : nullptr);
    }
    return vec_res;
  }

 private:
  const NameTensorMap& tensor_map_in_;
  const NameTensorMap& tensor_map_out_;
  const paddle::framework::AttributeMap& attrs_;
  const paddle::framework::AttributeMap& default_attrs_;
};

}  // namespace egr
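EagerExecutionContext adapts the imperative-mode NameTensorMap (parameter name to a list of tensor slots) to the framework's ExecutionContext interface, and most of its methods are the same find-then-fallback walk over that map. A toy self-contained version of the InputName lookup is sketched below; ToyTensor and ToyNameMap are illustrative stand-ins, though "@EMPTY@" does match Paddle's kEmptyVarName sentinel.

// A self-contained sketch (not part of the commit) of the lookup-and-
// fallback pattern used by EagerExecutionContext::InputName.
#include <cassert>
#include <map>
#include <memory>
#include <string>
#include <vector>

struct ToyTensor {
  std::string name;
};
// Stands in for egr::NameTensorMap: parameter name -> tensor slot list.
using ToyNameMap =
    std::map<std::string, std::vector<std::shared_ptr<ToyTensor>>>;

const char kEmptyVarName[] = "@EMPTY@";  // Paddle's empty-name sentinel

std::string InputName(const ToyNameMap& ins, const std::string& slot) {
  auto it = ins.find(slot);
  assert(it != ins.end());  // the real code raises PreconditionNotMet here
  // Null slots map to the empty-name sentinel, as in the real method.
  return it->second[0] ? it->second[0]->name : kEmptyVarName;
}

int main() {
  ToyNameMap ins;
  ins["X"] = {std::make_shared<ToyTensor>(ToyTensor{"x_0"})};
  ins["Bias"] = {nullptr};  // optional input left empty
  assert(InputName(ins, "X") == "x_0");
  assert(InputName(ins, "Bias") == kEmptyVarName);
}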
paddle/fluid/eager/legacy/infer_shape_context.h (new file, mode 100644)
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <string>
#include <vector>
#include "paddle/fluid/eager/eager_tensor.h"
#include "paddle/fluid/eager/legacy/type_def.h"
#include "paddle/fluid/framework/ddim.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/framework/shape_inference.h"
#include "paddle/fluid/framework/type_defs.h"
#include "paddle/fluid/framework/var_type.h"
namespace egr {

class EagerInferShapeContext : public paddle::framework::InferShapeContext {
  using DDim = paddle::framework::DDim;

 public:
  EagerInferShapeContext(const NameTensorMap* in, const NameTensorMap* out,
                         const paddle::framework::AttributeMap* attr,
                         const paddle::framework::AttributeMap* default_attr,
                         const std::string op_type)
      : tensor_in_(in),
        tensor_out_(out),
        attrs_(attr),
        default_attrs_(default_attr),
        op_type_(op_type) {}

  bool HasInput(const std::string& name) const override {
    // has only one input
    auto it = tensor_in_->find(name);
    if (it == tensor_in_->end()) {
      return false;
    }
    const auto& in = it->second;
    if (in.size() == 0) return false;
    PADDLE_ENFORCE_EQ(
        in.size(), 1UL,
        paddle::platform::errors::PreconditionNotMet(
            "Input %s should not have more than one inputs", name));
    return in[0] != nullptr;
  }

  bool HasOutput(const std::string& name) const override {
    // has only one output
    auto it = tensor_out_->find(name);
    if (it == tensor_out_->end()) {
      return false;
    }
    const auto& out = it->second;
    if (out.size() == 0) {
      return false;
    }
    PADDLE_ENFORCE_EQ(
        out.size(), 1UL,
        paddle::platform::errors::PreconditionNotMet(
            "Output %s should not have more than one outputs", name));
    return out[0] != nullptr;
  }

  bool HasInputs(const std::string& name) const override {
    auto it = tensor_in_->find(name);
    if (it == tensor_in_->end() || it->second.empty()) {
      return false;
    }
    for (auto& input : it->second) {
      if (input == nullptr) {
        return false;
      }
    }
    return true;
  }

  bool HasOutputs(const std::string& name) const override {
    auto it = tensor_out_->find(name);
    if (it == tensor_out_->end() || it->second.empty()) {
      return false;
    }
    for (auto& output : it->second) {
      if (output == nullptr) {
        return false;
      }
    }
    return true;
  }

  paddle::framework::AttrReader Attrs() const override {
    return paddle::framework::AttrReader(*attrs_, *default_attrs_);
  }

  std::vector<std::string> Inputs(const std::string& name) const override {
    std::vector<std::string> vec_res;
    auto it = tensor_in_->find(name);
    PADDLE_ENFORCE_NE(
        it, tensor_in_->end(),
        paddle::platform::errors::NotFound("can not find [%s] in input",
                                           name));
    vec_res.reserve(it->second.size());
    for (auto& var : it->second) {
      if (var) {
        vec_res.push_back(var->name());
      } else {
        vec_res.push_back(paddle::framework::kEmptyVarName);
      }
    }
    return vec_res;
  }

  std::vector<std::string> Outputs(const std::string& name) const override {
    std::vector<std::string> vec_res;
    auto it = tensor_out_->find(name);
    PADDLE_ENFORCE_NE(
        it, tensor_out_->end(),
        paddle::platform::errors::NotFound("can not find [%s] in output",
                                           name));
    vec_res.reserve(it->second.size());
    for (auto& var : it->second) {
      if (var) {
        vec_res.push_back(var->name());
      } else {
        vec_res.push_back(paddle::framework::kEmptyVarName);
      }
    }
    return vec_res;
  }

  std::string GetInputNameByIdx(size_t idx) const override {
    auto& op_proto =
        paddle::framework::OpInfoMap::Instance().Get(op_type_).proto_;
    PADDLE_ENFORCE_LT(
        idx, op_proto->inputs().size(),
        paddle::platform::errors::OutOfRange(
            "The index should be less than the size of inputs of "
            "operator %s, but got index is %d and size is %d",
            op_type_, idx, op_proto->inputs().size()));
    return op_proto->inputs()[idx].name();
  }

  std::string GetOutputNameByIdx(size_t idx) const override {
    auto& op_proto =
        paddle::framework::OpInfoMap::Instance().Get(op_type_).proto_;
    PADDLE_ENFORCE_LT(
        idx, op_proto->outputs().size(),
        paddle::platform::errors::OutOfRange(
            "The index should be less than the size of outputs of "
            "operator %s, but got index is %d and size is %d",
            op_type_, idx, op_proto->outputs().size()));
    return op_proto->outputs()[idx].name();
  }

  void ShareDim(const std::string& in, const std::string& out, size_t i = 0,
                size_t j = 0) override {
    auto in_it = tensor_in_->find(in);
    auto out_it = tensor_out_->find(out);
    PADDLE_ENFORCE_NE(
        in_it, tensor_in_->end(),
        paddle::platform::errors::NotFound("can not found [%s] in input", in));
    PADDLE_ENFORCE_GT(in_it->second.size(), i,
                      paddle::platform::errors::PreconditionNotMet(
                          "Inputs %s should have %llu argument", in, i));
    PADDLE_ENFORCE_NE(
        out_it, tensor_out_->end(),
        paddle::platform::errors::NotFound("can not found [%s] in input", in));
    PADDLE_ENFORCE_GT(out_it->second.size(), j,
                      paddle::platform::errors::PreconditionNotMet(
                          "Outputs %s should have %llu argument", out, j));

    paddle::framework::Variable* in_var = in_it->second[i]->MutableVar();
    paddle::framework::Variable* out_var = out_it->second[j]->MutableVar();

    PADDLE_ENFORCE_EQ(in_var->Type(), out_var->Type(),
                      paddle::platform::errors::PreconditionNotMet(
                          "The type of %s and %s is not the same.", in, out));

    if (in_var->IsType<paddle::framework::LoDTensor>()) {
      auto& in_lod_tensor = in_var->Get<paddle::framework::LoDTensor>();
      auto* out_lod_tensor =
          out_var->GetMutable<paddle::framework::LoDTensor>();
      out_lod_tensor->Resize(in_lod_tensor.dims());
    } else {
      auto& in_sele_rows = in_var->Get<paddle::framework::SelectedRows>();
      auto out_sele_rows =
          out_var->GetMutable<paddle::framework::SelectedRows>();
      out_sele_rows->mutable_value()->Resize(in_sele_rows.value().dims());
      out_sele_rows->set_rows(in_sele_rows.rows());
      out_sele_rows->set_height(in_sele_rows.height());
    }
  }

  void ShareAllLoD(const std::string& in,
                   const std::string& out) const override {
    // do nothing
  }

  void ShareLoD(const std::string& in, const std::string& out, size_t i = 0,
                size_t j = 0) const override {
    // do nothing
  }

  bool IsRuntime() const override { return true; }

  // TODO(paddle-dev): Can this be template?
  std::vector<paddle::framework::InferShapeVarPtr> GetInputVarPtrs(
      const std::string& name) override {
    PADDLE_THROW(paddle::platform::errors::PermissionDenied(
        "GetInputVarPtrs not support in dygraph runtime context"));
  }

  std::vector<paddle::framework::InferShapeVarPtr> GetOutputVarPtrs(
      const std::string& name) override {
    PADDLE_THROW(paddle::platform::errors::PermissionDenied(
        "GetOutputVarPtrs not support in dygraph runtime context"));
  }

  DDim GetInputDim(const std::string& name) const override {
    auto it = tensor_in_->find(name);
    PADDLE_ENFORCE_NE(
        it, tensor_in_->end(),
        paddle::platform::errors::NotFound("can not find [%s] in input",
                                           name));
    PADDLE_ENFORCE_EQ(
        it->second.size(), 1UL,
        paddle::platform::errors::PreconditionNotMet(
            "Input(%s) should hold one element, but now it holds %d", name,
            it->second.size()));
    return this->GetDim(it->second[0]->MutableVar());
  }

  std::vector<DDim> GetInputsDim(const std::string& name) const override {
    // const std::vector<Variable*>& vars = InputVars(name);
    std::vector<DDim> vec_res;
    auto it = tensor_in_->find(name);
    PADDLE_ENFORCE_NE(
        it, tensor_in_->end(),
        paddle::platform::errors::NotFound("can not find [%s] in output",
                                           name));
    vec_res.reserve(it->second.size());
    for (size_t i = 0; i < it->second.size(); ++i) {
      if (it->second[i]) {
        vec_res.emplace_back(GetDim(it->second[i]->MutableVar()));
      } else {
        vec_res.emplace_back();
      }
    }
    return vec_res;
  }

  std::vector<paddle::framework::proto::VarType::Type> GetInputsVarType(
      const std::string& name) const override {
    std::vector<paddle::framework::proto::VarType::Type> vec_res;
    auto it = tensor_in_->find(name);
    PADDLE_ENFORCE_NE(
        it, tensor_in_->end(),
        paddle::platform::errors::NotFound("can not find [%s] in input",
                                           name));
    vec_res.reserve(it->second.size());
    for (size_t i = 0; i < it->second.size(); ++i) {
      if (it->second[i]) {
        vec_res.emplace_back(
            paddle::framework::ToVarType(it->second[i]->MutableVar()->Type()));
      } else {
        vec_res.emplace_back();
      }
    }
    return vec_res;
  }

  std::vector<paddle::framework::proto::VarType::Type> GetOutputsVarType(
      const std::string& name) const override {
    std::vector<paddle::framework::proto::VarType::Type> vec_res;
    auto it = tensor_out_->find(name);
    PADDLE_ENFORCE_NE(
        it, tensor_out_->end(),
        paddle::platform::errors::NotFound("can not find [%s] in output",
                                           name));
    vec_res.reserve(it->second.size());
    for (size_t i = 0; i < it->second.size(); ++i) {
      if (it->second[i]) {
        vec_res.emplace_back(
            paddle::framework::ToVarType(it->second[i]->MutableVar()->Type()));
      } else {
        vec_res.emplace_back(
            static_cast<paddle::framework::proto::VarType::Type>(-1));
      }
    }
    return vec_res;
  }

  void SetOutputDim(const std::string& name, const DDim& dim) override {
    auto it = tensor_out_->find(name);
    PADDLE_ENFORCE_NE(
        it, tensor_out_->end(),
        paddle::platform::errors::NotFound("can not find [%s] in output",
                                           name));
    if (it->second[0]) {
      SetDim(it->second[0]->MutableVar(), dim);
    }
  }

  void SetOutputsDim(const std::string& name,
                     const std::vector<DDim>& dims) override {
    auto it = tensor_out_->find(name);
    PADDLE_ENFORCE_NE(
        it, tensor_out_->end(),
        paddle::platform::errors::NotFound("can not find [%s] in output",
                                           name));
    PADDLE_ENFORCE_EQ(
        dims.size(), it->second.size(),
        paddle::platform::errors::InvalidArgument(
            "The number of dims is expected to be equal to the "
            "number of Outputs(%s). But receieved: the number of "
            "dims = %d, the number of Outputs(%s) = %d.",
            name, dims.size(), name, it->second.size()));
    for (size_t i = 0; i < dims.size(); ++i) {
      if (it->second[i]) {
        SetDim(it->second[i]->MutableVar(), dims[i]);
      }
    }
  }

  int32_t GetLoDLevel(const std::string& in, size_t i = 0) const override {
    PADDLE_THROW(paddle::platform::errors::PermissionDenied(
        "GetLoDLevel function not support in dygraph mode"));
  }

  void SetLoDLevel(const std::string& out, int32_t lod_level,
                   size_t j = 0) const override {
    PADDLE_THROW(paddle::platform::errors::PermissionDenied(
        "SetLoDLevel function not support in dygraph mode"));
  }

 protected:
  DDim GetDim(paddle::framework::Variable* var) const {
    PADDLE_ENFORCE_NOT_NULL(var, paddle::platform::errors::PreconditionNotMet(
                                     "Input variable should not be null"));
    if (var->IsType<paddle::framework::LoDTensor>()) {
      return var->Get<paddle::framework::LoDTensor>().dims();
    } else if (var->IsType<paddle::framework::SelectedRows>()) {
      return var->Get<paddle::framework::SelectedRows>().GetCompleteDims();
    } else {
      PADDLE_THROW(paddle::platform::errors::PermissionDenied(
          "Only LoDTensor/SelectedRows support 'GetDim', but Variables "
          "type_id is xx."));
    }
  }

  std::vector<DDim> GetRepeatedDims(const std::string& name) const override {
    PADDLE_THROW(paddle::platform::errors::PermissionDenied(
        "GetRepeatedDims not support in dygraph runtime"));
  }

  void SetDim(paddle::framework::Variable* var, const DDim& dim) {
    if (var->IsType<paddle::framework::LoDTensor>()) {
      var->GetMutable<paddle::framework::LoDTensor>()->Resize(dim);
    } else if (var->IsType<paddle::framework::SelectedRows>()) {
      var->GetMutable<paddle::framework::SelectedRows>()->set_height(dim[0]);
    } else {
      PADDLE_THROW(paddle::platform::errors::PermissionDenied(
          "Variable type_id %s, expect LoDTensor/SelectedRows."));
    }
  }

  void SetDims(const std::vector<paddle::framework::Variable*>& vars,
               const std::vector<DDim>& dims) {
    size_t length = vars.size();
    PADDLE_ENFORCE_EQ(
        length, dims.size(),
        paddle::platform::errors::PreconditionNotMet(
            "Vars number [%d] should be equal with dims number [%d]", length,
            dims.size()));
    for (size_t i = 0; i < length; ++i) {
      if (vars[i] == nullptr) {
        continue;
      }
      SetDim(vars[i], dims[i]);
    }
  }

  void SetRepeatedDims(const std::string& name,
                       const std::vector<DDim>& dims) override {
    PADDLE_THROW(paddle::platform::errors::PermissionDenied(
        "SetRepeatedDims not support in dygraph runtime"));
  }

 private:
  const NameTensorMap* tensor_in_;
  const NameTensorMap* tensor_out_;
  const paddle::framework::AttributeMap* attrs_;
  const paddle::framework::AttributeMap* default_attrs_;
  const std::string op_type_;
};

}  // namespace egr
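The protected GetDim/SetDim helpers at the end of this file dispatch on the variable's runtime type: a LoDTensor reports its dims directly, while SelectedRows reports "complete" dims with its height substituted as the leading dimension. The standalone sketch below mirrors that dispatch with std::variant; ToyDenseTensor and ToySelectedRows are illustrative stand-ins, not Paddle types.

// A self-contained sketch (not part of the commit) of the GetDim type
// dispatch, using std::variant in place of paddle::framework::Variable.
#include <iostream>
#include <variant>
#include <vector>

using DDim = std::vector<long>;  // toy stand-in for paddle::framework::DDim

struct ToyDenseTensor { DDim dims; };
struct ToySelectedRows { ToyDenseTensor value; long height = 0; };
using ToyVariable = std::variant<ToyDenseTensor, ToySelectedRows>;

DDim GetDim(const ToyVariable& var) {
  if (std::holds_alternative<ToyDenseTensor>(var)) {
    // Dense tensors report their dims directly.
    return std::get<ToyDenseTensor>(var).dims;
  }
  // SelectedRows reports "complete" dims: height as the leading dimension,
  // remaining dims taken from the embedded value tensor.
  const auto& rows = std::get<ToySelectedRows>(var);
  DDim dims = rows.value.dims;
  dims[0] = rows.height;
  return dims;
}

int main() {
  ToyVariable dense = ToyDenseTensor{{8, 16}};
  ToyVariable sparse = ToySelectedRows{{{3, 16}}, /*height=*/100};
  std::cout << GetDim(dense)[0] << "x" << GetDim(dense)[1] << "\n";    // 8x16
  std::cout << GetDim(sparse)[0] << "x" << GetDim(sparse)[1] << "\n";  // 100x16
}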