PaddlePaddle / Paddle

Commit e9589e35 (unverified)
Authored on May 31, 2022 by Chen Weihang
Committed by GitHub on May 31, 2022
[Eager] Polish append op using for model perf (#43102)
* polish append op using
* fix var error
* fix group norm impl
Parent: f9e55dee

Showing 7 changed files with 96 additions and 29 deletions (+96 / -29)
paddle/fluid/pybind/op_function_generator.h    +6  -0
python/paddle/fluid/dygraph/nn.py              +8  -6
python/paddle/fluid/framework.py               +4  -0
python/paddle/fluid/layers/nn.py               +32 -2
python/paddle/fluid/layers/tensor.py           +13 -8
python/paddle/nn/layer/norm.py                 +20 -5
python/paddle/tensor/creation.py               +13 -8
paddle/fluid/pybind/op_function_generator.h

@@ -125,6 +125,11 @@ std::map<std::string, std::set<std::string>> op_ins_map = {
      {"X", "Scale", "Bias", "Mean", "Variance", "MomentumTensor"}},
     {"inplace_abn",
      {"X", "Scale", "Bias", "Mean", "Variance", "MomentumTensor"}},
+    {"linear_interp", {"X", "OutSize"}},
+    {"bilinear_interp", {"X", "OutSize"}},
+    {"trilinear_interp", {"X", "OutSize"}},
+    {"nearest_interp", {"X", "OutSize"}},
+    {"bicubic_interp", {"X", "OutSize"}},
 };
 // NOTE(zhiqiu): Like op_ins_map.

@@ -270,6 +275,7 @@ std::map<std::string, std::set<std::string>> op_passing_outs_map = {
     {"split", {"Out"}},
     {"concat", {"Out"}},
     {"fused_multi_transformer", {"CacheKVOut"}},
+    {"group_norm", {"Mean", "Variance"}},
 };
 // NOTE(pangyoki): Tensor View Strategy.
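These two tables drive Paddle's generated Python bindings: op_ins_map lists the named inputs an op accepts, and op_passing_outs_map lists the outputs the caller pre-creates and passes in. Below is a rough, hypothetical pure-Python analogy of how such tables shape a generated call signature; the real generator emits C++ pybind functions, not Python source.

# Hypothetical, simplified analogy of the codegen tables above.
op_ins_map = {
    "bilinear_interp": ["X", "OutSize"],
    "group_norm": ["X", "Scale", "Bias"],
}
op_passing_outs_map = {
    "group_norm": ["Mean", "Variance"],
}

def sketch_signature(op_type):
    # inputs first, then caller-provided outputs, then variadic attributes
    ins = op_ins_map.get(op_type, ["X"])
    passing_outs = op_passing_outs_map.get(op_type, [])
    return "def {}({}, *attrs): ...".format(op_type, ", ".join(ins + passing_outs))

print(sketch_signature("group_norm"))
# def group_norm(X, Scale, Bias, Mean, Variance, *attrs): ...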
python/paddle/fluid/dygraph/nn.py

@@ -3016,9 +3016,15 @@ class GroupNorm(layers.Layer):
                                          is_bias=True)

     def forward(self, input):
-        if in_dygraph_mode():
+        mean_out = self._helper.create_variable_for_type_inference(
+            dtype=self._dtype, stop_gradient=True)
+        variance_out = self._helper.create_variable_for_type_inference(
+            dtype=self._dtype, stop_gradient=True)
+        if _non_static_mode():
             attrs = ('epsilon', self._epsilon, 'groups', self._groups)
-            out, _, _ = _C_ops.group_norm(input, self.weight, self.bias,
-                                          *attrs)
+            out, _, _ = _C_ops.group_norm(input, self.weight, self.bias,
+                                          mean_out, variance_out, *attrs)

             return dygraph_utils._append_activation_in_dygraph(out, self._act)
         else:

@@ -3029,10 +3035,6 @@ class GroupNorm(layers.Layer):
             inputs['Scale'] = self.weight

         # create output
-        mean_out = self._helper.create_variable_for_type_inference(
-            dtype=self._dtype, stop_gradient=True)
-        variance_out = self._helper.create_variable_for_type_inference(
-            dtype=self._dtype, stop_gradient=True)
         group_norm_out = self._helper.create_variable_for_type_inference(
             dtype=self._dtype)
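The dygraph branch above packs op attributes as an alternating name/value tuple and splats it after the pre-created mean/variance outputs. A small self-contained sketch of that calling convention, using a stand-in function rather than the real _C_ops.group_norm, with placeholder values:

# Stand-in only; shows how *attrs is consumed on the callee side.
def fake_group_norm(x, weight, bias, mean_out, variance_out, *attr_pairs):
    attrs = dict(zip(attr_pairs[0::2], attr_pairs[1::2]))
    return "out", attrs

epsilon, groups = 1e-5, 4            # placeholder values, not from the commit
attrs = ('epsilon', epsilon, 'groups', groups)
out, received = fake_group_norm("x", "w", "b", "mean", "var", *attrs)
print(received)                      # {'epsilon': 1e-05, 'groups': 4}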
python/paddle/fluid/framework.py

@@ -3600,6 +3600,10 @@ class Block(object):
             attrs = kwargs.get("attrs", {})
             inplace_map = kwargs.get("inplace_map", None)
             type = kwargs.get("type", None)
+            warnings.warn(
+                "Op `%s` is executed through `append_op` under the dynamic mode, "
+                "the corresponding API implementation needs to be upgraded to "
+                "using `_C_ops` method." % type, DeprecationWarning)
             op = Operator(
                 block=self,
                 desc=None,
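The added warning makes it easy to spot APIs that still fall back to append_op while running eagerly. A minimal, self-contained sketch of how such a warning surfaces (simplified stand-in, not Paddle's Block class):

import warnings

def append_op_like(op_type):
    # same message format as the change above, with the op type filled in
    warnings.warn(
        "Op `%s` is executed through `append_op` under the dynamic mode, "
        "the corresponding API implementation needs to be upgraded to "
        "using `_C_ops` method." % op_type, DeprecationWarning)

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    append_op_like("group_norm")
    print(caught[0].category.__name__)   # DeprecationWarning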
python/paddle/fluid/layers/nn.py

@@ -7793,10 +7793,18 @@ def image_resize(input,
     }

     if out_shape is not None:
-        if isinstance(out_shape, Variable):
+        if isinstance(out_shape, Variable) and not _non_static_mode():
             out_shape.stop_gradient = True
             inputs['OutSize'] = out_shape
         else:
+            if _non_static_mode():
+                if isinstance(out_shape, Variable):
+                    out_shape = list(out_shape.numpy())
+                else:
+                    out_shape = list(out_shape)
+                for i, dim in enumerate(out_shape):
+                    if isinstance(dim, Variable):
+                        out_shape[i] = dim.numpy()[0]
             if not (_is_list_or_turple_(out_shape)):
                 raise TypeError(
                     "out_shape should be a list or tuple or Variable.")

@@ -7863,7 +7871,9 @@ def image_resize(input,
             attrs['out_w'] = out_shape[2]
     else:
-        if isinstance(scale, Variable):
+        if _non_static_mode() and isinstance(scale, Variable):
+            scale = scale.numpy()
+        elif isinstance(scale, Variable):
             scale.stop_gradient = True
             inputs["Scale"] = scale
         elif isinstance(scale, float) or isinstance(scale, int):

@@ -7883,6 +7893,26 @@ def image_resize(input,
         inputs["OutSize"] = actual_shape
     elif actual_shape is not None:
         raise TypeError("actual_shape should either be Variable or None.")
+
+    if _non_static_mode():
+        attr_list = []
+        for k, v in attrs.items():
+            attr_list.append(k)
+            attr_list.append(v)
+        dy_attr = tuple(attr_list)
+
+        if resample_type == "linear":
+            out = _C_ops.linear_interp(input, actual_shape, *dy_attr)
+        elif resample_type == "bilinear":
+            out = _C_ops.bilinear_interp(input, actual_shape, *dy_attr)
+        elif resample_type == "trilinear":
+            out = _C_ops.trilinear_interp(input, actual_shape, *dy_attr)
+        elif resample_type == "nearest":
+            out = _C_ops.nearest_interp(input, actual_shape, *dy_attr)
+        elif resample_type == "bicubic":
+            out = _C_ops.bicubic_interp(input, actual_shape, *dy_attr)
+        return out
+
     out = helper.create_variable_for_type_inference(dtype)
     helper.append_op(
         type='{}_interp'.format(resample_type),
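The new dygraph branch flattens the attrs dict into an alternating name/value tuple so it can be splatted into the variadic _C_ops.*_interp call. The same flattening step in isolation, with made-up attribute values:

# Made-up attribute values, only to illustrate the flattening loop above.
attrs = {'out_h': 8, 'out_w': 8, 'interp_method': 'bilinear'}

attr_list = []
for k, v in attrs.items():
    attr_list.append(k)
    attr_list.append(v)
dy_attr = tuple(attr_list)

print(dy_attr)
# ('out_h', 8, 'out_w', 8, 'interp_method', 'bilinear')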
python/paddle/fluid/layers/tensor.py

@@ -681,14 +681,19 @@ def assign(input, output=None):
                 "saving it to file and 'load_op' to load it")
         if output is None:
             output = helper.create_variable_for_type_inference(dtype=dtype)
-        helper.append_op(
-            type='assign_value',
-            outputs={'Out': [output]},
-            attrs={
-                'dtype': dtype,
-                'shape': list(input.shape),
-                value_name: values
-            })
+        if _non_static_mode():
+            _C_ops.assign_value(output, 'shape',
+                                list(input.shape), 'dtype', dtype, value_name,
+                                values)
+        else:
+            helper.append_op(
+                type='assign_value',
+                outputs={'Out': [output]},
+                attrs={
+                    'dtype': dtype,
+                    'shape': list(input.shape),
+                    value_name: values
+                })

     if is_inplace and _non_static_mode():
         output._bump_inplace_version()
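Both assign implementations in this commit follow the same shape: call the C++ kernel directly when running eagerly, and only record a graph node through append_op for static graphs. A schematic, hypothetical version of that branch structure:

# Hypothetical names; this only mirrors the control flow of the change above.
def run_assign_value(in_dynamic_mode, fast_call, append_graph_op):
    if in_dynamic_mode:
        # eager execution: invoke the C++ kernel directly, no op desc built
        return fast_call()
    # static graph: append an operator to the current block instead
    return append_graph_op()

result = run_assign_value(
    in_dynamic_mode=True,
    fast_call=lambda: "ran _C_ops.assign_value",
    append_graph_op=lambda: "appended assign_value op")
print(result)   # ran _C_ops.assign_value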
python/paddle/nn/layer/norm.py

@@ -32,7 +32,7 @@ import six
 from ...fluid.dygraph import BatchNorm  # noqa: F401
 from ...fluid.dygraph import SpectralNorm  # noqa: F401

-from ...framework import get_default_dtype, set_default_dtype
+from ...framework import get_default_dtype, set_default_dtype, _non_static_mode
 from ..initializer import Constant
 from ...framework import ParamAttr

@@ -404,6 +404,25 @@ class GroupNorm(Layer):
         self.bias.stop_gradient = self._bias_attr != None and self._bias_attr.learning_rate == 0.

     def forward(self, input):
+        mean_out = self._helper.create_variable_for_type_inference(
+            dtype=input.dtype, stop_gradient=True)
+        variance_out = self._helper.create_variable_for_type_inference(
+            dtype=input.dtype, stop_gradient=True)
+
+        if _non_static_mode():
+            pre_act, _, _ = _C_ops.group_norm(input, self.weight, self.bias,
+                                              mean_out, variance_out,
+                                              'epsilon', self._epsilon,
+                                              'groups', self._num_groups)
+            return dygraph_utils._append_activation_in_dygraph(pre_act,
+                                                               act=None)
+
         inputs = {'X': input}
         if self.bias is not None:
             inputs['Bias'] = self.bias

@@ -411,10 +430,6 @@ class GroupNorm(Layer):
             inputs['Scale'] = self.weight

         # create output
-        mean_out = self._helper.create_variable_for_type_inference(
-            dtype=input.dtype, stop_gradient=True)
-        variance_out = self._helper.create_variable_for_type_inference(
-            dtype=input.dtype, stop_gradient=True)
         group_norm_out = self._helper.create_variable_for_type_inference(
             dtype=input.dtype)
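As a quick sanity check of the public layer this touches, a minimal usage sketch (assumes a working Paddle install; shapes are arbitrary). In dynamic graph mode this forward now dispatches through _C_ops.group_norm instead of append_op:

import paddle

x = paddle.rand([2, 6, 4, 4])                        # NCHW input
gn = paddle.nn.GroupNorm(num_groups=3, num_channels=6)
y = gn(x)
print(y.shape)                                       # [2, 6, 4, 4]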
python/paddle/tensor/creation.py

@@ -1568,14 +1568,19 @@ def assign(x, output=None):
         if output is None:
             output = helper.create_variable_for_type_inference(
                 dtype=input.dtype)
-        helper.append_op(
-            type='assign_value',
-            outputs={'Out': [output]},
-            attrs={
-                'dtype': dtype,
-                'shape': list(input.shape),
-                value_name: values
-            })
+        if _non_static_mode():
+            _C_ops.assign_value(output, 'shape',
+                                list(input.shape), 'dtype', dtype, value_name,
+                                values)
+        else:
+            helper.append_op(
+                type='assign_value',
+                outputs={'Out': [output]},
+                attrs={
+                    'dtype': dtype,
+                    'shape': list(input.shape),
+                    value_name: values
+                })

     if is_inplace and _in_legacy_dygraph():
         output._bump_inplace_version()
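A small usage sketch of the affected public API (assumes a working Paddle install): passing a numpy array to paddle.assign exercises the assign_value path changed above, which now calls _C_ops.assign_value directly in dynamic mode.

import numpy as np
import paddle

data = np.array([[1.0, 2.0], [3.0, 4.0]], dtype='float32')
out = paddle.assign(data)     # numpy input goes through the assign_value path
print(out.numpy())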