BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Commit b4954ce4 (unverified)
Authored Jun 09, 2021 by wanghuancoder; committed via GitHub on Jun 09, 2021
Parent: 626c1edc

cache core.globals() to speed up dynamic graph (#32098)

* modify API nn.Bilinear's doc, test=develop
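The idea behind the change: core.globals() is a pybind-bound call into the C++ flag registry, and hot dynamic-graph paths (layer construction, activation handling, elementwise ops) read FLAGS_use_mkldnn on nearly every invocation. The commit therefore snapshots the registry once at import time into framework._global_flags_ and reads it back through framework._global_flags(). Below is a minimal, self-contained sketch of that caching pattern; expensive_flag_registry and global_flags are illustrative stand-ins, not Paddle APIs.

# Sketch of the caching pattern this commit applies (assumed mechanics;
# the stand-in names below are not Paddle APIs). In Paddle the expensive
# call is core.globals(), cached once as framework._global_flags_ and
# served by framework._global_flags().

def expensive_flag_registry():
    # Stand-in for core.globals(), which crosses the Python/C++ binding
    # on every invocation.
    return {"FLAGS_use_mkldnn": False, "FLAGS_max_inplace_grad_add": 0}

_flags_cache = expensive_flag_registry()  # pay the cost once, at import


def global_flags():
    # Cheap module-level accessor used on hot paths instead of
    # re-invoking the registry factory.
    return _flags_cache

use_mkldnn = global_flags()["FLAGS_use_mkldnn"]  # plain attribute/dict access now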
Showing 10 changed files with 45 additions and 37 deletions (+45 −37)
python/paddle/distributed/fleet/base/distributed_strategy.py                +11 -11
python/paddle/fluid/backward.py                                              +1  -1
python/paddle/fluid/dygraph/layer_object_helper.py                           +2  -2
python/paddle/fluid/dygraph/nn.py                                            +5  -5
python/paddle/fluid/framework.py                                            +11  -6
python/paddle/fluid/layer_helper.py                                          +2  -2
python/paddle/fluid/layers/nn.py                                             +3  -3
python/paddle/fluid/tests/unittests/mkldnn/check_flags_mkldnn_ops_on_off.py  +5  -4
python/paddle/fluid/tests/unittests/mkldnn/check_flags_use_mkldnn.py         +3  -2
python/paddle/nn/functional/conv.py                                          +2  -1
python/paddle/distributed/fleet/base/distributed_strategy.py

@@ -14,7 +14,7 @@
 import paddle
 from paddle.distributed.fleet.proto import distributed_strategy_pb2
-from paddle.fluid.framework import Variable, set_flags, core
+from paddle.fluid.framework import Variable, set_flags, core, _global_flags
 from paddle.fluid.wrapped_decorator import wrap_decorator
 import google.protobuf.text_format
 import google.protobuf
@@ -121,18 +121,18 @@ class DistributedStrategy(object):
         # Set the default values of the following flags to the ones set by users
         key = 'FLAGS_cudnn_batchnorm_spatial_persistent'
-        if core.globals().is_public(key):
-            self.strategy.cudnn_batchnorm_spatial_persistent = bool(
-                core.globals()[key])
+        if _global_flags().is_public(key):
+            self.strategy.cudnn_batchnorm_spatial_persistent = bool(
+                _global_flags()[key])
         key = 'FLAGS_conv_workspace_size_limit'
-        if core.globals().is_public(key):
-            self.strategy.conv_workspace_size_limit = int(core.globals()[key])
+        if _global_flags().is_public(key):
+            self.strategy.conv_workspace_size_limit = int(_global_flags()[key])
         key = 'FLAGS_cudnn_exhaustive_search'
-        if core.globals().is_public(key):
-            self.strategy.cudnn_exhaustive_search = bool(core.globals()[key])
+        if _global_flags().is_public(key):
+            self.strategy.cudnn_exhaustive_search = bool(_global_flags()[key])
         key = 'FLAGS_sync_nccl_allreduce'
-        if core.globals().is_public(key):
-            self.strategy.sync_nccl_allreduce = bool(core.globals()[key])
+        if _global_flags().is_public(key):
+            self.strategy.sync_nccl_allreduce = bool(_global_flags()[key])

         self.__lock_attr = True
@@ -1561,8 +1561,8 @@ class DistributedStrategy(object):
         ]
         for i, key in enumerate(keys):
-            if core.globals().is_public(key):
-                core.globals()[key] = values[i]
+            if _global_flags().is_public(key):
+                _global_flags()[key] = values[i]

     def _is_strict_auto(self):
         global non_auto_func_called
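Note that DistributedStrategy only mirrors a flag into the strategy proto when the registry marks it public, and that guard is unchanged by the rename. A toy reproduction of the guard follows, with a hypothetical registry class standing in for the object _global_flags() returns; the class and flag values are illustrative, not Paddle's implementation.

# Toy stand-in for the flags registry, to show the is_public() guard.
class ToyFlagRegistry:
    def __init__(self, flags, public_keys):
        self._flags = flags
        self._public = set(public_keys)

    def is_public(self, key):
        return key in self._public

    def __getitem__(self, key):
        return self._flags[key]

reg = ToyFlagRegistry(
    flags={"FLAGS_cudnn_exhaustive_search": 1, "FLAGS_internal_only": 7},
    public_keys=["FLAGS_cudnn_exhaustive_search"],
)

key = "FLAGS_cudnn_exhaustive_search"
if reg.is_public(key):  # same guard shape as in DistributedStrategy
    cudnn_exhaustive_search = bool(reg[key])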
python/paddle/fluid/backward.py

@@ -456,7 +456,7 @@ def _addup_repetitive_outputs_(op_descs, block_idx):
     In these cases, the variable should be the accumulation of all the outputs.
     `sum_op`s are added to implement the accumulate.
     """
-    _MAX_ADD_NUM_ = core.globals()['FLAGS_max_inplace_grad_add']
+    _MAX_ADD_NUM_ = framework._global_flags()['FLAGS_max_inplace_grad_add']
     #pending_sum_ops = []
     pending_sum_ops = collections.OrderedDict()
     var_rename_count = collections.defaultdict(int)
python/paddle/fluid/dygraph/layer_object_helper.py

@@ -16,7 +16,7 @@ from __future__ import print_function

 import copy
 import six
-from ..framework import Parameter, in_dygraph_mode
+from ..framework import Parameter, in_dygraph_mode, _global_flags
 from ..param_attr import ParamAttr
 from .. import core
 from six.moves import zip
@@ -158,7 +158,7 @@ class LayerObjectHelper(LayerHelperBase):
             if (use_cudnn is not None) and use_cudnn:
                 act['use_cudnn'] = use_cudnn
-            use_mkldnn = core.globals()["FLAGS_use_mkldnn"]
+            use_mkldnn = _global_flags()["FLAGS_use_mkldnn"]
             if (use_mkldnn is not None) and use_mkldnn:
                 act['use_mkldnn'] = use_mkldnn
             act_type = act.pop('type')
python/paddle/fluid/dygraph/nn.py

@@ -21,7 +21,7 @@ from ..layers import utils
 from ..layers import nn as F
 from .. import dygraph_utils
 from . import layers
-from ..framework import Variable, in_dygraph_mode, OpProtoHolder, Parameter, _dygraph_tracer, _varbase_creator, default_main_program
+from ..framework import Variable, in_dygraph_mode, OpProtoHolder, Parameter, _dygraph_tracer, _varbase_creator, default_main_program, _global_flags
 from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
 from ..param_attr import ParamAttr
 from ..initializer import Normal, Constant, NumpyArrayInitializer
@@ -188,7 +188,7 @@ class Conv2D(layers.Layer):
         if not isinstance(use_cudnn, bool):
             raise ValueError("use_cudnn should be True or False")
         self._use_cudnn = use_cudnn
-        self._use_mkldnn = core.globals()["FLAGS_use_mkldnn"]
+        self._use_mkldnn = _global_flags()["FLAGS_use_mkldnn"]
         self._filter_size = filter_size
         self._num_filters = num_filters
         self._param_attr = param_attr
@@ -837,7 +837,7 @@ class Pool2D(layers.Layer):
         if not isinstance(use_cudnn, bool):
             raise ValueError("use_cudnn should be True or False")
-        self._use_mkldnn = core.globals()["FLAGS_use_mkldnn"]
+        self._use_mkldnn = _global_flags()["FLAGS_use_mkldnn"]
         if data_format not in ["NCHW", "NHWC"]:
             raise ValueError(
@@ -966,7 +966,7 @@ class Linear(layers.Layer):
         self.bias = self.create_parameter(
             shape=[output_dim], attr=bias_attr, dtype=dtype, is_bias=True)
-        self._use_mkldnn = core.globals()["FLAGS_use_mkldnn"]
+        self._use_mkldnn = _global_flags()["FLAGS_use_mkldnn"]

     def forward(self, input):
         if in_dygraph_mode():
@@ -1268,7 +1268,7 @@ class BatchNorm(layers.Layer):
         self._param_attr = param_attr
         self._bias_attr = bias_attr
         self._act = act
-        self._use_mkldnn = core.globals()["FLAGS_use_mkldnn"]
+        self._use_mkldnn = _global_flags()["FLAGS_use_mkldnn"]

         assert bias_attr is not False, "bias_attr should not be False in batch_norm."
python/paddle/fluid/framework.py

@@ -72,6 +72,7 @@ _dygraph_tracer_ = None
 _global_expected_place_ = None
 _current_device = None
 global_prog_seed = 0
+_global_flags_ = core.globals()


 def require_version(min_version, max_version=None):
@@ -286,6 +287,10 @@ def _dygraph_tracer():
     return _dygraph_tracer_


+def _global_flags():
+    return _global_flags_
+
+
 def _current_expected_place():
     global _global_expected_place_
     if _global_expected_place_ is None:
@@ -5833,8 +5838,8 @@ def set_flags(flags):
     if not isinstance(flags, dict):
         raise TypeError('flags in set_flags should be a dict')
     for key, value in flags.items():
-        if core.globals().is_public(key):
-            core.globals()[key] = value
+        if _global_flags().is_public(key):
+            _global_flags()[key] = value
         else:
             raise ValueError(
                 "Flag %s cannot set its value through this function." % (key))
@@ -5863,8 +5868,8 @@ def get_flags(flags):
     flags_value = {}
     if isinstance(flags, (list, tuple)):
         for key in flags:
-            if (core.globals().is_public(key)):
-                value = core.globals()[key]
+            if (_global_flags().is_public(key)):
+                value = _global_flags()[key]
                 temp = {key: value}
                 flags_value.update(temp)
             else:
@@ -5872,8 +5877,8 @@ def get_flags(flags):
                     'Flag %s cannot get its value through this function.' %
                     (key))
     elif isinstance(flags, str):
-        if (core.globals().is_public(flags)):
-            value = core.globals()[flags]
+        if (_global_flags().is_public(flags)):
+            value = _global_flags()[flags]
             temp = {flags: value}
             flags_value.update(temp)
         else:
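framework.py is the heart of the change: _global_flags_ snapshots core.globals() at import, _global_flags() hands the cached object out, and set_flags()/get_flags() now consult the cache instead of re-calling core.globals(). A hedged usage sketch, assuming a PaddlePaddle build containing this commit; FLAGS_use_mkldnn is the flag the commit's own tests pass to fluid.get_flags.

import paddle.fluid as fluid

# set_flags()/get_flags() now read and write through the cached registry.
fluid.set_flags({"FLAGS_use_mkldnn": True})
print(fluid.get_flags(["FLAGS_use_mkldnn"]))  # expected: {'FLAGS_use_mkldnn': True}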
python/paddle/fluid/layer_helper.py

@@ -17,7 +17,7 @@ from __future__ import print_function

 import copy
 import six
-from .framework import Parameter, dtype_is_floating, in_dygraph_mode, OpProtoHolder
+from .framework import Parameter, dtype_is_floating, in_dygraph_mode, OpProtoHolder, _global_flags
 from . import unique_name
 from paddle.fluid.initializer import Constant, Xavier
 from .param_attr import ParamAttr
@@ -148,7 +148,7 @@ class LayerHelper(LayerHelperBase):
         if 'use_cudnn' in self.kwargs and self.kwargs.get('use_cudnn'):
             act['use_cudnn'] = self.kwargs.get('use_cudnn')
         use_mkldnn = self.kwargs.get(
-            'use_mkldnn', core.globals().get("FLAGS_use_mkldnn", False))
+            'use_mkldnn', _global_flags().get("FLAGS_use_mkldnn", False))
         if use_mkldnn:
             act['use_mkldnn'] = use_mkldnn
         act_type = act.pop('type')
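The LayerHelper hunk keeps a caller-wins fallback: an explicit use_mkldnn kwarg takes precedence, and only otherwise does the cached registry supply the default. The same two-level lookup, sketched with plain dicts standing in for self.kwargs and the _global_flags() registry (illustrative values, not Paddle objects):

flags = {"FLAGS_use_mkldnn": True}  # stands in for _global_flags()
kwargs = {}                         # caller did not pass use_mkldnn

use_mkldnn = kwargs.get("use_mkldnn",
                        flags.get("FLAGS_use_mkldnn", False))
print(use_mkldnn)  # True -> taken from the registry default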
python/paddle/fluid/layers/nn.py

@@ -26,7 +26,7 @@ import six
 import paddle
 from ..layer_helper import LayerHelper
 from ..initializer import Normal, Constant, NumpyArrayInitializer
-from ..framework import Variable, OpProtoHolder, in_dygraph_mode, dygraph_only, _dygraph_tracer, default_main_program, _varbase_creator, static_only
+from ..framework import Variable, OpProtoHolder, in_dygraph_mode, dygraph_only, _dygraph_tracer, default_main_program, _varbase_creator, static_only, _global_flags
 from .. import dygraph_utils
 from ..param_attr import ParamAttr
 from .layer_function_generator import autodoc, templatedoc, _generate_doc_string_
@@ -9500,7 +9500,7 @@ def relu6(x, threshold=6.0, name=None):
         outputs={'Out': out},
         attrs={
             'threshold': threshold,
-            'use_mkldnn': core.globals()["FLAGS_use_mkldnn"]
+            'use_mkldnn': _global_flags()["FLAGS_use_mkldnn"]
         })
     return out
@@ -11569,7 +11569,7 @@ Examples:
             axis=axis,
             act=act,
             op_name='elementwise_add',
-            use_mkldnn=core.globals()["FLAGS_use_mkldnn"])
+            use_mkldnn=_global_flags()["FLAGS_use_mkldnn"])

     return _elementwise_op(LayerHelper('elementwise_add', **locals()))
python/paddle/fluid/tests/unittests/mkldnn/check_flags_mkldnn_ops_on_off.py

@@ -19,18 +19,19 @@ import numpy as np
 import paddle.fluid as fluid
 import os
 from paddle.fluid.layer_helper import LayerHelper
+from paddle.fluid.framework import _global_flags


 def check():
-    print("check: fluid.core.globals()['FLAGS_use_mkldnn']=",
-          fluid.core.globals()["FLAGS_use_mkldnn"])
+    print("check: _global_flags()['FLAGS_use_mkldnn']=",
+          _global_flags()["FLAGS_use_mkldnn"])
     print("check: fluid.get_flags('FLAGS_use_mkldnn')=",
           fluid.get_flags(['FLAGS_use_mkldnn']))
     print("check: DNNL_VERBOSE=", os.environ['DNNL_VERBOSE'])
     print("check: FLAGS_tracer_mkldnn_ops_on=",
-          fluid.core.globals()['FLAGS_tracer_mkldnn_ops_on'])
+          _global_flags()['FLAGS_tracer_mkldnn_ops_on'])
     print("check: FLAGS_tracer_mkldnn_ops_off=",
-          fluid.core.globals()['FLAGS_tracer_mkldnn_ops_off'])
+          _global_flags()['FLAGS_tracer_mkldnn_ops_off'])
     a_np = np.random.uniform(-2, 2, (10, 20, 30)).astype(np.float32)
     b_np = np.random.uniform(-5, 5, (10, 20, 30)).astype(np.float32)
     helper = LayerHelper(fluid.unique_name.generate(str("test")), act="relu")
python/paddle/fluid/tests/unittests/mkldnn/check_flags_use_mkldnn.py

@@ -19,11 +19,12 @@ import numpy as np
 import paddle.fluid as fluid
 import os
 from paddle.fluid.layer_helper import LayerHelper
+from paddle.fluid.framework import _global_flags


 def check():
-    print("check: fluid.core.globals()['FLAGS_use_mkldnn']=",
-          fluid.core.globals()["FLAGS_use_mkldnn"])
+    print("check: _global_flags()['FLAGS_use_mkldnn']=",
+          _global_flags()["FLAGS_use_mkldnn"])
     print("check: fluid.get_flags('FLAGS_use_mkldnn')=",
           fluid.get_flags(['FLAGS_use_mkldnn']))
     print("check: DNNL_VERBOSE=", os.environ['DNNL_VERBOSE'])
python/paddle/nn/functional/conv.py

@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 from __future__ import print_function
+from paddle.fluid.framework import _global_flags

 import numpy as np
 from ...device import get_cudnn_version
@@ -537,7 +538,7 @@ def conv2d(x,
     use_cudnn = True if (core.is_compiled_with_cuda() and
                          cudnn_version is not None) else False
-    use_mkldnn = core.globals()["FLAGS_use_mkldnn"]
+    use_mkldnn = _global_flags()["FLAGS_use_mkldnn"]

     # update attrs
     padding, padding_algorithm = _update_padding_nd(padding, channel_last, 2)