Crayon鑫 / Paddle (fork of PaddlePaddle / Paddle)
Commit 4c11be86 (unverified)
Authored on Apr 13, 2022 by Jiabin Yang; committed by GitHub on Apr 13, 2022

fix_ci_problem3 (#41484) (#41705)

* fix_ci_problem3
* support windows no default error

Parent commit: 7f1e81fd
Showing 5 changed files with 141 additions and 58 deletions (+141, −58)
paddle/fluid/eager/backward.cc  (+3, −2)
python/paddle/fluid/tests/unittests/test_graph_khop_sampler.py  (+23, −6)
python/paddle/fluid/tests/unittests/test_switch_autotune.py  (+24, −4)
python/paddle/fluid/tests/unittests/test_zeropad2d.py  (+37, −6)
python/paddle/nn/functional/common.py  (+54, −40)
paddle/fluid/eager/backward.cc

@@ -22,10 +22,10 @@
 #include "paddle/fluid/platform/profiler.h"
 #include "paddle/fluid/platform/profiler/event_tracing.h"

 #include "glog/logging.h"
 #include "paddle/fluid/platform/enforce.h"
 #include "paddle/fluid/platform/errors.h"
+#include "paddle/phi/kernels/autotune/switch_autotune.h"

 namespace egr {

@@ -799,6 +799,7 @@ void Backward(
   paddle::platform::RecordEvent backward_record_event(
       "backward", paddle::platform::TracerEventType::Operator, 1);
   RunBackward(tensors, grad_tensors, retain_graph);
+  phi::autotune::AutoTuneStatus::Instance().Update();
 }

 std::vector<paddle::experimental::Tensor> Grad(
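The change above wires the kernel auto-tune bookkeeping into the end of every eager backward pass. A minimal dygraph sketch of the call path that now reaches the added line; the tensor names and shapes are illustrative, not taken from the commit:

import paddle

# Each dygraph backward pass runs egr::Backward() in backward.cc; with this
# commit it now finishes by calling
# phi::autotune::AutoTuneStatus::Instance().Update(), so the auto-tune status
# is refreshed once per .backward() call.
x = paddle.randn([4, 8])
w = paddle.randn([8, 2])
w.stop_gradient = False
loss = paddle.matmul(x, w).sum()
loss.backward()      # triggers Backward(), then the newly added Update()
print(w.grad.shape)  # [8, 2]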
python/paddle/fluid/tests/unittests/test_graph_khop_sampler.py

@@ -46,7 +46,7 @@ class TestGraphKhopSampler(unittest.TestCase):
         self.sample_sizes = [5, 5]
         self.dst_src_dict = dst_src_dict

-    def test_sample_result(self):
+    def func_sample_result(self):
         paddle.disable_static()
         row = paddle.to_tensor(self.row)
         colptr = paddle.to_tensor(self.colptr)

@@ -79,13 +79,25 @@ class TestGraphKhopSampler(unittest.TestCase):
             # Ensure the correct sample neighbors.
             self.assertTrue(np.sum(in_neighbors) == in_neighbors.shape[0])

-    def test_uva_sample_result(self):
+    def test_sample_result(self):
+        with fluid.framework._test_eager_guard():
+            self.func_sample_result()
+        self.func_sample_result()
+
+    def func_uva_sample_result(self):
         paddle.disable_static()
         if paddle.fluid.core.is_compiled_with_cuda():
-            row = paddle.fluid.core.to_uva_tensor(
-                self.row.astype(self.row.dtype))
-            sorted_eid = paddle.fluid.core.to_uva_tensor(
-                self.sorted_eid.astype(self.sorted_eid.dtype))
+            row = None
+            if fluid.framework.in_dygraph_mode():
+                row = paddle.fluid.core.eager.to_uva_tensor(
+                    self.row.astype(self.row.dtype), 0)
+                sorted_eid = paddle.fluid.core.eager.to_uva_tensor(
+                    self.sorted_eid.astype(self.sorted_eid.dtype), 0)
+            else:
+                row = paddle.fluid.core.to_uva_tensor(
+                    self.row.astype(self.row.dtype))
+                sorted_eid = paddle.fluid.core.to_uva_tensor(
+                    self.sorted_eid.astype(self.sorted_eid.dtype))
             colptr = paddle.to_tensor(self.colptr)
             nodes = paddle.to_tensor(self.nodes)

@@ -114,6 +126,11 @@ class TestGraphKhopSampler(unittest.TestCase):
                 in_neighbors = np.isin(edge_src_n.numpy(), self.dst_src_dict[n])
                 self.assertTrue(np.sum(in_neighbors) == in_neighbors.shape[0])

+    def test_uva_sample_result(self):
+        with fluid.framework._test_eager_guard():
+            self.func_uva_sample_result()
+        self.func_uva_sample_result()
+
     def test_sample_result_static_with_eids(self):
         paddle.enable_static()
         with paddle.static.program_guard(paddle.static.Program()):
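Every Python test touched by this commit follows the same pattern: the original test body is renamed to a func_* helper, and the test_* entry point runs it twice, once under fluid.framework._test_eager_guard() (the new eager dygraph mode) and once more in legacy dygraph. A self-contained sketch of that pattern; ExampleEagerAndLegacy and func_add are hypothetical names, not part of the commit:

import unittest

import paddle
from paddle import fluid


class ExampleEagerAndLegacy(unittest.TestCase):
    # Hypothetical case illustrating the pattern applied throughout this commit.
    def func_add(self):
        x = paddle.to_tensor([1.0, 2.0])
        y = paddle.to_tensor([3.0, 4.0])
        self.assertEqual((x + y).numpy().tolist(), [4.0, 6.0])

    def test_add(self):
        # Run once in the new eager mode, then once more in legacy dygraph.
        with fluid.framework._test_eager_guard():
            self.func_add()
        self.func_add()


if __name__ == '__main__':
    unittest.main()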
python/paddle/fluid/tests/unittests/test_switch_autotune.py

@@ -87,12 +87,22 @@ class TestDygraphAutoTuneStatus(TestAutoTune):
         }
         self.check_status(expected_res)

-    def test_enable_autotune(self):
+    def func_enable_autotune(self):
         self.run_program(enable_autotune=True)

-    def test_disable_autotune(self):
+    def test_enable_autotune(self):
+        with paddle.fluid.framework._test_eager_guard():
+            self.func_enable_autotune()
+        self.func_enable_autotune()
+
+    def func_disable_autotune(self):
         self.run_program(enable_autotune=False)

+    def test_disable_autotune(self):
+        with paddle.fluid.framework._test_eager_guard():
+            self.func_disable_autotune()
+        self.func_disable_autotune()
+

 class TestStaticAutoTuneStatus(TestAutoTune):
     def run_program(self, enable_autotune):

@@ -136,12 +146,22 @@ class TestStaticAutoTuneStatus(TestAutoTune):
         self.check_status(expected_res)
         paddle.disable_static()

-    def test_enable_autotune(self):
+    def func_enable_autotune(self):
         self.run_program(enable_autotune=True)

-    def test_disable_autotune(self):
+    def test_enable_autotune(self):
+        with paddle.fluid.framework._test_eager_guard():
+            self.func_enable_autotune()
+        self.func_enable_autotune()
+
+    def func_disable_autotune(self):
         self.run_program(enable_autotune=False)

+    def test_disable_autotune(self):
+        with paddle.fluid.framework._test_eager_guard():
+            self.func_disable_autotune()
+        self.func_disable_autotune()
+

 if __name__ == '__main__':
     unittest.main()
python/paddle/fluid/tests/unittests/test_zeropad2d.py

@@ -16,6 +16,7 @@ from __future__ import print_function
 import unittest
 import numpy as np
+import paddle
 from paddle import to_tensor
 from paddle.nn.functional import zeropad2d
 from paddle.nn import ZeroPad2D

@@ -33,7 +34,7 @@ class TestZeroPad2dAPIError(unittest.TestCase):
         self.shape = [4, 3, 224, 224]
         self.unsupport_dtypes = ['bool', 'int8']

-    def test_unsupport_dtypes(self):
+    def func_unsupport_dtypes(self):
         """
         test unsupport dtypes.
         """

@@ -43,6 +44,11 @@ class TestZeroPad2dAPIError(unittest.TestCase):
             x_tensor = to_tensor(x).astype(dtype)
             self.assertRaises(TypeError, zeropad2d, x=x_tensor, padding=pad)

+    def test_unsupport_dtypes(self):
+        with paddle.fluid.framework._test_eager_guard():
+            self.func_unsupport_dtypes()
+        self.func_unsupport_dtypes()
+

 class TestZeroPad2dAPI(unittest.TestCase):
     """

@@ -56,7 +62,7 @@ class TestZeroPad2dAPI(unittest.TestCase):
         self.shape = [4, 3, 224, 224]
         self.support_dtypes = ['float32', 'float64', 'int32', 'int64']

-    def test_support_dtypes(self):
+    def func_support_dtypes(self):
         """
         test support types
         """

@@ -69,7 +75,12 @@ class TestZeroPad2dAPI(unittest.TestCase):
             ret_res = zeropad2d(x_tensor, [pad, pad, pad, pad]).numpy()
             self.assertTrue(np.allclose(expect_res, ret_res))

-    def test_support_pad2(self):
+    def test_support_dtypes(self):
+        with paddle.fluid.framework._test_eager_guard():
+            self.func_support_dtypes()
+        self.func_support_dtypes()
+
+    def func_support_pad2(self):
         """
         test the type of 'pad' is list.
         """

@@ -82,7 +93,12 @@ class TestZeroPad2dAPI(unittest.TestCase):
             ret_res = zeropad2d(x_tensor, pad).numpy()
             self.assertTrue(np.allclose(expect_res, ret_res))

-    def test_support_pad3(self):
+    def test_support_pad2(self):
+        with paddle.fluid.framework._test_eager_guard():
+            self.func_support_pad2()
+        self.func_support_pad2()
+
+    def func_support_pad3(self):
         """
         test the type of 'pad' is tuple.
         """

@@ -95,7 +111,12 @@ class TestZeroPad2dAPI(unittest.TestCase):
             ret_res = zeropad2d(x_tensor, pad).numpy()
             self.assertTrue(np.allclose(expect_res, ret_res))

-    def test_support_pad4(self):
+    def test_support_pad3(self):
+        with paddle.fluid.framework._test_eager_guard():
+            self.func_support_pad3()
+        self.func_support_pad3()
+
+    def func_support_pad4(self):
         """
         test the type of 'pad' is paddle.Tensor.
         """

@@ -109,6 +130,11 @@ class TestZeroPad2dAPI(unittest.TestCase):
         ret_res = zeropad2d(x_tensor, pad_tensor).numpy()
         self.assertTrue(np.allclose(expect_res, ret_res))

+    def test_support_pad4(self):
+        with paddle.fluid.framework._test_eager_guard():
+            self.func_support_pad4()
+        self.func_support_pad4()
+

 class TestZeroPad2DLayer(unittest.TestCase):
     """

@@ -124,12 +150,17 @@ class TestZeroPad2DLayer(unittest.TestCase):
                                 [[0, 0], [0, 0],
                                  [self.pad[2], self.pad[3]],
                                  [self.pad[0], self.pad[1]]])

-    def test_layer(self):
+    def func_layer(self):
         self.assertTrue(
             np.allclose(
                 zeropad2d(to_tensor(self.x), self.pad).numpy(),
                 self.padLayer(to_tensor(self.x))))

+    def test_layer(self):
+        with paddle.fluid.framework._test_eager_guard():
+            self.func_layer()
+        self.func_layer()
+

 if __name__ == '__main__':
     unittest.main()
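For reference, a small usage sketch of the zeropad2d API these tests exercise. The padding order is (left, right, top, bottom) over the last two dimensions of an NCHW tensor; the shapes below are illustrative only:

import numpy as np
import paddle
from paddle.nn.functional import zeropad2d

x = paddle.ones([1, 1, 2, 2])
# One zero column on the left and one on the right; height unchanged.
y = zeropad2d(x, [1, 1, 0, 0])
print(y.shape)                                          # [1, 1, 2, 4]
print(np.allclose(y.numpy()[:, :, :, 1:3], x.numpy()))  # True: interior unchanged
print(float(y[:, :, :, 0].sum()))                       # 0.0: padded border is zero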
python/paddle/nn/functional/common.py

@@ -1356,29 +1356,31 @@ def pad(x, pad, mode='constant', value=0, data_format="NCHW", name=None):
         unsqueezed_dim = [1]
         x = unsqueeze(x, axis=unsqueezed_dim)

-    if in_dynamic_mode():
+    if in_dygraph_mode():
         if isinstance(pad, Variable):
-            pad = pad.numpy()
-        out = _C_ops.pad3d(x, "paddings", pad, "mode", mode, "value", value,
-                           "data_format", data_format, "name", name)
+            pad = pad.numpy().tolist()
+        out = _C_ops.final_state_pad3d(x, pad, mode, value, data_format)
     else:
-        attrs = {'mode': mode, 'value': value, 'data_format': data_format}
-        inputs = {'X': [x]}
-        if isinstance(pad, Variable):
-            inputs['Paddings'] = [pad]
-            attrs['paddings'] = []
-        else:
-            attrs['paddings'] = pad
-
-        helper = LayerHelper('pad3d', **locals())
-
-        dtype = helper.input_dtype(input_param_name='input')
-        out = helper.create_variable_for_type_inference(dtype)
-        helper.append_op(
-            type='pad3d', inputs=inputs, outputs={"Out": out}, attrs=attrs)
+        if _in_legacy_dygraph():
+            if isinstance(pad, Variable):
+                pad = pad.numpy().tolist()
+            out = _C_ops.pad3d(x, "paddings", pad, "mode", mode, "value", value,
+                               "data_format", data_format, "name", name)
+        else:
+            attrs = {'mode': mode, 'value': value, 'data_format': data_format}
+            inputs = {'X': [x]}
+            if isinstance(pad, Variable):
+                inputs['Paddings'] = [pad]
+                attrs['paddings'] = []
+            else:
+                attrs['paddings'] = pad
+
+            helper = LayerHelper('pad3d', **locals())
+
+            dtype = helper.input_dtype(input_param_name='input')
+            out = helper.create_variable_for_type_inference(dtype)
+            helper.append_op(
+                type='pad3d', inputs=inputs, outputs={"Out": out}, attrs=attrs)

     if len(unsqueezed_dim) != 0:
         out = squeeze(out, axis=unsqueezed_dim)

@@ -1531,38 +1533,50 @@ def linear(x, weight, bias=None, name=None):
           #  [0.9440598 0.9440598 0.9440598 0.9440598 ]
           #  [2.1077576 2.1077576 2.1077576 2.1077576 ]]
     """
-    if in_dynamic_mode():
-        pre_bias = _C_ops.matmul_v2(x, weight, 'trans_x', False, 'trans_y',
-                                    False)
+    if in_dygraph_mode():
+        pre_bias = _C_ops.final_state_matmul(x, weight, False, False)

         if bias is None:
             return pre_bias

-        return _C_ops.elementwise_add(pre_bias, bias)
+        return _C_ops.final_state_add(pre_bias, bias)
     else:
-        helper = LayerHelper('linear', **locals())
-        dtype = x.dtype
+        if _in_legacy_dygraph():
+            pre_bias = _C_ops.matmul_v2(x, weight, 'trans_x', False, 'trans_y',
+                                        False)

-        check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
-                                 'linear')
-        check_dtype(dtype, 'dtype', ['float16', 'float32', 'float64'], 'linear')
+            if bias is None:
+                return pre_bias

-        inputs = {'X': [x], 'Y': [weight]}
-        attrs = {'trans_x': False, 'trans_y': False}
-        tmp = helper.create_variable_for_type_inference(dtype)
-        helper.append_op(
-            type='matmul_v2', inputs=inputs, outputs={'Out': tmp}, attrs=attrs)
-        if bias is not None:
-            res = helper.create_variable_for_type_inference(dtype)
-            helper.append_op(
-                type='elementwise_add',
-                inputs={'X': [tmp],
-                        'Y': [bias]},
-                outputs={'Out': [res]},
-                attrs={'axis': len(x.shape) - 1})
+            return _C_ops.elementwise_add(pre_bias, bias)
         else:
-            res = tmp
-        return res
+            helper = LayerHelper('linear', **locals())
+            dtype = x.dtype
+
+            check_variable_and_dtype(x, 'x',
+                                     ['float16', 'float32', 'float64'],
+                                     'linear')
+            check_dtype(dtype, 'dtype', ['float16', 'float32', 'float64'],
+                        'linear')
+
+            inputs = {'X': [x], 'Y': [weight]}
+            attrs = {'trans_x': False, 'trans_y': False}
+            tmp = helper.create_variable_for_type_inference(dtype)
+            helper.append_op(
+                type='matmul_v2', inputs=inputs, outputs={'Out': tmp},
+                attrs=attrs)
+            if bias is not None:
+                res = helper.create_variable_for_type_inference(dtype)
+                helper.append_op(
+                    type='elementwise_add',
+                    inputs={'X': [tmp],
+                            'Y': [bias]},
+                    outputs={'Out': [res]},
+                    attrs={'axis': len(x.shape) - 1})
+            else:
+                res = tmp
+            return res


 def label_smooth(label, prior_dist=None, epsilon=0.1, name=None):
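The two hunks above only restructure how pad and linear dispatch between the new eager mode (final_state_* ops), legacy dygraph (_C_ops.pad3d and matmul_v2), and the static-graph LayerHelper path; the public behavior is unchanged. A brief usage sketch of the two entry points, with shapes chosen purely for illustration:

import paddle
import paddle.nn.functional as F

x = paddle.ones([1, 1, 2, 2])
# A 4-element pad on an NCHW tensor routes through the pad3d path above.
padded = F.pad(x, [1, 0, 1, 0], mode='constant', value=0.0, data_format='NCHW')
print(padded.shape)  # [1, 1, 3, 3]

# linear() lowers to a matmul, plus an elementwise add when bias is given.
w = paddle.randn([4, 3])
b = paddle.zeros([3])
out = F.linear(paddle.randn([2, 4]), w, b)
print(out.shape)     # [2, 3]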