Unverified · Commit 4c11be86, authored by Jiabin Yang, committed by GitHub

fix_ci_problem3 (#41484) (#41705)

* fix_ci_problem3

* support windows no default error
Parent 7f1e81fd
@@ -22,10 +22,10 @@
 #include "paddle/fluid/platform/profiler.h"
 #include "paddle/fluid/platform/profiler/event_tracing.h"
-#include "glog/logging.h"
 #include "paddle/fluid/platform/enforce.h"
 #include "paddle/fluid/platform/errors.h"
+#include "paddle/phi/kernels/autotune/switch_autotune.h"
+#include "glog/logging.h"

 namespace egr {
@@ -799,6 +799,7 @@ void Backward(
   paddle::platform::RecordEvent backward_record_event(
       "backward", paddle::platform::TracerEventType::Operator, 1);
   RunBackward(tensors, grad_tensors, retain_graph);
+  phi::autotune::AutoTuneStatus::Instance().Update();
 }

 std::vector<paddle::experimental::Tensor> Grad(
......
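Note: the one functional change above is the `phi::autotune::AutoTuneStatus::Instance().Update()` call after `RunBackward`, which advances the kernel-autotune state once per backward pass. As a rough, non-authoritative sketch of the step-counting idea (all names below are illustrative, not Paddle's actual implementation):

    # Illustrative sketch only: a status object counts backward steps and
    # uses a step window to decide whether kernel autotuning is active.
    class AutoTuneStatusSketch:
        def __init__(self, tune_start_step=1, tune_stop_step=10):
            self.step_id = 0
            self.tune_start_step = tune_start_step
            self.tune_stop_step = tune_stop_step
            self.use_autotune = False

        def update(self):
            # called once per backward pass
            self.step_id += 1
            # measure candidate kernels only inside the tuning window
            self.use_autotune = (
                self.tune_start_step <= self.step_id < self.tune_stop_step)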
@@ -46,7 +46,7 @@ class TestGraphKhopSampler(unittest.TestCase):
         self.sample_sizes = [5, 5]
         self.dst_src_dict = dst_src_dict

-    def test_sample_result(self):
+    def func_sample_result(self):
         paddle.disable_static()
         row = paddle.to_tensor(self.row)
         colptr = paddle.to_tensor(self.colptr)
@@ -79,13 +79,25 @@ class TestGraphKhopSampler(unittest.TestCase):
             # Ensure the correct sample neighbors.
             self.assertTrue(np.sum(in_neighbors) == in_neighbors.shape[0])

-    def test_uva_sample_result(self):
+    def test_sample_result(self):
+        with fluid.framework._test_eager_guard():
+            self.func_sample_result()
+        self.func_sample_result()
+
+    def func_uva_sample_result(self):
         paddle.disable_static()
         if paddle.fluid.core.is_compiled_with_cuda():
-            row = paddle.fluid.core.to_uva_tensor(
-                self.row.astype(self.row.dtype))
-            sorted_eid = paddle.fluid.core.to_uva_tensor(
-                self.sorted_eid.astype(self.sorted_eid.dtype))
+            row = None
+            if fluid.framework.in_dygraph_mode():
+                row = paddle.fluid.core.eager.to_uva_tensor(
+                    self.row.astype(self.row.dtype), 0)
+                sorted_eid = paddle.fluid.core.eager.to_uva_tensor(
+                    self.sorted_eid.astype(self.sorted_eid.dtype), 0)
+            else:
+                row = paddle.fluid.core.to_uva_tensor(
+                    self.row.astype(self.row.dtype))
+                sorted_eid = paddle.fluid.core.to_uva_tensor(
+                    self.sorted_eid.astype(self.sorted_eid.dtype))
             colptr = paddle.to_tensor(self.colptr)
             nodes = paddle.to_tensor(self.nodes)
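For reference: `to_uva_tensor` wraps a host numpy array in a tensor backed by CUDA unified virtual addressing, so the GPU can address host memory directly. The eager-mode binding lives under `core.eager` and takes an explicit device id (the trailing `0`); legacy dygraph keeps the single-argument form. A minimal sketch of the selection logic, using only names that appear in the hunk (`arr` stands for any host numpy array):

    # choose the UVA constructor that matches the current dygraph mode
    if fluid.framework.in_dygraph_mode():
        uva = paddle.fluid.core.eager.to_uva_tensor(arr, 0)  # eager: device id 0
    else:
        uva = paddle.fluid.core.to_uva_tensor(arr)           # legacy dygraph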
@@ -114,6 +126,11 @@ class TestGraphKhopSampler(unittest.TestCase):
                 in_neighbors = np.isin(edge_src_n.numpy(), self.dst_src_dict[n])
                 self.assertTrue(np.sum(in_neighbors) == in_neighbors.shape[0])

+    def test_uva_sample_result(self):
+        with fluid.framework._test_eager_guard():
+            self.func_uva_sample_result()
+        self.func_uva_sample_result()
+
     def test_sample_result_static_with_eids(self):
         paddle.enable_static()
         with paddle.static.program_guard(paddle.static.Program()):
......
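Every test file touched by this PR follows the same refactor, repeated in the autotune and zeropad2d hunks below: the original test body moves into a `func_*` helper, and the `test_*` entry point runs it twice — once inside `_test_eager_guard()` (the new eager mode) and once outside (legacy dygraph) — so a single unittest method covers both execution paths:

    def test_sample_result(self):
        with fluid.framework._test_eager_guard():
            self.func_sample_result()   # runs under the new eager mode
        self.func_sample_result()       # runs again under legacy dygraph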
@@ -87,12 +87,22 @@ class TestDygraphAutoTuneStatus(TestAutoTune):
         }
         self.check_status(expected_res)

-    def test_enable_autotune(self):
+    def func_enable_autotune(self):
         self.run_program(enable_autotune=True)

-    def test_disable_autotune(self):
+    def test_enable_autotune(self):
+        with paddle.fluid.framework._test_eager_guard():
+            self.func_enable_autotune()
+        self.func_enable_autotune()
+
+    def func_disable_autotune(self):
         self.run_program(enable_autotune=False)

+    def test_disable_autotune(self):
+        with paddle.fluid.framework._test_eager_guard():
+            self.func_disable_autotune()
+        self.func_disable_autotune()
+

 class TestStaticAutoTuneStatus(TestAutoTune):
     def run_program(self, enable_autotune):
@@ -136,12 +146,22 @@ class TestStaticAutoTuneStatus(TestAutoTune):
         self.check_status(expected_res)
         paddle.disable_static()

-    def test_enable_autotune(self):
+    def func_enable_autotune(self):
         self.run_program(enable_autotune=True)

-    def test_disable_autotune(self):
+    def test_enable_autotune(self):
+        with paddle.fluid.framework._test_eager_guard():
+            self.func_enable_autotune()
+        self.func_enable_autotune()
+
+    def func_disable_autotune(self):
         self.run_program(enable_autotune=False)

+    def test_disable_autotune(self):
+        with paddle.fluid.framework._test_eager_guard():
+            self.func_disable_autotune()
+        self.func_disable_autotune()
+

 if __name__ == '__main__':
     unittest.main()
@@ -16,6 +16,7 @@ from __future__ import print_function

 import unittest
 import numpy as np
+import paddle
 from paddle import to_tensor
 from paddle.nn.functional import zeropad2d
 from paddle.nn import ZeroPad2D
@@ -33,7 +34,7 @@ class TestZeroPad2dAPIError(unittest.TestCase):
         self.shape = [4, 3, 224, 224]
         self.unsupport_dtypes = ['bool', 'int8']

-    def test_unsupport_dtypes(self):
+    def func_unsupport_dtypes(self):
         """
         test unsupport dtypes.
         """
@@ -43,6 +44,11 @@ class TestZeroPad2dAPIError(unittest.TestCase):
             x_tensor = to_tensor(x).astype(dtype)
             self.assertRaises(TypeError, zeropad2d, x=x_tensor, padding=pad)

+    def test_unsupport_dtypes(self):
+        with paddle.fluid.framework._test_eager_guard():
+            self.func_unsupport_dtypes()
+        self.func_unsupport_dtypes()
+

 class TestZeroPad2dAPI(unittest.TestCase):
     """
@@ -56,7 +62,7 @@ class TestZeroPad2dAPI(unittest.TestCase):
         self.shape = [4, 3, 224, 224]
         self.support_dtypes = ['float32', 'float64', 'int32', 'int64']

-    def test_support_dtypes(self):
+    def func_support_dtypes(self):
         """
         test support types
         """
@@ -69,7 +75,12 @@ class TestZeroPad2dAPI(unittest.TestCase):
             ret_res = zeropad2d(x_tensor, [pad, pad, pad, pad]).numpy()
             self.assertTrue(np.allclose(expect_res, ret_res))

-    def test_support_pad2(self):
+    def test_support_dtypes(self):
+        with paddle.fluid.framework._test_eager_guard():
+            self.func_support_dtypes()
+        self.func_support_dtypes()
+
+    def func_support_pad2(self):
         """
         test the type of 'pad' is list.
         """
@@ -82,7 +93,12 @@ class TestZeroPad2dAPI(unittest.TestCase):
             ret_res = zeropad2d(x_tensor, pad).numpy()
             self.assertTrue(np.allclose(expect_res, ret_res))

-    def test_support_pad3(self):
+    def test_support_pad2(self):
+        with paddle.fluid.framework._test_eager_guard():
+            self.func_support_pad2()
+        self.func_support_pad2()
+
+    def func_support_pad3(self):
         """
         test the type of 'pad' is tuple.
         """
@@ -95,7 +111,12 @@ class TestZeroPad2dAPI(unittest.TestCase):
             ret_res = zeropad2d(x_tensor, pad).numpy()
             self.assertTrue(np.allclose(expect_res, ret_res))

-    def test_support_pad4(self):
+    def test_support_pad3(self):
+        with paddle.fluid.framework._test_eager_guard():
+            self.func_support_pad3()
+        self.func_support_pad3()
+
+    def func_support_pad4(self):
         """
         test the type of 'pad' is paddle.Tensor.
         """
@@ -109,6 +130,11 @@ class TestZeroPad2dAPI(unittest.TestCase):
             ret_res = zeropad2d(x_tensor, pad_tensor).numpy()
             self.assertTrue(np.allclose(expect_res, ret_res))

+    def test_support_pad4(self):
+        with paddle.fluid.framework._test_eager_guard():
+            self.func_support_pad4()
+        self.func_support_pad4()
+

 class TestZeroPad2DLayer(unittest.TestCase):
     """
@@ -124,12 +150,17 @@ class TestZeroPad2DLayer(unittest.TestCase):
             [[0, 0], [0, 0], [self.pad[2], self.pad[3]],
              [self.pad[0], self.pad[1]]])

-    def test_layer(self):
+    def func_layer(self):
         self.assertTrue(
             np.allclose(
                 zeropad2d(to_tensor(self.x), self.pad).numpy(),
                 self.padLayer(to_tensor(self.x))))

+    def test_layer(self):
+        with paddle.fluid.framework._test_eager_guard():
+            self.func_layer()
+        self.func_layer()
+

 if __name__ == '__main__':
     unittest.main()
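For reference, the API under test pads the last two dimensions of a 4-D tensor with zeros; a minimal runnable example (shapes chosen arbitrarily):

    import paddle
    from paddle.nn.functional import zeropad2d

    x = paddle.ones([1, 1, 2, 2])
    y = zeropad2d(x, [1, 1, 1, 1])  # padding = [left, right, top, bottom]
    print(y.shape)                  # [1, 1, 4, 4]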
@@ -1356,29 +1356,31 @@ def pad(x, pad, mode='constant', value=0, data_format="NCHW", name=None):
         unsqueezed_dim = [1]
         x = unsqueeze(x, axis=unsqueezed_dim)

-    if in_dynamic_mode():
+    if in_dygraph_mode():
         if isinstance(pad, Variable):
-            pad = pad.numpy()
+            pad = pad.numpy().tolist()
+        out = _C_ops.final_state_pad3d(x, pad, mode, value, data_format)
+    else:
         if _in_legacy_dygraph():
+            if isinstance(pad, Variable):
+                pad = pad.numpy().tolist()
             out = _C_ops.pad3d(x, "paddings", pad, "mode", mode, "value", value,
                                "data_format", data_format, "name", name)
         else:
-            out = _C_ops.final_state_pad3d(x, pad, mode, value, data_format)
-    else:
-        attrs = {'mode': mode, 'value': value, 'data_format': data_format}
-        inputs = {'X': [x]}
-        if isinstance(pad, Variable):
-            inputs['Paddings'] = [pad]
-            attrs['paddings'] = []
-        else:
-            attrs['paddings'] = pad
-
-        helper = LayerHelper('pad3d', **locals())
-        dtype = helper.input_dtype(input_param_name='input')
-        out = helper.create_variable_for_type_inference(dtype)
-        helper.append_op(
-            type='pad3d', inputs=inputs, outputs={"Out": out}, attrs=attrs)
+            attrs = {'mode': mode, 'value': value, 'data_format': data_format}
+            inputs = {'X': [x]}
+            if isinstance(pad, Variable):
+                inputs['Paddings'] = [pad]
+                attrs['paddings'] = []
+            else:
+                attrs['paddings'] = pad
+
+            helper = LayerHelper('pad3d', **locals())
+            dtype = helper.input_dtype(input_param_name='input')
+            out = helper.create_variable_for_type_inference(dtype)
+            helper.append_op(
+                type='pad3d', inputs=inputs, outputs={"Out": out}, attrs=attrs)

     if len(unsqueezed_dim) != 0:
         out = squeeze(out, axis=unsqueezed_dim)
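The restructure above gives `pad` a three-way dispatch: the new eager mode calls the final-state operator directly, legacy dygraph keeps the attribute-packed `_C_ops.pad3d` call, and the static graph path builds the op through `LayerHelper`. At the Python API level the behavior is unchanged; a small runnable check (values chosen arbitrarily):

    import paddle
    import paddle.nn.functional as F

    x = paddle.ones([1, 1, 2, 2])
    # for a 4-D NCHW input, pad is [left, right, top, bottom]
    y = F.pad(x, [1, 0, 1, 0], mode='constant', value=0)
    print(y.shape)  # [1, 1, 3, 3]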
@@ -1531,38 +1533,50 @@ def linear(x, weight, bias=None, name=None):
           #     [0.9440598 0.9440598 0.9440598 0.9440598 ]
           #     [2.1077576 2.1077576 2.1077576 2.1077576 ]]
     """
-    if in_dynamic_mode():
-        pre_bias = _C_ops.matmul_v2(x, weight, 'trans_x', False, 'trans_y',
-                                    False)
+    if in_dygraph_mode():
+        pre_bias = _C_ops.final_state_matmul(x, weight, False, False)

         if bias is None:
             return pre_bias

-        return _C_ops.elementwise_add(pre_bias, bias)
+        return _C_ops.final_state_add(pre_bias, bias)
     else:
-        helper = LayerHelper('linear', **locals())
-        dtype = x.dtype
-
-        check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
-                                 'linear')
-        check_dtype(dtype, 'dtype', ['float16', 'float32', 'float64'], 'linear')
-
-        inputs = {'X': [x], 'Y': [weight]}
-        attrs = {'trans_x': False, 'trans_y': False}
-        tmp = helper.create_variable_for_type_inference(dtype)
-        helper.append_op(
-            type='matmul_v2', inputs=inputs, outputs={'Out': tmp}, attrs=attrs)
-        if bias is not None:
-            res = helper.create_variable_for_type_inference(dtype)
-            helper.append_op(
-                type='elementwise_add',
-                inputs={'X': [tmp],
-                        'Y': [bias]},
-                outputs={'Out': [res]},
-                attrs={'axis': len(x.shape) - 1})
-        else:
-            res = tmp
-        return res
+        if _in_legacy_dygraph():
+            pre_bias = _C_ops.matmul_v2(x, weight, 'trans_x', False, 'trans_y',
+                                        False)
+
+            if bias is None:
+                return pre_bias
+
+            return _C_ops.elementwise_add(pre_bias, bias)
+        else:
+            helper = LayerHelper('linear', **locals())
+            dtype = x.dtype
+
+            check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
+                                     'linear')
+            check_dtype(dtype, 'dtype', ['float16', 'float32', 'float64'],
+                        'linear')
+
+            inputs = {'X': [x], 'Y': [weight]}
+            attrs = {'trans_x': False, 'trans_y': False}
+            tmp = helper.create_variable_for_type_inference(dtype)
+            helper.append_op(
+                type='matmul_v2',
+                inputs=inputs,
+                outputs={'Out': tmp},
+                attrs=attrs)
+            if bias is not None:
+                res = helper.create_variable_for_type_inference(dtype)
+                helper.append_op(
+                    type='elementwise_add',
+                    inputs={'X': [tmp],
+                            'Y': [bias]},
+                    outputs={'Out': [res]},
+                    attrs={'axis': len(x.shape) - 1})
+            else:
+                res = tmp
+            return res


 def label_smooth(label, prior_dist=None, epsilon=0.1, name=None):
......
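`linear` gets the same treatment: the new eager branch lowers to a final-state matmul followed by a final-state add, while legacy dygraph and static graph keep their old lowering. The math is unchanged, as a quick runnable equivalence check shows (shapes chosen arbitrarily):

    import paddle
    import paddle.nn.functional as F

    x = paddle.randn([2, 4])
    w = paddle.randn([4, 3])
    b = paddle.randn([3])
    out = F.linear(x, w, b)        # matmul + add under the hood
    ref = paddle.matmul(x, w) + b  # the same computation, spelled out
    print(bool(paddle.allclose(out, ref)))  # True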