Unverified commit 4c11be86, authored by Jiabin Yang, committed by GitHub

fix_ci_problem3 (#41484) (#41705)

* fix_ci_problem3

* support windows no default error
Parent 7f1e81fd
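Most of the Python changes in this commit apply one recurring refactor: each test_* method is split into a func_* body plus a test_* wrapper that runs the body twice, once inside fluid.framework._test_eager_guard() (eager dygraph) and once outside it (legacy dygraph), so every case covers both modes. A minimal sketch of the pattern, assuming the Paddle version of this commit (_test_eager_guard is a private test helper of that version; ExampleTest and its body are illustrative, not from the diff):

    import unittest

    import paddle
    from paddle import fluid


    class ExampleTest(unittest.TestCase):
        def func_case(self):
            # Shared test body; runs under whichever dygraph mode is active.
            x = paddle.to_tensor([1.0, 2.0])
            self.assertEqual(float(x.sum()), 3.0)

        def test_case(self):
            # unittest only collects test_*; run the body in both modes.
            with fluid.framework._test_eager_guard():
                self.func_case()
            self.func_case()


    if __name__ == '__main__':
        unittest.main()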
@@ -22,10 +22,10 @@
 #include "paddle/fluid/platform/profiler.h"
 #include "paddle/fluid/platform/profiler/event_tracing.h"
 
-#include "glog/logging.h"
 #include "paddle/fluid/platform/enforce.h"
 #include "paddle/fluid/platform/errors.h"
+#include "glog/logging.h"
+#include "paddle/phi/kernels/autotune/switch_autotune.h"
 
 namespace egr {
@@ -799,6 +799,7 @@ void Backward(
   paddle::platform::RecordEvent backward_record_event(
       "backward", paddle::platform::TracerEventType::Operator, 1);
   RunBackward(tensors, grad_tensors, retain_graph);
+  phi::autotune::AutoTuneStatus::Instance().Update();
 }
 
 std::vector<paddle::experimental::Tensor> Grad(
......
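The C++ hunks above hook kernel auto-tuning into the eager backward entry point: every Backward() call now finishes by advancing phi::autotune::AutoTuneStatus one step. Seen from Python, each backward() in a dygraph training loop ticks the auto-tune state machine once. An illustrative toy loop, assuming the Paddle 2.3-era dygraph API (no new API here, only the added side effect):

    import paddle

    # Each loss.backward() below now also triggers
    # AutoTuneStatus::Instance().Update() inside eager Backward().
    linear = paddle.nn.Linear(4, 4)
    x = paddle.randn([2, 4])
    for step in range(3):
        loss = linear(x).sum()
        loss.backward()          # RunBackward(...) then autotune Update()
        linear.clear_gradients()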
@@ -46,7 +46,7 @@ class TestGraphKhopSampler(unittest.TestCase):
         self.sample_sizes = [5, 5]
         self.dst_src_dict = dst_src_dict
 
-    def test_sample_result(self):
+    def func_sample_result(self):
         paddle.disable_static()
         row = paddle.to_tensor(self.row)
         colptr = paddle.to_tensor(self.colptr)
@@ -79,13 +79,25 @@ class TestGraphKhopSampler(unittest.TestCase):
             # Ensure the correct sample neighbors.
             self.assertTrue(np.sum(in_neighbors) == in_neighbors.shape[0])
 
-    def test_uva_sample_result(self):
+    def test_sample_result(self):
+        with fluid.framework._test_eager_guard():
+            self.func_sample_result()
+        self.func_sample_result()
+
+    def func_uva_sample_result(self):
         paddle.disable_static()
         if paddle.fluid.core.is_compiled_with_cuda():
-            row = paddle.fluid.core.to_uva_tensor(
-                self.row.astype(self.row.dtype))
-            sorted_eid = paddle.fluid.core.to_uva_tensor(
-                self.sorted_eid.astype(self.sorted_eid.dtype))
+            row = None
+            if fluid.framework.in_dygraph_mode():
+                row = paddle.fluid.core.eager.to_uva_tensor(
+                    self.row.astype(self.row.dtype), 0)
+                sorted_eid = paddle.fluid.core.eager.to_uva_tensor(
+                    self.sorted_eid.astype(self.sorted_eid.dtype), 0)
+            else:
+                row = paddle.fluid.core.to_uva_tensor(
+                    self.row.astype(self.row.dtype))
+                sorted_eid = paddle.fluid.core.to_uva_tensor(
+                    self.sorted_eid.astype(self.sorted_eid.dtype))
             colptr = paddle.to_tensor(self.colptr)
             nodes = paddle.to_tensor(self.nodes)
@@ -114,6 +126,11 @@ class TestGraphKhopSampler(unittest.TestCase):
             in_neighbors = np.isin(edge_src_n.numpy(), self.dst_src_dict[n])
             self.assertTrue(np.sum(in_neighbors) == in_neighbors.shape[0])
 
+    def test_uva_sample_result(self):
+        with fluid.framework._test_eager_guard():
+            self.func_uva_sample_result()
+        self.func_uva_sample_result()
+
     def test_sample_result_static_with_eids(self):
         paddle.enable_static()
         with paddle.static.program_guard(paddle.static.Program()):
......
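Note on the to_uva_tensor branch above: the UVA (unified virtual addressing) constructor has two bindings in this Paddle version, and the test now picks one by mode, since the eager binding lives under paddle.fluid.core.eager and takes an explicit device id while the legacy binding takes only the array. A standalone sketch of that branch (CUDA build required; device id 0 is assumed, as in the test):

    import numpy as np
    import paddle
    from paddle import fluid

    arr = np.arange(6, dtype='int64')
    if paddle.is_compiled_with_cuda():
        if fluid.framework.in_dygraph_mode():
            # Eager binding: (array, device_id)
            t = paddle.fluid.core.eager.to_uva_tensor(arr, 0)
        else:
            # Legacy binding: array only
            t = paddle.fluid.core.to_uva_tensor(arr)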
@@ -87,12 +87,22 @@ class TestDygraphAutoTuneStatus(TestAutoTune):
         }
         self.check_status(expected_res)
 
-    def test_enable_autotune(self):
+    def func_enable_autotune(self):
         self.run_program(enable_autotune=True)
 
-    def test_disable_autotune(self):
+    def test_enable_autotune(self):
+        with paddle.fluid.framework._test_eager_guard():
+            self.func_enable_autotune()
+        self.func_enable_autotune()
+
+    def func_disable_autotune(self):
         self.run_program(enable_autotune=False)
 
+    def test_disable_autotune(self):
+        with paddle.fluid.framework._test_eager_guard():
+            self.func_disable_autotune()
+        self.func_disable_autotune()
+
 
 class TestStaticAutoTuneStatus(TestAutoTune):
     def run_program(self, enable_autotune):
@@ -136,12 +146,22 @@ class TestStaticAutoTuneStatus(TestAutoTune):
         self.check_status(expected_res)
         paddle.disable_static()
 
-    def test_enable_autotune(self):
+    def func_enable_autotune(self):
         self.run_program(enable_autotune=True)
 
-    def test_disable_autotune(self):
+    def test_enable_autotune(self):
+        with paddle.fluid.framework._test_eager_guard():
+            self.func_enable_autotune()
+        self.func_enable_autotune()
+
+    def func_disable_autotune(self):
         self.run_program(enable_autotune=False)
 
+    def test_disable_autotune(self):
+        with paddle.fluid.framework._test_eager_guard():
+            self.func_disable_autotune()
+        self.func_disable_autotune()
+
 
 if __name__ == '__main__':
     unittest.main()
@@ -16,6 +16,7 @@ from __future__ import print_function
 import unittest
 import numpy as np
+import paddle
 from paddle import to_tensor
 from paddle.nn.functional import zeropad2d
 from paddle.nn import ZeroPad2D
@@ -33,7 +34,7 @@ class TestZeroPad2dAPIError(unittest.TestCase):
         self.shape = [4, 3, 224, 224]
         self.unsupport_dtypes = ['bool', 'int8']
 
-    def test_unsupport_dtypes(self):
+    def func_unsupport_dtypes(self):
         """
         test unsupport dtypes.
         """
@@ -43,6 +44,11 @@ class TestZeroPad2dAPIError(unittest.TestCase):
             x_tensor = to_tensor(x).astype(dtype)
             self.assertRaises(TypeError, zeropad2d, x=x_tensor, padding=pad)
 
+    def test_unsupport_dtypes(self):
+        with paddle.fluid.framework._test_eager_guard():
+            self.func_unsupport_dtypes()
+        self.func_unsupport_dtypes()
+
 
 class TestZeroPad2dAPI(unittest.TestCase):
     """
@@ -56,7 +62,7 @@ class TestZeroPad2dAPI(unittest.TestCase):
         self.shape = [4, 3, 224, 224]
         self.support_dtypes = ['float32', 'float64', 'int32', 'int64']
 
-    def test_support_dtypes(self):
+    def func_support_dtypes(self):
         """
         test support types
         """
@@ -69,7 +75,12 @@ class TestZeroPad2dAPI(unittest.TestCase):
             ret_res = zeropad2d(x_tensor, [pad, pad, pad, pad]).numpy()
             self.assertTrue(np.allclose(expect_res, ret_res))
 
-    def test_support_pad2(self):
+    def test_support_dtypes(self):
+        with paddle.fluid.framework._test_eager_guard():
+            self.func_support_dtypes()
+        self.func_support_dtypes()
+
+    def func_support_pad2(self):
         """
         test the type of 'pad' is list.
         """
@@ -82,7 +93,12 @@ class TestZeroPad2dAPI(unittest.TestCase):
             ret_res = zeropad2d(x_tensor, pad).numpy()
             self.assertTrue(np.allclose(expect_res, ret_res))
 
-    def test_support_pad3(self):
+    def test_support_pad2(self):
+        with paddle.fluid.framework._test_eager_guard():
+            self.func_support_pad2()
+        self.func_support_pad2()
+
+    def func_support_pad3(self):
         """
         test the type of 'pad' is tuple.
         """
@@ -95,7 +111,12 @@ class TestZeroPad2dAPI(unittest.TestCase):
             ret_res = zeropad2d(x_tensor, pad).numpy()
             self.assertTrue(np.allclose(expect_res, ret_res))
 
-    def test_support_pad4(self):
+    def test_support_pad3(self):
+        with paddle.fluid.framework._test_eager_guard():
+            self.func_support_pad3()
+        self.func_support_pad3()
+
+    def func_support_pad4(self):
         """
         test the type of 'pad' is paddle.Tensor.
         """
@@ -109,6 +130,11 @@ class TestZeroPad2dAPI(unittest.TestCase):
             ret_res = zeropad2d(x_tensor, pad_tensor).numpy()
             self.assertTrue(np.allclose(expect_res, ret_res))
 
+    def test_support_pad4(self):
+        with paddle.fluid.framework._test_eager_guard():
+            self.func_support_pad4()
+        self.func_support_pad4()
+
 
 class TestZeroPad2DLayer(unittest.TestCase):
     """
@@ -124,12 +150,17 @@ class TestZeroPad2DLayer(unittest.TestCase):
                             [[0, 0], [0, 0], [self.pad[2], self.pad[3]],
                              [self.pad[0], self.pad[1]]])
 
-    def test_layer(self):
+    def func_layer(self):
         self.assertTrue(
             np.allclose(
                 zeropad2d(to_tensor(self.x), self.pad).numpy(),
                 self.padLayer(to_tensor(self.x))))
 
+    def test_layer(self):
+        with paddle.fluid.framework._test_eager_guard():
+            self.func_layer()
+        self.func_layer()
+
 
 if __name__ == '__main__':
     unittest.main()
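For reference, the API exercised by these tests: zeropad2d(x, padding) zero-pads the last two dimensions of a 4-D tensor, with padding given as [left, right, top, bottom]. A quick usage example (shapes chosen for illustration):

    import paddle
    from paddle.nn.functional import zeropad2d

    x = paddle.ones([1, 1, 2, 3])      # NCHW
    y = zeropad2d(x, [1, 1, 0, 0])     # one zero column on each side of W
    print(y.shape)                     # [1, 1, 2, 5]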
@@ -1356,29 +1356,31 @@ def pad(x, pad, mode='constant', value=0, data_format="NCHW", name=None):
         unsqueezed_dim = [1]
         x = unsqueeze(x, axis=unsqueezed_dim)
 
-    if in_dynamic_mode():
+    if in_dygraph_mode():
         if isinstance(pad, Variable):
-            pad = pad.numpy()
+            pad = pad.numpy().tolist()
+        out = _C_ops.final_state_pad3d(x, pad, mode, value, data_format)
+    else:
         if _in_legacy_dygraph():
+            if isinstance(pad, Variable):
+                pad = pad.numpy().tolist()
             out = _C_ops.pad3d(x, "paddings", pad, "mode", mode, "value", value,
                                "data_format", data_format, "name", name)
         else:
-            out = _C_ops.final_state_pad3d(x, pad, mode, value, data_format)
-    else:
-        attrs = {'mode': mode, 'value': value, 'data_format': data_format}
-        inputs = {'X': [x]}
-        if isinstance(pad, Variable):
-            inputs['Paddings'] = [pad]
-            attrs['paddings'] = []
-        else:
-            attrs['paddings'] = pad
+            attrs = {'mode': mode, 'value': value, 'data_format': data_format}
+            inputs = {'X': [x]}
+            if isinstance(pad, Variable):
+                inputs['Paddings'] = [pad]
+                attrs['paddings'] = []
+            else:
+                attrs['paddings'] = pad
 
-        helper = LayerHelper('pad3d', **locals())
+            helper = LayerHelper('pad3d', **locals())
 
-        dtype = helper.input_dtype(input_param_name='input')
-        out = helper.create_variable_for_type_inference(dtype)
-        helper.append_op(
-            type='pad3d', inputs=inputs, outputs={"Out": out}, attrs=attrs)
+            dtype = helper.input_dtype(input_param_name='input')
+            out = helper.create_variable_for_type_inference(dtype)
+            helper.append_op(
+                type='pad3d', inputs=inputs, outputs={"Out": out}, attrs=attrs)
 
     if len(unsqueezed_dim) != 0:
         out = squeeze(out, axis=unsqueezed_dim)
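This hunk turns pad()'s two-way in_dynamic_mode() split into a three-way dispatch: eager mode (in_dygraph_mode()) calls the new final_state_pad3d kernel, legacy dygraph (_in_legacy_dygraph()) keeps the old pad3d C op, and the static-graph path still builds a pad3d op through LayerHelper, now one indent level deeper. The public behavior should be identical on all three paths; a quick check through the stable API (example values are illustrative):

    import paddle
    import paddle.nn.functional as F

    x = paddle.ones([1, 1, 2, 3])                            # NCHW
    y = F.pad(x, [1, 1, 0, 0], mode='constant', value=0.0)   # pad W by 1 each side
    print(y.shape)                                           # [1, 1, 2, 5]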
@@ -1531,38 +1533,50 @@ def linear(x, weight, bias=None, name=None):
           #  [0.9440598 0.9440598 0.9440598 0.9440598 ]
           #  [2.1077576 2.1077576 2.1077576 2.1077576 ]]
     """
-    if in_dynamic_mode():
-        pre_bias = _C_ops.matmul_v2(x, weight, 'trans_x', False, 'trans_y',
-                                    False)
+    if in_dygraph_mode():
+        pre_bias = _C_ops.final_state_matmul(x, weight, False, False)
 
         if bias is None:
             return pre_bias
 
-        return _C_ops.elementwise_add(pre_bias, bias)
+        return _C_ops.final_state_add(pre_bias, bias)
     else:
-        helper = LayerHelper('linear', **locals())
-        dtype = x.dtype
+        if _in_legacy_dygraph():
+            pre_bias = _C_ops.matmul_v2(x, weight, 'trans_x', False, 'trans_y',
+                                        False)
 
-        check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
-                                 'linear')
-        check_dtype(dtype, 'dtype', ['float16', 'float32', 'float64'], 'linear')
+            if bias is None:
+                return pre_bias
 
-        inputs = {'X': [x], 'Y': [weight]}
-        attrs = {'trans_x': False, 'trans_y': False}
-        tmp = helper.create_variable_for_type_inference(dtype)
-        helper.append_op(
-            type='matmul_v2', inputs=inputs, outputs={'Out': tmp}, attrs=attrs)
-        if bias is not None:
-            res = helper.create_variable_for_type_inference(dtype)
-            helper.append_op(
-                type='elementwise_add',
-                inputs={'X': [tmp],
-                        'Y': [bias]},
-                outputs={'Out': [res]},
-                attrs={'axis': len(x.shape) - 1})
+            return _C_ops.elementwise_add(pre_bias, bias)
         else:
-            res = tmp
-        return res
+            helper = LayerHelper('linear', **locals())
+            dtype = x.dtype
+            check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
+                                     'linear')
+            check_dtype(dtype, 'dtype', ['float16', 'float32', 'float64'],
+                        'linear')
+            inputs = {'X': [x], 'Y': [weight]}
+            attrs = {'trans_x': False, 'trans_y': False}
+            tmp = helper.create_variable_for_type_inference(dtype)
+            helper.append_op(
+                type='matmul_v2',
+                inputs=inputs,
+                outputs={'Out': tmp},
+                attrs=attrs)
+            if bias is not None:
+                res = helper.create_variable_for_type_inference(dtype)
+                helper.append_op(
+                    type='elementwise_add',
+                    inputs={'X': [tmp],
+                            'Y': [bias]},
+                    outputs={'Out': [res]},
+                    attrs={'axis': len(x.shape) - 1})
+            else:
+                res = tmp
+            return res
 
 
 def label_smooth(label, prior_dist=None, epsilon=0.1, name=None):
......
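linear() gets the same three-way dispatch: eager mode uses final_state_matmul and final_state_add, legacy dygraph keeps matmul_v2 and elementwise_add, and the static-graph branch is unchanged apart from re-indentation. All branches compute matmul(x, weight) + bias, as in:

    import paddle
    import paddle.nn.functional as F

    x = paddle.ones([2, 4])
    w = paddle.ones([4, 3])
    b = paddle.zeros([3])
    out = F.linear(x, w, b)   # matmul(x, w) + b, whichever branch runs
    print(out.shape)          # [2, 3]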