Unverified Commit 7c577754 authored by Weilong Wu, committed by GitHub

clear test_dropout_op (#48657)

Parent 46371c53
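
In brief: these tests previously exercised dropout through the legacy dygraph interface (`core.ops.dropout`, driven by `_enable_legacy_dygraph()`), and kept duplicate `*_eager` variants under `_test_eager_guard()`. With eager mode now the default, the legacy call sites switch to `_C_ops.dropout` and the duplicate `*_eager` tests are deleted. Below is a minimal before/after sketch of the call-site change; the positional-argument meanings for `_C_ops.dropout` are inferred from the calls in this diff (x, seed tensor, p, is_test, mode, seed, fix_seed), not taken from its definition:

    # Before: legacy op interface; attributes passed as name/value string pairs.
    out, mask = core.ops.dropout(
        x, 'dropout_prob', 0.5, 'dropout_implementation', 'upscale_in_train'
    )

    # After: eager API; positional arguments, inferred as
    # (x, seed_tensor, p, is_test, mode, seed, fix_seed).
    out, mask = _C_ops.dropout(x, None, 0.5, False, 'upscale_in_train', 0, False)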
@@ -23,7 +23,6 @@ import paddle.fluid.core as core
 import paddle.static as static
 from paddle import _C_ops
 from paddle.fluid import Program, program_guard
-from paddle.fluid.framework import _enable_legacy_dygraph, _test_eager_guard


 class TestDropoutOp(OpTest):
@@ -1046,13 +1045,14 @@ class TestDropoutBackward(unittest.TestCase):
         return mask.astype("float32")

     def test_backward_downscale_in_infer(self):
-        _enable_legacy_dygraph()
         for place in self.places:
             with fluid.dygraph.guard(place):
                 input = paddle.uniform([40, 40], dtype="float32")
                 input.stop_gradient = False
-                out, mask = core.ops.dropout(input, 'dropout_prob', 0.5)
+                out, mask = _C_ops.dropout(
+                    input, None, 0.5, False, "downgrade_in_infer", 0, False
+                )
                 out.backward()

                 np.testing.assert_array_equal(
@@ -1060,35 +1060,15 @@ class TestDropoutBackward(unittest.TestCase):
                     self.cal_grad_downscale_in_infer(mask.numpy()),
                 )

-    def test_backward_downscale_in_infer_eager(self):
-        for place in self.places:
-            with fluid.dygraph.guard(place):
-                with _test_eager_guard():
-                    input = paddle.uniform([40, 40], dtype="float32")
-                    input.stop_gradient = False
-                    out, mask = _C_ops.dropout(
-                        input, None, 0.5, False, "downgrade_in_infer", 0, False
-                    )
-                    out.backward()
-                    np.testing.assert_array_equal(
-                        input.gradient(),
-                        self.cal_grad_downscale_in_infer(mask.numpy()),
-                    )
-
     def test_backward_upscale_train(self):
-        _enable_legacy_dygraph()
         for place in self.places:
             with fluid.dygraph.guard(place):
                 prob = 0.5
                 input = paddle.uniform([40, 40], dtype="float32")
                 input.stop_gradient = False
-                out, mask = core.ops.dropout(
-                    input,
-                    'dropout_prob',
-                    prob,
-                    "dropout_implementation",
-                    "upscale_in_train",
+                out, mask = _C_ops.dropout(
+                    input, None, 0.5, False, "upscale_in_train", 0, False
                 )
                 out.backward()
@@ -1098,38 +1078,15 @@ class TestDropoutBackward(unittest.TestCase):
                     rtol=1e-05,
                 )

-    def test_backward_upscale_train_eager(self):
-        for place in self.places:
-            with fluid.dygraph.guard(place):
-                with _test_eager_guard():
-                    prob = 0.5
-                    input = paddle.uniform([40, 40], dtype="float32")
-                    input.stop_gradient = False
-                    out, mask = _C_ops.dropout(
-                        input, None, 0.5, False, "upscale_in_train", 0, False
-                    )
-                    out.backward()
-                    np.testing.assert_allclose(
-                        input.gradient(),
-                        self.cal_grad_upscale_train(mask.numpy(), prob),
-                        rtol=1e-05,
-                    )
-
     def test_backward_upscale_train_2(self):
-        _enable_legacy_dygraph()
         for place in self.places:
             with fluid.dygraph.guard(place):
                 prob = 0.3
                 input = paddle.uniform([40, 40], dtype="float32")
                 input.stop_gradient = False
-                out, mask = core.ops.dropout(
-                    input,
-                    'dropout_prob',
-                    prob,
-                    "dropout_implementation",
-                    "upscale_in_train",
+                out, mask = _C_ops.dropout(
+                    input, None, 0.3, False, "upscale_in_train", 0, False
                 )
                 out.backward()
@@ -1139,26 +1096,6 @@ class TestDropoutBackward(unittest.TestCase):
                     rtol=1e-05,
                 )

-    def test_backward_upscale_train_2_eager(self):
-        for place in self.places:
-            with fluid.dygraph.guard(place):
-                with _test_eager_guard():
-                    prob = 0.3
-                    input = paddle.uniform([40, 40], dtype="float32")
-                    input.stop_gradient = False
-                    out, mask = _C_ops.dropout(
-                        input, None, 0.3, False, "upscale_in_train", 0, False
-                    )
-                    out.backward()
-                    np.testing.assert_allclose(
-                        input.gradient(),
-                        self.cal_grad_upscale_train(mask.numpy(), prob),
-                        rtol=1e-05,
-                    )
-

 class TestDropOutWithProbTensor(unittest.TestCase):
     def setUp(self):
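
For reference, the tests call the internal `_C_ops` binding directly so they can inspect the returned mask; ordinary user code reaches the same kernel through the public API instead. A small usage sketch using `paddle.nn.functional.dropout`, whose `mode` strings mirror those in the tests above (the internal "downgrade_in_infer" is spelled "downscale_in_infer" in the public API):

    import paddle
    import paddle.nn.functional as F

    x = paddle.uniform([40, 40], dtype='float32')
    x.stop_gradient = False
    # 'upscale_in_train' multiplies kept activations by 1/(1-p) during
    # training, so no rescaling is needed at inference time.
    out = F.dropout(x, p=0.5, training=True, mode='upscale_in_train')
    out.backward()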