Commit c657537b authored by Xinghai Sun

Correct some typos.

Parent b1a18552
@@ -62,7 +62,7 @@ class DropoutOpGrad : public framework::OperatorWithKernel {
   auto mask_dims = ctx.Input<Tensor>("Mask")->dims();
   auto out_dims = ctx.Input<Tensor>(framework::GradVarName("Out"))->dims();
   PADDLE_ENFORCE_EQ(x_dims, out_dims,
-                    "Dimensions of Input(X) and Out must be the same.");
+                    "Dimensions of Input(X) and Out@Grad must be the same.");
   PADDLE_ENFORCE_EQ(x_dims, mask_dims,
                     "Dimensions of Input(X) and Mask must be the same.");
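For context, the check corrected above guards the dropout backward pass: the incoming gradient Out@Grad is combined elementwise with the saved Mask to produce X@Grad, so both must match Input(X)'s dimensions. A minimal NumPy sketch of that relationship (an illustration only, not PaddlePaddle's actual kernel):

import numpy as np

def dropout_grad(d_out, mask):
    # Only positions kept in the forward pass (mask == 1) propagate
    # gradient back to X, so the shapes must agree elementwise.
    assert d_out.shape == mask.shape, \
        "Dimensions of Input(X) and Out@Grad must be the same."
    return d_out * mask

d_out = np.ones((32, 64), dtype="float32")                   # gradient w.r.t. Out
mask = (np.random.random((32, 64)) > 0.5).astype("float32")  # saved forward mask
d_x = dropout_grad(d_out, mask)                              # gradient w.r.t. X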
@@ -14,7 +14,7 @@ class TestDropoutOpProbZero(unittest.TestCase):
         self.outputs = {'Out': self.inputs['X'], 'Mask': np.ones((32, 64))}


-class TestDropoutOpAllProbOne(unittest.TestCase):
+class TestDropoutOpProbOne(unittest.TestCase):
     __metaclass__ = OpTestMeta

     def setUp(self):
@@ -24,7 +24,7 @@ class TestDropoutOpAllProbOne(unittest.TestCase):
         self.outputs = {'Out': np.zeros((32, 64)), 'Mask': np.zeros((32, 64))}


-class DropoutGradOpTest(GradientChecker):
+class TestDropoutGradOp(GradientChecker):
     def test_dropout_2d(self):
         op = create_op("dropout")
         inputs = {'X': np.random.random((10, 5)).astype("float32")}
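The renamed tests exercise dropout's two boundary probabilities: with drop probability 0 the output equals the input and the mask is all ones, and with probability 1 both output and mask are all zeros. A minimal NumPy sketch of the forward behavior those tests assert (an illustration under the tests' conventions, not the operator's real implementation):

import numpy as np

def dropout_forward(x, prob):
    # Each element survives with probability (1 - prob); no rescaling,
    # matching the expected outputs in the tests above.
    mask = (np.random.random(x.shape) >= prob).astype(x.dtype)
    return x * mask, mask

x = np.random.random((32, 64)).astype("float32")
out, mask = dropout_forward(x, prob=0.0)  # Out == X, Mask all ones
out, mask = dropout_forward(x, prob=1.0)  # Out and Mask all zeros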