Unverified · Commit db8f4082, authored by danleifeng, committed by GitHub

fix check info for flatten hash is_empty op; test=develop (#24499)

Parent 40304ab3
@@ -29,17 +29,17 @@ class FlattenOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
-                      "Input (X) of Flatten op should not be null.");
-    PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
-                      "Output (Output) of Flatten op should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "Flatten");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "Flatten");
     const auto &axis = ctx->Attrs().Get<int>("axis");
     const auto &in_dims = ctx->GetInputDim("X");
     PADDLE_ENFORCE_GE(axis, 0,
-                      "The axis should be greater than or equal to 0.");
+                      platform::errors::InvalidArgument(
+                          "The axis should be greater than or equal to 0."));
     PADDLE_ENFORCE_LE(
         axis, in_dims.size(),
-        "The axis should be less than or equal to input tensor's rank.");
+        platform::errors::InvalidArgument(
+            "The axis should be less than or equal to input tensor's rank."));
     const auto &out_dims = GetOutputShape(axis, in_dims);
     ctx->SetOutputDim("Out", framework::make_ddim(out_dims));
@@ -161,17 +161,17 @@ class Flatten2Op : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
-                      "Input (X) of Flatten op should not be null.");
-    PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
-                      "Output (Output) of Flatten op should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "Flatten2");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "Flatten2");
     const auto &axis = ctx->Attrs().Get<int>("axis");
     const auto &in_dims = ctx->GetInputDim("X");
     PADDLE_ENFORCE_GE(axis, 0,
-                      "The axis should be greater than or equal to 0.");
+                      platform::errors::InvalidArgument(
+                          "The axis should be greater than or equal to 0."));
     PADDLE_ENFORCE_LE(
         axis, in_dims.size(),
-        "The axis should be less than or equal to input tensor's rank.");
+        platform::errors::InvalidArgument(
+            "The axis should be less than or equal to input tensor's rank"));
     const auto &out_dims = FlattenOp::GetOutputShape(axis, in_dims);
     ctx->SetOutputDim("Out", framework::make_ddim(out_dims));
@@ -181,8 +181,7 @@ class Flatten2Op : public framework::OperatorWithKernel {
       ctx->ShareLoD("X", "Out");
     }
-    PADDLE_ENFORCE_EQ(ctx->HasOutput("XShape"), true,
-                      "Output (XShape) of Flatten op should not be null.");
+    OP_INOUT_CHECK(ctx->HasOutput("XShape"), "Output", "XShape", "Flatten2");
     std::vector<int64_t> xshape_dims(in_dims.size() + 1);
     xshape_dims[0] = 0;
     for (int i = 0; i < in_dims.size(); ++i) {
@@ -223,10 +222,10 @@ class Flatten2GradOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
   void InferShape(framework::InferShapeContext *context) const override {
-    PADDLE_ENFORCE_EQ(context->HasInput("XShape"), true,
-                      "Input(XShape) shouldn't be null.");
-    PADDLE_ENFORCE_EQ(context->HasInput(framework::GradVarName("Out")), true,
-                      "Input(Out@GRAD) shouldn't be null.");
+    OP_INOUT_CHECK(context->HasInput("XShape"), "Input", "XShape",
+                   "Flatten2Grad");
+    OP_INOUT_CHECK(context->HasInput(framework::GradVarName("Out")), "Input",
+                   framework::GradVarName("Out"), "Flatten2Grad");
     auto xshape_dims = context->GetInputDim("XShape");
     auto x_dims = framework::slice_ddim(xshape_dims, 1, xshape_dims.size());
     context->SetOutputDim(framework::GradVarName("X"), x_dims);
......
@@ -26,14 +26,13 @@ class HashOp : public framework::OperatorWithKernel {
       : OperatorWithKernel(type, inputs, outputs, attrs) {}
   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "Input(X) of HashOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of HashOp should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "Hash");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "Hash");
     auto dims = ctx->GetInputDim("X");
     PADDLE_ENFORCE_EQ(dims.size(), 2UL,
-                      "The input of hash_op's dimensions must be 2");
+                      platform::errors::InvalidArgument(
+                          "The input of hash_op's dimensions must be 2"));
     std::vector<int64_t> out_dims;
     int num_hash = ctx->Attrs().Get<int>("num_hash");
     HashOutputSize(dims, out_dims, num_hash);
......
@@ -25,10 +25,8 @@ class IsEmptyOp : public framework::OperatorWithKernel {
  protected:
   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "Input(X) of IsEmptyOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of IsEmptyOp should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "IsEmpty");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "IsEmpty");
     ctx->SetOutputDim("Out", {1});
   }
......
@@ -26,7 +26,7 @@ import numpy
 import warnings
 import six
 from functools import reduce, partial
-from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type
+from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
 from ... import compat as cpt
 from ..backward import _infer_var_data_type_shape_
@@ -3607,15 +3607,15 @@ def is_empty(x, cond=None):
             # fluid.layers.is_empty(x=input, cond=res)
     """
+    check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
+                             'is_empty')
+    check_type(cond, 'cond', (Variable, type(None)), 'is_empty')
     helper = LayerHelper("is_empty", **locals())
     if cond is None:
         cond = helper.create_variable_for_type_inference(dtype='bool')
         cond.stop_gradient = True
-    elif not isinstance(cond, Variable):
-        raise TypeError("cond takes a variable")
-    elif cond.dtype != 'bool':
-        raise TypeError("The data type of cond must be bool")
+    else:
+        check_dtype(cond.dtype, 'cond', ['bool'], 'is_empty')
     helper.append_op(
         type='is_empty', inputs={'X': [x]}, outputs={'Out': [cond]})
     return cond
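
For reference, a minimal usage sketch of the checks added above (assuming a Paddle 1.x fluid environment; names and shapes are illustrative only): x must be a Variable of dtype float32/float64/int32/int64, and a user-supplied cond must be a bool Variable, otherwise the checkers raise TypeError.

import paddle.fluid as fluid

with fluid.program_guard(fluid.Program(), fluid.Program()):
    # x: a float32 Variable, accepted by check_variable_and_dtype
    x = fluid.layers.data(name="x", shape=[3, 2], dtype="float32")
    # cond is omitted, so a bool Variable is created internally
    res = fluid.layers.is_empty(x=x)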
@@ -10854,6 +10854,8 @@ def flatten(x, axis=1, name=None):
             out = fluid.layers.flatten(x=x, axis=2)
             # out shape is [16, 3]
     """
+    check_variable_and_dtype(
+        x, 'x', ['float32', 'float64', 'int8', 'int32', 'int64'], 'flatten')
     helper = LayerHelper('flatten', **locals())
     if not (isinstance(x, Variable)):
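
A minimal sketch of calling flatten under the new dtype check (Paddle 1.x fluid assumed; names and shapes are illustrative): only float32/float64/int8/int32/int64 Variables are accepted, so a float16 input like the one in the test added later in this diff raises TypeError.

import paddle.fluid as fluid

with fluid.program_guard(fluid.Program(), fluid.Program()):
    # x passes check_variable_and_dtype: a float32 Variable of shape [4, 4, 3]
    x = fluid.layers.data(
        name="x", shape=[4, 4, 3], dtype="float32", append_batch_size=False)
    out = fluid.layers.flatten(x=x, axis=2)  # out shape is [16, 3]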
@@ -13691,6 +13693,9 @@ def hash(input, hash_size, num_hash=1, name=None):
             #   [386]
             #   [901]]]
     """
+    check_variable_and_dtype(input, 'input', ['int32', 'int64'], 'hash')
+    check_type(hash_size, 'hash_size', ['int32', 'int64'], 'hash')
+    check_type(num_hash, 'num_hash', ['int32', 'int64'], 'hash')
     helper = LayerHelper('hash', **locals())
     out = helper.create_variable_for_type_inference(
         helper.input_dtype(), stop_gradient=True)
......
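
And a minimal sketch for hash with the new argument checks (Paddle 1.x fluid assumed; sizes are illustrative): input must be an int32/int64 Variable, and hash_size / num_hash must be integers, otherwise a TypeError is raised.

import paddle.fluid as fluid

with fluid.program_guard(fluid.Program(), fluid.Program()):
    # int64 LoDTensor input, as required by check_variable_and_dtype
    words = fluid.layers.data(name="words", shape=[1], dtype="int64", lod_level=1)
    # hash_size and num_hash are plain Python ints, accepted by check_type
    out = fluid.layers.hash(input=words, hash_size=10000, num_hash=4)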
@@ -16,7 +16,7 @@ from __future__ import print_function
 import unittest
 import numpy as np
+import paddle.fluid as fluid
 from op_test import OpTest
@@ -69,5 +69,25 @@ class TestFlattenOpSixDims(TestFlattenOp):
         self.new_shape = (36, 16)
+
+
+class TestFlatten2OpError(unittest.TestCase):
+    def test_errors(self):
+        with fluid.program_guard(fluid.Program(), fluid.Program()):
+            input_data = np.random.random((3, 2, 4, 5)).astype("float64")
+
+            def test_Variable():
+                # the input type must be Variable
+                fluid.layers.flatten(input_data, axis=1)
+
+            self.assertRaises(TypeError, test_Variable)
+
+            def test_type():
+                # dtype must be float32, float64, int8, int32, int64.
+                x2 = fluid.layers.data(
+                    name='x2', shape=[3, 2, 4, 5], dtype='float16')
+                fluid.layers.flatten(x2, axis=1)
+
+            self.assertRaises(TypeError, test_type)
+
+
 if __name__ == "__main__":
     unittest.main()
@@ -15,6 +15,7 @@
 import unittest
 import numpy as np
 from op_test import OpTest
+import paddle.fluid as fluid
 class TestHashOp(OpTest):
@@ -102,5 +103,41 @@ class TestHashOp3(TestHashOp):
         self.check_output()
+
+
+class TestHashOpError(unittest.TestCase):
+    def test_errors(self):
+        with fluid.program_guard(fluid.Program(), fluid.Program()):
+            input_data = np.random.randint(0, 10, (8, 1)).astype("int32")
+
+            def test_Variable():
+                # the input type must be Variable
+                fluid.layers.hash(input=input_data, hash_size=2**32)
+
+            self.assertRaises(TypeError, test_Variable)
+
+            def test_type():
+                # dtype must be int32, int64.
+                x2 = fluid.layers.data(
+                    name='x2', shape=[1], dtype="float32", lod_level=1)
+                fluid.layers.hash(input=x2, hash_size=2**32)
+
+            self.assertRaises(TypeError, test_type)
+
+            def test_hash_size_type():
+                # hash_size dtype must be int32, int64.
+                x3 = fluid.layers.data(
+                    name='x3', shape=[1], dtype="int32", lod_level=1)
+                fluid.layers.hash(input=x3, hash_size=1024.5)
+
+            self.assertRaises(TypeError, test_hash_size_type)
+
+            def test_num_hash_type():
+                # num_hash dtype must be int32, int64.
+                x4 = fluid.layers.data(
+                    name='x4', shape=[1], dtype="int32", lod_level=1)
+                fluid.layers.hash(input=x4, hash_size=2**32, num_hash=2.5)
+
+            self.assertRaises(TypeError, test_num_hash_type)
+
+
 if __name__ == "__main__":
     unittest.main()
@@ -17,6 +17,7 @@ from __future__ import print_function
 import unittest
 import numpy as np
 from op_test import OpTest
+import paddle.fluid as fluid
 class TestEmpty(OpTest):
@@ -36,5 +37,42 @@ class TestNotEmpty(TestEmpty):
         self.outputs = {'Out': np.array([True])}
+
+
+class TestIsEmptyOpError(unittest.TestCase):
+    def test_errors(self):
+        with fluid.program_guard(fluid.Program(), fluid.Program()):
+            input_data = np.random.random((3, 2)).astype("float64")
+
+            def test_Variable():
+                # the input type must be Variable
+                fluid.layers.is_empty(x=input_data)
+
+            self.assertRaises(TypeError, test_Variable)
+
+            def test_cond_Variable():
+                # cond type must be Variable or None
+                x2 = fluid.layers.data(name="x2", shape=[3, 2], dtype="float32")
+                cond_data = np.random.random((3, 2)).astype("float32")
+                fluid.layers.is_empty(x=x2, cond=cond_data)
+
+            self.assertRaises(TypeError, test_cond_Variable)
+
+            def test_type():
+                # dtype must be float32, float64, int32, int64
+                x3 = fluid.layers.data(
+                    name="x3", shape=[4, 32, 32], dtype="bool")
+                res = fluid.layers.is_empty(x=x3)
+
+            self.assertRaises(TypeError, test_type)
+
+            def test_cond_type():
+                # cond dtype must be bool.
+                x4 = fluid.layers.data(name="x4", shape=[3, 2], dtype="float32")
+                cond = fluid.layers.data(
+                    name="cond", shape=[1], dtype="float32")
+                fluid.layers.is_empty(x=x4, cond=cond)
+
+            self.assertRaises(TypeError, test_cond_type)
+
+
 if __name__ == "__main__":
     unittest.main()