diff --git a/paddle/fluid/operators/expand_op.cc b/paddle/fluid/operators/expand_op.cc
index b95373178d458dc3b3619bcbc056b24c8a05d6be..09c730db3951dd5e7b28e95535da2775f4745428 100644
--- a/paddle/fluid/operators/expand_op.cc
+++ b/paddle/fluid/operators/expand_op.cc
@@ -226,8 +226,11 @@ REGISTER_OP_CPU_KERNEL(
     expand, ops::ExpandKernel<paddle::platform::CPUDeviceContext, float>,
     ops::ExpandKernel<paddle::platform::CPUDeviceContext, double>,
     ops::ExpandKernel<paddle::platform::CPUDeviceContext, int>,
+    ops::ExpandKernel<paddle::platform::CPUDeviceContext, int64_t>,
     ops::ExpandKernel<paddle::platform::CPUDeviceContext, bool>);
 REGISTER_OP_CPU_KERNEL(
     expand_grad,
     ops::ExpandGradKernel<paddle::platform::CPUDeviceContext, float>,
-    ops::ExpandGradKernel<paddle::platform::CPUDeviceContext, double>);
+    ops::ExpandGradKernel<paddle::platform::CPUDeviceContext, double>,
+    ops::ExpandGradKernel<paddle::platform::CPUDeviceContext, int>,
+    ops::ExpandGradKernel<paddle::platform::CPUDeviceContext, int64_t>);
diff --git a/paddle/fluid/operators/expand_op.cu b/paddle/fluid/operators/expand_op.cu
index 50a506b294db14f0d170c60a0ed760dcf280ad60..cf913f56dde80df5089e085c26aeb75b0030e9f2 100644
--- a/paddle/fluid/operators/expand_op.cu
+++ b/paddle/fluid/operators/expand_op.cu
@@ -18,8 +18,11 @@ REGISTER_OP_CUDA_KERNEL(
     expand, ops::ExpandKernel<paddle::platform::CUDADeviceContext, float>,
     ops::ExpandKernel<paddle::platform::CUDADeviceContext, double>,
     ops::ExpandKernel<paddle::platform::CUDADeviceContext, int>,
+    ops::ExpandKernel<paddle::platform::CUDADeviceContext, int64_t>,
     ops::ExpandKernel<paddle::platform::CUDADeviceContext, bool>);
 REGISTER_OP_CUDA_KERNEL(
     expand_grad,
     ops::ExpandGradKernel<paddle::platform::CUDADeviceContext, float>,
-    ops::ExpandGradKernel<paddle::platform::CUDADeviceContext, double>);
+    ops::ExpandGradKernel<paddle::platform::CUDADeviceContext, double>,
+    ops::ExpandGradKernel<paddle::platform::CUDADeviceContext, int>,
+    ops::ExpandGradKernel<paddle::platform::CUDADeviceContext, int64_t>);
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index 2b0759518dc5cf166a4d58a70f631e1a80461e3b..6dfe56610a2a29263418a0e960054a578d476207 100755
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -12077,10 +12077,21 @@ def expand(x, expand_times, name=None):
             expanded_2 = fluid.layers.expand(data_2, expand_times=expand_times)
             # the shape of expanded_2 is [48, 56].
 
     """
-
+    if not isinstance(x, Variable):
+        raise TypeError(
+            "The type of 'x' in expand must be Variable, but received %s"
+            % (type(x)))
     if not isinstance(expand_times, (list, tuple, Variable)):
         raise ValueError(
             "Input expand_times must be an Variable, python list or tuple.")
+    if convert_dtype(
+            x.dtype) not in ['bool', 'float32', 'float64', 'int32', 'int64']:
+        raise TypeError(
+            "The data type of input in expand must be one of bool, float32, float64, int32 or int64, but received %s."
+            % (convert_dtype(x.dtype)))
+    if convert_dtype(x.dtype) == 'bool' and x.stop_gradient == True:
+        raise ValueError(
+            "expand op bool data type must set the stop_gradient to be False")
     helper = LayerHelper('expand', input=x, **locals())
     inputs = {"X": x}
diff --git a/python/paddle/fluid/tests/unittests/test_expand_op.py b/python/paddle/fluid/tests/unittests/test_expand_op.py
index 449cda29b45ba4c9ac7aa20d40d04dd3c6a4496f..b4efda63e10ca4ac0aa1681227e8472d459deb9a 100644
--- a/python/paddle/fluid/tests/unittests/test_expand_op.py
+++ b/python/paddle/fluid/tests/unittests/test_expand_op.py
@@ -18,6 +18,7 @@ import unittest
 import numpy as np
 from op_test import OpTest
 import paddle.fluid as fluid
+from paddle.fluid import compiler, Program, program_guard
 
 
 # Situation 1: expand_times is a list(without tensor)
@@ -176,6 +177,36 @@ class TestExpandOpBoolean(OpTest):
     def test_check_output(self):
         self.check_output()
+
+# Situation 5: input x is int64
+class TestExpandOpInt64_t(OpTest):
+    def setUp(self):
+        self.op_type = "expand"
+        self.inputs = {
+            'X': np.random.randint(
+                10, size=(2, 4, 5)).astype("int64")
+        }
+        self.attrs = {'expand_times': [2, 1, 4]}
+        output = np.tile(self.inputs['X'], (2, 1, 4))
+        self.outputs = {'Out': output}
+
+    def test_check_output(self):
+        self.check_output()
+
+
+class TestExpandError(OpTest):
+    def test_errors(self):
+        with program_guard(Program(), Program()):
+            x1 = fluid.create_lod_tensor(
+                np.array([[-1]]), [[1]], fluid.CPUPlace())
+            expand_times = [2, 2]
+            self.assertRaises(TypeError, fluid.layers.expand, x1, expand_times)
+            x2 = fluid.layers.data(name='x2', shape=[4], dtype="uint8")
+            self.assertRaises(TypeError, fluid.layers.expand, x2, expand_times)
+            x3 = fluid.layers.data(name='x3', shape=[4], dtype="bool")
+            x3.stop_gradient = True
+            self.assertRaises(ValueError, fluid.layers.expand, x3, expand_times)
+
 
 # Test python API
 class TestExpandAPI(OpTest):
     def test_api(self):