Unverified · Commit 2e156ac8 · Authored by xiaoting, committed by GitHub

support 0d tensor for interpolate (#49929)

* support 0d tensor for interpolate

* support 0d tensor for interpolate

* add xpu unittest for interp

* update unittest for interpolate

* fix coverage

* fix code style

* fix for coverage

* fix coverage
Parent e2474595
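In short: after this change, paddle.nn.functional.interpolate accepts 0-D (scalar) tensors both as scale_factor and as the entries of size. A minimal usage sketch of what the commit enables, assuming a Paddle build at this commit:

import paddle
import paddle.nn.functional as F

x = paddle.rand([2, 3, 6, 6])

# scale_factor given as a single 0-D tensor
y1 = F.interpolate(x, scale_factor=paddle.full([], 2.0),
                   mode="bilinear", align_corners=False)

# size given as a list of 0-D int32 tensors
size = [paddle.full([], 12, dtype="int32"),
        paddle.full([], 12, dtype="int32")]
y2 = F.interpolate(x, size=size, mode="bilinear", align_corners=False)

print(y1.shape, y2.shape)  # [2, 3, 12, 12] for both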
@@ -1424,16 +1424,18 @@ static void Interpolate1DInferShapeCheck(
   if (scale_tensor) {
     auto scale_tensor_dim = scale_tensor.dims();
     PADDLE_ENFORCE_EQ(
-        scale_tensor_dim.size(),
-        1,
+        scale_tensor_dim.size() == 1 || scale_tensor_dim.size() == 0,
+        true,
         phi::errors::InvalidArgument(
-            "Scale's dimension size must be 1, but got dimension = %d .",
+            "Scale's dimension size must be 1 or 0, but got dimension = %d .",
             scale_tensor_dim.size()));
-    PADDLE_ENFORCE_EQ(scale_tensor_dim[0],
-                      1,
-                      phi::errors::InvalidArgument(
-                          "Scale's shape must be 1, but got shape = %d .",
-                          scale_tensor_dim[0]));
+    if (scale_tensor_dim.size() == 1) {
+      PADDLE_ENFORCE_EQ(scale_tensor_dim[0],
+                        1,
+                        phi::errors::InvalidArgument(
+                            "Scale's shape must be 1, but got shape = %d .",
+                            scale_tensor_dim[0]));
+    }
     out_w_tmp = -1;
   } else {
     if (scale.size() > 0) {
@@ -1550,19 +1552,25 @@ static void Interpolate2DInferShapeCheck(
   }
   int out_h_tmp, out_w_tmp;
   if (scale_tensor) {
     auto scale_tensor_dim = scale_tensor.dims();
     PADDLE_ENFORCE_EQ(
-        scale_tensor_dim.size(),
-        1,
+        scale_tensor_dim.size() == 1 || scale_tensor_dim.size() == 0,
+        true,
         phi::errors::InvalidArgument(
-            "Scale's dimension size must be 1, but got dimension = %d .",
+            "Scale's dimension size must be 1 or 0, but got dimension = %d .",
             scale_tensor_dim.size()));
-    PADDLE_ENFORCE_EQ(scale_tensor_dim[0] == 2 || scale_tensor_dim[0] == 1,
-                      true,
-                      phi::errors::InvalidArgument(
-                          "Scale's shape must be 2 or 1, but got shape = %d .",
-                          scale_tensor_dim[0]));
+
+    if (scale_tensor_dim.size() == 1) {
+      PADDLE_ENFORCE_EQ(
+          scale_tensor_dim[0] == 2 || scale_tensor_dim[0] == 1,
+          true,
+          phi::errors::InvalidArgument(
+              "Scale's shape must be 2 or 1, but got shape = %d .",
+              scale_tensor_dim[0]));
+    }
     out_h_tmp = -1;
     out_w_tmp = -1;
   } else {
@@ -1695,10 +1703,10 @@ static void Interpolate3DInferShapeCheck(
   if (scale_tensor) {
     auto scale_tensor_dim = scale_tensor.dims();
     PADDLE_ENFORCE_EQ(
-        scale_tensor_dim.size(),
-        1,
+        scale_tensor_dim.size() == 1 || scale_tensor_dim.size() == 0,
+        true,
         phi::errors::InvalidArgument(
-            "Scale's dimension size must be 1, but got size = %d .",
+            "Scale's dimension size must be 1 or 0, but got size = %d .",
             scale_tensor_dim.size()));
     PADDLE_ENFORCE_EQ(scale_tensor_dim[0] == 3 || scale_tensor_dim[0] == 1,
                       true,
...
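All three infershape hunks follow the same pattern: the rank check on the Scale tensor is relaxed from exactly 1 to 0 or 1, and the element-count check ([1] for 1-D, [2] or [1] for 2-D, [3] or [1] for 3-D data) applies only in the rank-1 case. A hedged sketch of what each mode accepts at the Python level after this change:

import paddle
import paddle.nn.functional as F

# linear (NCW data): scale tensor of shape [] or [1]
x1d = paddle.rand([2, 3, 6])
F.interpolate(x1d, scale_factor=paddle.full([], 2.0),
              mode="linear", data_format="NCW")

# bilinear (NCHW data): scale tensor of shape [], [1], or [2]
x2d = paddle.rand([2, 3, 6, 6])
F.interpolate(x2d, scale_factor=paddle.full([2], 2.0), mode="bilinear")

# trilinear (NCDHW data): scale tensor of shape [], [1], or [3]
x3d = paddle.rand([2, 3, 6, 6, 6])
F.interpolate(x3d, scale_factor=paddle.full([3], 2.0), mode="trilinear")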
@@ -85,12 +85,14 @@ inline std::vector<int> get_new_shape(
   std::vector<int> vec_new_shape;
   for (size_t i = 0; i < list_new_shape_tensor.size(); ++i) {
     auto tensor = list_new_shape_tensor[i];
-    PADDLE_ENFORCE_EQ(
-        tensor->dims(),
-        phi::make_ddim({1}),
-        errors::InvalidArgument("The shape of dimension tensor should be [1],"
-                                "but received d%.",
-                                tensor->dims()));
+    PADDLE_ENFORCE_EQ(tensor->dims() == phi::make_ddim({1}) ||
+                          tensor->dims() == phi::make_ddim({}),
+                      true,
+                      errors::InvalidArgument(
+                          "The shape of dimension tensor should be [1] or [],"
+                          "but received d%.",
+                          tensor->dims()));
 #ifdef PADDLE_WITH_XPU
     if (tensor->place().GetType() == phi::AllocationType::XPU) {
       DenseTensor temp;
...
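The same relaxation is applied in get_new_shape, which validates the per-dimension size tensors: each entry may now be a one-element 1-D tensor or a 0-D tensor. A hedged Python rendering of the relaxed check (the helper name is hypothetical):

def check_dim_tensor_shapes(shape_tensors):
    # Mirror of the relaxed C++ check: every per-dimension size tensor
    # must have shape [1] (one-element 1-D) or [] (0-D scalar).
    for t in shape_tensors:
        if list(t.shape) not in ([1], []):
            raise ValueError(
                "The shape of dimension tensor should be [1] or [], "
                "but received %s." % (list(t.shape),)
            )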
@@ -816,5 +816,80 @@ class TestBilinearInterpOpZoomInForFloat16(unittest.TestCase):
         np.testing.assert_allclose(x_g_np_1, x_g_np_2, atol=1e-2, rtol=1e-2)
 
 
+class TestBilinearInterpOpAPI_0DTensorScale(unittest.TestCase):
+    def test_case(self):
+        import paddle
+
+        if core.is_compiled_with_cuda():
+            place = core.CUDAPlace(0)
+        else:
+            place = core.CPUPlace()
+        with fluid.dygraph.guard(place):
+            input_data = np.random.random((2, 3, 6, 6)).astype("float32")
+            input_x = paddle.to_tensor(input_data)
+            expect_res = bilinear_interp_np(
+                input_data, out_h=12, out_w=12, align_corners=False
+            )
+            scale_0d = paddle.full([], 2)
+            out = interpolate(
+                x=input_x,
+                scale_factor=scale_0d,
+                mode="bilinear",
+                align_corners=False,
+            )
+            np.testing.assert_allclose(out.numpy(), expect_res, rtol=1e-05)
+
+
+class TestBilinearInterpOpAPI_0DTensorScale2(unittest.TestCase):
+    def test_case(self):
+        import paddle
+
+        if core.is_compiled_with_cuda():
+            place = core.CUDAPlace(0)
+        else:
+            place = core.CPUPlace()
+        with fluid.dygraph.guard(place):
+            input_data = np.random.random((2, 3, 6, 6)).astype("float32")
+            input_x = paddle.to_tensor(input_data)
+            expect_res = bilinear_interp_np(
+                input_data, out_h=12, out_w=12, align_corners=False
+            )
+            scale_0d = [paddle.full([], 2), paddle.full([], 2)]
+            out = interpolate(
+                x=input_x,
+                scale_factor=scale_0d,
+                mode="bilinear",
+                align_corners=False,
+            )
+            np.testing.assert_allclose(out.numpy(), expect_res, rtol=1e-05)
+
+
+class TestBilinearInterpOpAPI_0DTensorOutSize(unittest.TestCase):
+    def test_case(self):
+        import paddle
+
+        if core.is_compiled_with_cuda():
+            place = core.CUDAPlace(0)
+        else:
+            place = core.CPUPlace()
+        with fluid.dygraph.guard(place):
+            input_data = np.random.random((2, 3, 6, 6)).astype("float32")
+            input_x = paddle.to_tensor(input_data)
+            expect_res = bilinear_interp_np(
+                input_data, out_h=12, out_w=12, align_corners=False
+            )
+            output_size = [
+                paddle.full([], 12, dtype="int32"),
+                paddle.full([], 12, dtype="int32"),
+            ]
+            out = interpolate(
+                x=input_x,
+                size=output_size,
+                mode="bilinear",
+                align_corners=False,
+            )
+            np.testing.assert_allclose(out.numpy(), expect_res, rtol=1e-05)
+
+
 if __name__ == "__main__":
     unittest.main()
@@ -1388,6 +1388,72 @@ class TestSundryAPI(unittest.TestCase):
         self.assertEqual(x1.grad.numpy(), 0.5)
         self.assertEqual(x2.grad.numpy(), 0)
 
+    def test_interpolate(self):
+        from paddle.nn.functional import interpolate
+
+        input_x = paddle.rand([2, 3, 6, 6])
+        input_x.stop_gradient = False
+        origin_result = interpolate(
+            x=input_x, size=[12, 12], mode="bilinear", align_corners=False
+        )
+
+        output_size = [
+            paddle.full([], 12, dtype="int32"),
+            paddle.full([], 12, dtype="int32"),
+        ]
+        out1 = interpolate(
+            x=input_x, size=output_size, mode="bilinear", align_corners=False
+        )
+        out1.backward()
+
+        self.assertEqual(out1.shape, [2, 3, 12, 12])
+        self.assertEqual(input_x.grad.shape, [2, 3, 6, 6])
+
+        scale_1 = [paddle.full([], 2), paddle.full([], 2)]
+        out2 = interpolate(
+            x=input_x,
+            scale_factor=scale_1,
+            mode="bilinear",
+            align_corners=False,
+        )
+        out2.backward()
+
+        self.assertEqual(out2.shape, [2, 3, 12, 12])
+        self.assertEqual(input_x.grad.shape, [2, 3, 6, 6])
+
+        scale_2 = paddle.full([], 2)
+        out3 = interpolate(
+            x=input_x,
+            scale_factor=scale_2,
+            mode="bilinear",
+            align_corners=False,
+        )
+        out3.backward()
+
+        # for coverage
+        scale_3 = paddle.full([1], 2)
+        input_3d = paddle.rand([2, 3, 6])
+        out4 = interpolate(
+            x=input_3d,
+            scale_factor=scale_3,
+            mode="LINEAR",
+            align_corners=False,
+            data_format="NCW",
+        )
+
+        self.assertEqual(out3.shape, [2, 3, 12, 12])
+        self.assertEqual(input_x.grad.shape, [2, 3, 6, 6])
+
+        np.testing.assert_allclose(
+            origin_result.numpy(), out1.numpy(), rtol=1e-05
+        )
+        np.testing.assert_allclose(
+            origin_result.numpy(), out2.numpy(), rtol=1e-05
+        )
+        np.testing.assert_allclose(
+            origin_result.numpy(), out3.numpy(), rtol=1e-05
+        )
+
     def test_maseked_select(self):
         x = paddle.rand([])
         x.stop_gradient = False
@@ -2223,6 +2289,41 @@ class TestSundryAPIStatic(unittest.TestCase):
         self.assertEqual(res[0].shape, ())
 
+    @prog_scope()
+    def test_interpolate(self):
+        from paddle.nn.functional import interpolate
+
+        input_x = paddle.rand([2, 3, 6, 6])
+        input_x.stop_gradient = False
+
+        output_size = [
+            paddle.full([], 12, dtype="int32"),
+            paddle.full([], 12, dtype="int32"),
+        ]
+
+        out1 = interpolate(
+            x=input_x, size=output_size, mode="bilinear", align_corners=False
+        )
+        paddle.static.append_backward(out1.sum())
+        prog = paddle.static.default_main_program()
+        res1 = self.exe.run(prog, feed={}, fetch_list=[out1, input_x.grad_name])
+
+        scale_1 = paddle.full([], 2)
+        out2 = interpolate(
+            x=input_x,
+            scale_factor=scale_1,
+            mode="bilinear",
+            align_corners=False,
+        )
+        paddle.static.append_backward(out2.sum())
+        prog = paddle.static.default_main_program()
+        res2 = self.exe.run(prog, feed={}, fetch_list=[out2, input_x.grad_name])
+
+        self.assertEqual(res1[0].shape, (2, 3, 12, 12))
+        self.assertEqual(res1[1].shape, (2, 3, 6, 6))
+        self.assertEqual(res2[0].shape, (2, 3, 12, 12))
+        self.assertEqual(res2[1].shape, (2, 3, 6, 6))
+
     @prog_scope()
     def test_maseked_select(self):
         x = paddle.rand([])
...
@@ -883,6 +883,61 @@ class TestSundryAPI(unittest.TestCase):
         y = paddle.full([], 0.6)
         self.assertFalse(paddle.allclose(x, y))
 
+    def test_interpolate(self):
+        from paddle.nn.functional import interpolate
+
+        input_x = paddle.rand([2, 3, 6, 6])
+        input_x.stop_gradient = False
+        origin_result = interpolate(
+            x=input_x, size=[12, 12], mode="bilinear", align_corners=False
+        )
+
+        output_size = [
+            paddle.full([], 12, dtype="int32"),
+            paddle.full([], 12, dtype="int32"),
+        ]
+        out1 = interpolate(
+            x=input_x, size=output_size, mode="bilinear", align_corners=False
+        )
+        out1.backward()
+
+        self.assertEqual(out1.shape, [2, 3, 12, 12])
+        self.assertEqual(input_x.grad.shape, [2, 3, 6, 6])
+
+        scale_1 = [paddle.full([], 2), paddle.full([], 2)]
+        out2 = interpolate(
+            x=input_x,
+            scale_factor=scale_1,
+            mode="bilinear",
+            align_corners=False,
+        )
+        out2.backward()
+
+        self.assertEqual(out2.shape, [2, 3, 12, 12])
+        self.assertEqual(input_x.grad.shape, [2, 3, 6, 6])
+
+        scale_2 = paddle.full([], 2)
+        out3 = interpolate(
+            x=input_x,
+            scale_factor=scale_2,
+            mode="bilinear",
+            align_corners=False,
+        )
+        out3.backward()
+
+        self.assertEqual(out3.shape, [2, 3, 12, 12])
+        self.assertEqual(input_x.grad.shape, [2, 3, 6, 6])
+
+        np.testing.assert_allclose(
+            origin_result.numpy(), out1.numpy(), rtol=1e-05
+        )
+        np.testing.assert_allclose(
+            origin_result.numpy(), out2.numpy(), rtol=1e-05
+        )
+        np.testing.assert_allclose(
+            origin_result.numpy(), out3.numpy(), rtol=1e-05
+        )
+
     def test_equalall(self):
         x = paddle.full([], 0.5)
         y = paddle.full([], 0.6)
...
@@ -12,6 +12,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import numpy
+
 import paddle
 from paddle import _C_ops, _legacy_C_ops
 from paddle.fluid.layer_helper import LayerHelper
@@ -102,6 +104,10 @@ def unfold(x, kernel_sizes, strides=1, paddings=0, dilations=1, name=None):
             y = F.unfold(x, [3, 3], 1, 1, 1)
     """
+
+    helper = LayerHelper("unfold", **locals())
+    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'unfold')
+
     assert len(x.shape) == 4, "input should be the format of [N, C, H, W]"
 
     if isinstance(kernel_sizes, int):
@@ -145,9 +151,6 @@ def unfold(x, kernel_sizes, strides=1, paddings=0, dilations=1, name=None):
     if in_dygraph_mode():
         return _C_ops.unfold(x, kernel_sizes, strides, paddings, dilations)
 
-    helper = LayerHelper("unfold", **locals())
-    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'unfold')
-
     out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type="unfold",
@@ -432,9 +435,12 @@ def interpolate(
         ):
             if len(size) == 0:
                 raise ValueError("output size can not be empty")
+        if size is None:
+            raise ValueError("output size can not be None in AREA mode")
         if len(x.shape) == 3:
             return paddle.nn.functional.adaptive_avg_pool1d(x, size)
         elif len(x.shape) == 4:
+            print("size :", size)
             return paddle.nn.functional.adaptive_avg_pool2d(x, size)
         elif len(x.shape) == 5:
             return paddle.nn.functional.adaptive_avg_pool3d(x, size)
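The new guard makes the area-mode contract explicit: 'area' resampling is implemented with adaptive average pooling, so it needs a concrete output size rather than a scale factor. A usage sketch of the behavior this implies:

import paddle
import paddle.nn.functional as F

x = paddle.rand([2, 3, 8, 8])
y = F.interpolate(x, size=[4, 4], mode="area")  # adaptive average pooling
print(y.shape)  # [2, 3, 4, 4]

# With the added check, omitting size in area mode now fails fast:
# F.interpolate(x, scale_factor=0.5, mode="area")
# -> ValueError: output size can not be None in AREA mode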
@@ -494,9 +500,10 @@ def interpolate(
                 out_shape = list(out_shape.numpy())
             else:
                 out_shape = list(out_shape)
+
             for i, dim in enumerate(out_shape):
                 if isinstance(dim, Variable):
-                    out_shape[i] = dim.numpy()[0]
+                    out_shape[i] = dim.numpy().item()
         if not (_is_list_or_turple_(out_shape)):
             raise TypeError("size should be a list or tuple or Variable.")
         # Validate the shape
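The switch from dim.numpy()[0] to dim.numpy().item() is what lets a 0-D size entry through: indexing with [0] requires at least one dimension, while .item() extracts the scalar from both a 0-D array and a one-element 1-D array. A quick numpy illustration:

import numpy as np

d0 = np.array(12)    # what a 0-D size tensor converts to
d1 = np.array([12])  # what a shape-[1] size tensor converts to

d1[0]      # 12
# d0[0]    # IndexError: too many indices for array
d0.item()  # 12
d1.item()  # 12 -- works uniformly for both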
@@ -568,11 +575,18 @@ def interpolate(
     else:
         if in_dynamic_mode() and isinstance(scale, Variable):
-            scale = list(scale.numpy())
+            if scale.shape == []:
+                scale = float(scale)
+            else:
+                scale = list(scale.numpy())
+
         if isinstance(scale, Variable):
             scale.stop_gradient = True
             inputs["Scale"] = scale
-        elif isinstance(scale, float) or isinstance(scale, int):
+        elif (
+            isinstance(scale, float)
+            or isinstance(scale, int)
+            or isinstance(scale, numpy.ndarray)
+        ):
             if scale <= 0:
                 raise ValueError("Attr(scale) should be greater than zero.")
             scale_list = []
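For scale_factor, the new branch converts a 0-D dygraph Tensor to a plain Python float before the type dispatch, and the widened elif additionally admits numpy arrays, so a scalar that arrives as a numpy value takes the same path as a float. A hedged sketch of the values the dispatch now accepts:

import numpy as np
import paddle
import paddle.nn.functional as F

x = paddle.rand([2, 3, 6, 6])

for scale in (2.0,                    # plain float
              2,                      # int
              np.array(2.0),          # 0-D numpy array, via the widened elif
              paddle.full([], 2.0)):  # 0-D Tensor, converted to float first
    y = F.interpolate(x, scale_factor=scale, mode="bilinear",
                      align_corners=False)
    assert y.shape == [2, 3, 12, 12]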
@@ -2253,6 +2267,11 @@ def fold(
             # y.shape = [2,3,4,5]
     """
+
+    helper = LayerHelper("fold", **locals())
+    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'fold')
+
     assert len(x.shape) == 3, "input should be the format of [N, C, L]"
 
     def _is_list_or_turple_(data):

@@ -2322,9 +2341,6 @@ def fold(
             dilations,
         )
     else:
-        helper = LayerHelper("fold", **locals())
-        check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'fold')
-
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
         helper.append_op(
             type="fold",
...