Unverified commit e1c8d6bc authored by Aurelius84, committed by GitHub

Fix wrong out dtype inferred from helper.input_dtype (#28715)

* Fix wrong out dtype from helper.input_dtype

* add unittest

* remove disable_static in op_test

* fix param name typo
Parent 08b62f49
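
Why this fix: LayerHelper.input_dtype() defaults to input_param_name='input', so ops whose real inputs are named x, y, or weight could infer the output dtype from the wrong tensor. A minimal sketch of the symptom with pad_constant_like, whose output values come from y (static graph; the pre-fix behavior in the comments is inferred from this diff, not re-run):

    import paddle
    import paddle.fluid as fluid

    paddle.enable_static()
    with paddle.static.program_guard(paddle.static.Program()):
        x = paddle.static.data(name='x', shape=[2, 3, 2, 3], dtype='float64')
        y = paddle.static.data(name='y', shape=[1, 3, 1, 3], dtype='int64')
        # y is padded up to x's shape, so the output dtype should follow y.
        out = fluid.layers.pad_constant_like(x, y, pad_value=0.)
        # Before this fix the dtype was inferred from x (float64); with
        # input_param_name='y' it is int64, matching y.
        print(out.dtype)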
@@ -6680,8 +6680,8 @@ def pad(x, paddings, pad_value=0., name=None):
     check_variable_and_dtype(
         x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], "pad")
-    helper = LayerHelper('pad', input=x, **locals())
-    dtype = helper.input_dtype()
+    helper = LayerHelper('pad', **locals())
+    dtype = helper.input_dtype(input_param_name='x')
     out = helper.create_variable_for_type_inference(dtype)
     helper.append_op(
         type='pad',
@@ -6775,8 +6775,8 @@ def pad_constant_like(x, y, pad_value=0., name=None):
     check_variable_and_dtype(y, 'y', ['float32', 'float64', 'int32', 'int64'],
                              "pad_constant_like")
-    helper = LayerHelper('pad_constant_like', input=x, **locals())
-    dtype = helper.input_dtype()
+    helper = LayerHelper('pad_constant_like', **locals())
+    dtype = helper.input_dtype(input_param_name='y')
     out = helper.create_variable_for_type_inference(dtype)
     helper.append_op(
         type='pad_constant_like',
@@ -8887,7 +8887,6 @@ def mean_iou(input, label, num_classes):
     check_variable_and_dtype(input, 'Predictions', ['int32', 'int64'],
                              'mean_iou')
     check_variable_and_dtype(label, 'Labels', ['int32', 'int64'], 'mean_iou')
-    dtype = helper.input_dtype()
     out_mean_iou = helper.create_variable_for_type_inference(dtype='float32')
     out_wrong = helper.create_variable_for_type_inference(dtype='int32')
     out_correct = helper.create_variable_for_type_inference(dtype='int32')
@@ -759,8 +759,8 @@ def sequence_expand(x, y, ref_level=-1, name=None):
         "sequence layer is not supported in dygraph mode yet.")
     check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
                              'sequence_expand')
-    helper = LayerHelper('sequence_expand', input=x, **locals())
-    dtype = helper.input_dtype()
+    helper = LayerHelper('sequence_expand', **locals())
+    dtype = helper.input_dtype(input_param_name='x')
     tmp = helper.create_variable_for_type_inference(dtype)
     helper.append_op(
         type='sequence_expand',
@@ -880,8 +880,8 @@ def sequence_expand_as(x, y, name=None):
     check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
                              'sequence_expand_as')
     check_type(y, 'y', Variable, 'sequence_expand_as')
-    helper = LayerHelper('sequence_expand_as', input=x, **locals())
-    dtype = helper.input_dtype()
+    helper = LayerHelper('sequence_expand_as', **locals())
+    dtype = helper.input_dtype(input_param_name='x')
     tmp = helper.create_variable_for_type_inference(dtype)
     helper.append_op(
         type='sequence_expand_as',
@@ -980,13 +980,13 @@ def sequence_pad(x, pad_value, maxlen=None, name=None):
     assert not in_dygraph_mode(), (
         "sequence layer is not supported in dygraph mode yet.")
-    helper = LayerHelper('sequence_pad', input=x, **locals())
+    helper = LayerHelper('sequence_pad', **locals())
     check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
                              'fluid.layers.sequence_pad')
     check_variable_and_dtype(pad_value, 'pad_value',
                              ['float32', 'float64', 'int32', 'int64'],
                              'fluid.layers.sequence_pad')
-    dtype = helper.input_dtype()
+    dtype = helper.input_dtype(input_param_name='x')
     out = helper.create_variable_for_type_inference(dtype)
     length = helper.create_variable_for_type_inference(VarDesc.VarType.INT64)
@@ -1062,12 +1062,12 @@ def sequence_unpad(x, length, name=None):
     assert not in_dygraph_mode(), (
         "sequence layer is not supported in dygraph mode yet.")
-    helper = LayerHelper('sequence_unpad', input=x, **locals())
+    helper = LayerHelper('sequence_unpad', **locals())
     check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
                              'fluid.layers.sequence_unpad')
     check_variable_and_dtype(length, 'length', ['int64'],
                              'fluid.layers.sequence_unpad')
-    dtype = helper.input_dtype()
+    dtype = helper.input_dtype(input_param_name='x')
     out = helper.create_variable_for_type_inference(dtype)
     length.stop_gradient = True
@@ -39,6 +39,48 @@ from white_list import op_accuracy_white_list, check_shape_white_list, compile_v
 from white_list import op_threshold_white_list, no_grad_set_white_list
 
 
+def check_out_dtype(api_fn, in_specs, expect_dtypes, target_index=0, **configs):
+    """
+    Determines whether dtype of output tensor is as expected.
+
+    Args:
+        api_fn(callable): paddle api function
+        in_specs(list[tuple]): list of shape and dtype information for constructing input tensor of api_fn, such as [(shape, dtype), (shape, dtype)].
+        expect_dtypes(list[str]): expected dtypes of output tensor.
+        target_index(int): index of the input in in_specs whose dtype is swept to infer the dtype of output.
+        configs(dict): other keyword arguments of the paddle api function.
+
+    Example:
+        check_out_dtype(fluid.layers.pad_constant_like, [([2,3,2,3], 'float64'), ([1, 3, 1,3], )], ['float32', 'float64', 'int64'], target_index=1, pad_value=0.)
+    """
+    paddle.enable_static()
+    for i, expect_dtype in enumerate(expect_dtypes):
+        with paddle.static.program_guard(paddle.static.Program()):
+            input_t = []
+            for index, spec in enumerate(in_specs):
+                if len(spec) == 1:
+                    shape = spec[0]
+                    dtype = expect_dtype if target_index == index else 'float32'
+                elif len(spec) == 2:
+                    shape, dtype = spec
+                else:
+                    raise ValueError(
+                        "Value of in_specs[{}] should contain one or two elements: (shape, ) or (shape, dtype).".
+                        format(index))
+                input_t.append(
+                    paddle.static.data(
+                        name='data_%s' % index, shape=shape, dtype=dtype))
+
+            out = api_fn(*input_t, **configs)
+            out_dtype = fluid.data_feeder.convert_dtype(out.dtype)
+
+            if out_dtype != expect_dtype:
+                raise ValueError(
+                    "Expected out.dtype is {}, but got {} from {}.".format(
+                        expect_dtype, out_dtype, api_fn.__name__))
+
+
 def _set_use_system_allocator(value=None):
     USE_SYSTEM_ALLOCATOR_FLAG = "FLAGS_use_system_allocator"
     old_value = core.globals()[USE_SYSTEM_ALLOCATOR_FLAG]
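
In check_out_dtype, a one-element spec (shape, ) marks the input whose dtype is swept over expect_dtypes; every other input keeps its declared dtype (or the 'float32' fallback). An illustrative standalone call mirroring the unit tests added below (assumes op_test is on the import path, as it is inside the unittests directory):

    import paddle
    from op_test import check_out_dtype

    # Sweep the only input's dtype and assert the output dtype tracks it.
    check_out_dtype(
        paddle.max,
        in_specs=[([10, 16], )],
        expect_dtypes=['float32', 'float64', 'int32', 'int64'])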
@@ -14,7 +14,7 @@
 
 import numpy as np
 import unittest
-from op_test import OpTest
+from op_test import OpTest, check_out_dtype
 import paddle.fluid.core as core
 from paddle.fluid import compiler, Program, program_guard
 import paddle
@@ -106,5 +106,16 @@ class TestPool1D_API(unittest.TestCase):
             self.check_adaptive_max_static_results(place)
 
 
+class TestOutDtype(unittest.TestCase):
+    def test_max_pool(self):
+        api_fn = F.adaptive_max_pool1d
+        shape = [1, 3, 32]
+        check_out_dtype(
+            api_fn,
+            in_specs=[(shape, )],
+            expect_dtypes=['float32', 'float64'],
+            output_size=16)
+
+
 if __name__ == '__main__':
     unittest.main()
@@ -19,10 +19,11 @@ import unittest
 
 import numpy as np
 import paddle.fluid.core as core
-from op_test import OpTest
+from op_test import OpTest, check_out_dtype
 import paddle
 import paddle.fluid as fluid
 from paddle.fluid import Program, program_guard
+import paddle.nn.functional as F
 
 
 def adaptive_start_index(index, input_size, output_size):
@@ -270,5 +271,16 @@ class TestAdaptiveMaxPool2DClassAPI(unittest.TestCase):
             assert np.allclose(out_5.numpy(), self.res_5_np)
 
 
+class TestOutDtype(unittest.TestCase):
+    def test_max_pool(self):
+        api_fn = F.adaptive_max_pool2d
+        shape = [1, 3, 32, 32]
+        check_out_dtype(
+            api_fn,
+            in_specs=[(shape, )],
+            expect_dtypes=['float32', 'float64'],
+            output_size=16)
+
+
 if __name__ == '__main__':
     unittest.main()
@@ -19,10 +19,11 @@ import unittest
 
 import numpy as np
 import paddle.fluid.core as core
-from op_test import OpTest
+from op_test import OpTest, check_out_dtype
 import paddle
 import paddle.fluid as fluid
 from paddle.fluid import Program, program_guard
+import paddle.nn.functional as F
 
 
 def adaptive_start_index(index, input_size, output_size):
@@ -291,5 +292,16 @@ class TestAdaptiveMaxPool3DClassAPI(unittest.TestCase):
             assert np.allclose(out_5.numpy(), self.res_5_np)
 
 
+class TestOutDtype(unittest.TestCase):
+    def test_max_pool(self):
+        api_fn = F.adaptive_max_pool3d
+        shape = [1, 3, 32, 32, 32]
+        check_out_dtype(
+            api_fn,
+            in_specs=[(shape, )],
+            expect_dtypes=['float32', 'float64'],
+            output_size=16)
+
+
 if __name__ == '__main__':
     unittest.main()
@@ -16,12 +16,13 @@ from __future__ import print_function
 
 import unittest
 import numpy as np
-from op_test import OpTest, skip_check_grad_ci
+from op_test import OpTest, skip_check_grad_ci, check_out_dtype
 import paddle.fluid.core as core
 from paddle.fluid.op import Operator
 import paddle.compat as cpt
 import paddle.fluid as fluid
 from paddle.fluid import Program, program_guard
+import paddle.nn.functional as F
 
 
 class TestLookupTableOp(OpTest):
@@ -315,5 +316,15 @@ class TestLookupTableWithTensorIdsWIsSelectedRowsInt8(
                 assert (row == result_array[idx]).all()
 
 
+class TestOutDtype(unittest.TestCase):
+    def test_dtype(self):
+        api_fn = F.embedding
+        check_out_dtype(
+            api_fn,
+            in_specs=[([10, 16], 'int64'), ([100, 64], )],
+            expect_dtypes=['float32', 'float64'],
+            target_index=1)
+
+
 if __name__ == "__main__":
     unittest.main()
@@ -16,7 +16,7 @@ from __future__ import print_function
 
 import unittest
 import numpy as np
-from op_test import OpTest, skip_check_grad_ci
+from op_test import OpTest, skip_check_grad_ci, check_out_dtype
 import paddle
 import paddle.fluid.core as core
np_z = z.numpy()
z_expected = np.array(np.max(np_x, axis=0))
self.assertEqual((np_z == z_expected).all(), True)
class TestOutDtype(unittest.TestCase):
def test_max(self):
api_fn = paddle.max
shape = [10, 16]
check_out_dtype(
api_fn,
in_specs=[(shape, )],
expect_dtypes=['float32', 'float64', 'int32', 'int64'])
if __name__ == '__main__':
unittest.main()
@@ -16,7 +16,7 @@ from __future__ import print_function
 
 import unittest
 import numpy as np
-from op_test import OpTest, skip_check_grad_ci
+from op_test import OpTest, skip_check_grad_ci, check_out_dtype
 import paddle
 import paddle.fluid.core as core
np_z = z.numpy()
z_expected = np.array(np.min(np_x, axis=0))
self.assertEqual((np_z == z_expected).all(), True)
class TestOutDtype(unittest.TestCase):
def test_min(self):
api_fn = paddle.min
shape = [10, 16]
check_out_dtype(
api_fn,
in_specs=[(shape, )],
expect_dtypes=['float32', 'float64', 'int32', 'int64'])
if __name__ == '__main__':
unittest.main()
@@ -16,7 +16,7 @@ from __future__ import print_function
 
 import unittest
 import numpy as np
-from op_test import OpTest
+from op_test import OpTest, check_out_dtype
 import paddle.fluid as fluid
 from paddle.fluid import Program, program_guard
self.assertRaises(TypeError, test_Variable_y)
class TestOutDtype(unittest.TestCase):
def test_dtype(self):
api_fn = fluid.layers.pad_constant_like
check_out_dtype(
api_fn,
in_specs=[([2, 3, 2, 3], 'float64'), ([1, 3, 1, 3], )],
expect_dtypes=['float32', 'float64', 'int32', 'int64'],
target_index=1,
pad_value=0.)
if __name__ == '__main__':
unittest.main()
@@ -310,7 +310,7 @@ def conv1d(x,
     check_variable_and_dtype(x, 'input', ['float16', 'float32', 'float64'],
                              'conv2d')
     helper = LayerHelper(l_type, **locals())
-    dtype = helper.input_dtype()
+    dtype = helper.input_dtype(input_param_name='x')
     out = helper.create_variable_for_type_inference(dtype)
     outputs = {"Output": [out]}
     helper.append_op(
@@ -528,7 +528,7 @@ def conv2d(x,
     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                              'conv2d')
     helper = LayerHelper(l_type, **locals())
-    dtype = helper.input_dtype()
+    dtype = helper.input_dtype(input_param_name='x')
     pre_bias = helper.create_variable_for_type_inference(dtype)
     outputs = {"Output": [pre_bias]}
     helper.append_op(
@@ -789,7 +789,7 @@ def conv1d_transpose(x,
     check_variable_and_dtype(x, 'input', ['float16', 'float32', 'float64'],
                              'conv2d_transpose')
     helper = LayerHelper(op_type, **locals())
-    dtype = helper.input_dtype()
+    dtype = helper.input_dtype(input_param_name='x')
     out = helper.create_variable_for_type_inference(dtype)
     outputs = {"Output": [out]}
     helper.append_op(
@@ -1224,7 +1224,7 @@ def conv3d(x,
         "data_format": data_format
     }
     helper = LayerHelper(op_type, **locals())
-    dtype = helper.input_dtype()
+    dtype = helper.input_dtype(input_param_name='x')
     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                              'conv3d')
@@ -198,7 +198,7 @@ def embedding(x, weight, padding_idx=None, sparse=False, name=None):
             'remote_prefetch', False, 'padding_idx', padding_idx)
     else:
         helper = LayerHelper('embedding', **locals())
-        dtype = helper.input_dtype()
+        dtype = helper.input_dtype(input_param_name='weight')
         check_variable_and_dtype(x, 'input', ['int32', 'int64'], 'embedding')
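
The embedding case shows why the parameter name matters: x holds integer ids while the output rows are gathered from weight, so the output dtype must follow weight, never x. A minimal sketch (static graph; shapes borrowed from the new lookup_table test above):

    import paddle
    import paddle.nn.functional as F

    paddle.enable_static()
    with paddle.static.program_guard(paddle.static.Program()):
        ids = paddle.static.data(name='ids', shape=[10, 16], dtype='int64')
        weight = paddle.static.data(name='w', shape=[100, 64], dtype='float64')
        out = F.embedding(ids, weight)
        # out gathers rows of weight, so out.dtype should be float64;
        # inferring from x would have produced int64.
        print(out.dtype)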
@@ -235,7 +235,7 @@ def avg_pool1d(x,
     op_type = 'pool2d'
     helper = LayerHelper(op_type, **locals())
-    dtype = helper.input_dtype()
+    dtype = helper.input_dtype(input_param_name='x')
     pool_out = helper.create_variable_for_type_inference(dtype)
     helper.append_op(
@@ -346,7 +346,7 @@ def avg_pool2d(x,
     op_type = 'pool2d'
     helper = LayerHelper(op_type, **locals())
-    dtype = helper.input_dtype()
+    dtype = helper.input_dtype(input_param_name='x')
     pool_out = helper.create_variable_for_type_inference(dtype)
     helper.append_op(
@@ -461,7 +461,7 @@ def avg_pool3d(x,
     op_type = "pool3d"
     helper = LayerHelper(op_type, **locals())
-    dtype = helper.input_dtype()
+    dtype = helper.input_dtype(input_param_name='x')
     pool_out = helper.create_variable_for_type_inference(dtype)
     outputs = {"Out": pool_out}
@@ -581,7 +581,7 @@ def max_pool1d(x,
     op_type = 'max_pool2d_with_index' if return_mask else "pool2d"
     helper = LayerHelper(op_type, **locals())
-    dtype = helper.input_dtype()
+    dtype = helper.input_dtype(input_param_name='x')
     pool_out = helper.create_variable_for_type_inference(dtype)
     mask = helper.create_variable_for_type_inference(dtype)
     outputs = {"Out": pool_out, "Mask": mask}
@@ -714,7 +714,7 @@ def max_pool2d(x,
     op_type = 'max_pool2d_with_index' if return_mask else "pool2d"
     helper = LayerHelper(op_type, **locals())
-    dtype = helper.input_dtype()
+    dtype = helper.input_dtype(input_param_name='x')
     pool_out = helper.create_variable_for_type_inference(dtype)
     mask = helper.create_variable_for_type_inference(dtype)
     outputs = {"Out": pool_out, "Mask": mask}
@@ -840,7 +840,7 @@ def max_pool3d(x,
     op_type = "max_pool3d_with_index" if return_mask else "pool3d"
     helper = LayerHelper(op_type, **locals())
-    dtype = helper.input_dtype()
+    dtype = helper.input_dtype(input_param_name='x')
     pool_out = helper.create_variable_for_type_inference(dtype)
     mask = helper.create_variable_for_type_inference(dtype)
     outputs = {"Out": pool_out, "Mask": mask}
@@ -921,7 +921,7 @@ def adaptive_avg_pool1d(x, output_size, name=None):
         return squeeze(pool_out, [2])
     helper = LayerHelper(l_type, **locals())
-    dtype = helper.input_dtype()
+    dtype = helper.input_dtype(input_param_name='x')
     pool_out = helper.create_variable_for_type_inference(dtype)
     outputs = {"Out": pool_out}
@@ -1020,7 +1020,7 @@ def adaptive_avg_pool2d(x, output_size, data_format='NCHW', name=None):
     l_type = 'pool2d'
     helper = LayerHelper(l_type, **locals())
-    dtype = helper.input_dtype()
+    dtype = helper.input_dtype(input_param_name='x')
     pool_out = helper.create_variable_for_type_inference(dtype)
     outputs = {"Out": pool_out}
@@ -1126,7 +1126,7 @@ def adaptive_avg_pool3d(x, output_size, data_format='NCDHW', name=None):
     l_type = 'pool3d'
     helper = LayerHelper(l_type, **locals())
-    dtype = helper.input_dtype()
+    dtype = helper.input_dtype(input_param_name='x')
     pool_out = helper.create_variable_for_type_inference(dtype)
     outputs = {"Out": pool_out}
@@ -1208,7 +1208,7 @@ def adaptive_max_pool1d(x, output_size, return_mask=False, name=None):
             pool_out[1], [2])) if return_mask else squeeze(pool_out[0], [2])
     helper = LayerHelper(l_type, **locals())
-    dtype = helper.input_dtype()
+    dtype = helper.input_dtype(input_param_name='x')
     pool_out = helper.create_variable_for_type_inference(dtype)
     mask = helper.create_variable_for_type_inference(dtype)
@@ -1296,7 +1296,7 @@ def adaptive_max_pool2d(x, output_size, return_mask=False, name=None):
     l_type = 'max_pool2d_with_index'
     helper = LayerHelper(l_type, **locals())
-    dtype = helper.input_dtype()
+    dtype = helper.input_dtype(input_param_name='x')
     pool_out = helper.create_variable_for_type_inference(dtype)
     mask = helper.create_variable_for_type_inference(dtype)
@@ -1389,7 +1389,7 @@ def adaptive_max_pool3d(x, output_size, return_mask=False, name=None):
     l_type = 'max_pool3d_with_index'
     helper = LayerHelper(l_type, **locals())
-    dtype = helper.input_dtype()
+    dtype = helper.input_dtype(input_param_name='x')
     pool_out = helper.create_variable_for_type_inference(dtype)
     mask = helper.create_variable_for_type_inference(dtype)
@@ -1176,7 +1176,7 @@ def max(x, axis=None, keepdim=False, name=None):
         x, 'x', ['float32', 'float64', 'int32', 'int64'], 'max')
     out = helper.create_variable_for_type_inference(
-        dtype=helper.input_dtype())
+        dtype=x.dtype)
     helper.append_op(
         type='reduce_max',
         inputs={'X': x},
@@ -1267,7 +1267,7 @@ def min(x, axis=None, keepdim=False, name=None):
         x, 'x', ['float32', 'float64', 'int32', 'int64'], 'min')
     out = helper.create_variable_for_type_inference(
-        dtype=helper.input_dtype())
+        dtype=x.dtype)
     helper.append_op(
         type='reduce_min',
        inputs={'X': x},
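
For max and min the fix skips the helper entirely: these reductions take a single tensor input, so x.dtype is already the correct output dtype. A quick check mirroring the new unit tests (a sketch, not part of the original patch):

    import paddle

    paddle.enable_static()
    with paddle.static.program_guard(paddle.static.Program()):
        x = paddle.static.data(name='x', shape=[10, 16], dtype='int64')
        out = paddle.max(x)
        # reduce_max preserves the input dtype, so out.dtype is int64.
        print(out.dtype)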