Unverified commit 822e5b36, authored by Leo Chen, committed by GitHub

Support int16 for Tensor (#22423)

* add int16 support, test=develop

* add test, test=develop

* fix typo, test=develop

* fix dtype error in slice, test=develop
Parent commit: e1b0d7cb
@@ -106,9 +106,10 @@ DECLARE_VALID_DTYPE_TO_PY_ARRAY(float);
 DECLARE_VALID_DTYPE_TO_PY_ARRAY(double);
 DECLARE_VALID_DTYPE_TO_PY_ARRAY(bool);
 DECLARE_VALID_DTYPE_TO_PY_ARRAY(int8_t);
-DECLARE_VALID_DTYPE_TO_PY_ARRAY(uint8_t);
+DECLARE_VALID_DTYPE_TO_PY_ARRAY(int16_t);
 DECLARE_VALID_DTYPE_TO_PY_ARRAY(int);
 DECLARE_VALID_DTYPE_TO_PY_ARRAY(int64_t);
+DECLARE_VALID_DTYPE_TO_PY_ARRAY(uint8_t);

 inline std::string TensorDTypeToPyDTypeStr(
     framework::proto::VarType::Type type) {
@@ -218,13 +219,16 @@ void SetTensorFromPyArray(framework::Tensor *self, const py::object &obj,
     SetTensorFromPyArrayT<double, P>(self, array, place, zero_copy);
   } else if (py::isinstance<py::array_t<int8_t>>(array)) {
     SetTensorFromPyArrayT<int8_t, P>(self, array, place, zero_copy);
+  } else if (py::isinstance<py::array_t<int16_t>>(array)) {
+    SetTensorFromPyArrayT<int16_t, P>(self, array, place, zero_copy);
   } else if (py::isinstance<py::array_t<uint8_t>>(array)) {
     SetTensorFromPyArrayT<uint8_t, P>(self, array, place, zero_copy);
   } else if (py::isinstance<py::array_t<paddle::platform::float16>>(array)) {
     SetTensorFromPyArrayT<paddle::platform::float16, P>(self, array, place,
                                                         zero_copy);
   } else if (py::isinstance<py::array_t<uint16_t>>(array)) {
-    // TODO(cql): temporary keeping uint16, should be depracated later
+    // TODO(cql): temporarily keeping uint16, which was used for casting
+    // float16 before. It should be deprecated later.
     SetTensorFromPyArrayT<paddle::platform::float16, P>(self, array, place,
                                                         zero_copy);
   } else if (py::isinstance<py::array_t<bool>>(array)) {
@@ -234,7 +238,7 @@ void SetTensorFromPyArray(framework::Tensor *self, const py::object &obj,
         "Incompatible data or style type: tensor.set() supports bool, float16, "
         "float32, "
         "float64, "
-        "int8, int32, int64 and uint8, uint16, but got %s!",
+        "int8, int16, int32, int64 and uint8, uint16, but got %s!",
         array.dtype());
   }
 }
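
For reference, a minimal usage sketch of what the dispatch change above enables from Python. It mirrors the new `test_tensor_set_int16` case further down; the imports (`numpy`, `paddle.fluid`, `paddle.fluid.core`) are the ones the unit test is assumed to use, and the snippet presumes a build that includes this patch.

```python
import numpy
import paddle.fluid as fluid
import paddle.fluid.core as core

# Build an int16 numpy array and copy it into a fluid.Tensor on the CPU.
array = numpy.random.randint(100, size=(300, 500)).astype("int16")
tensor = fluid.Tensor()
tensor.set(array, core.CPUPlace())

# With the int16 branch added above, the tensor keeps the int16 dtype
# and the data round-trips back to numpy unchanged.
assert tensor._dtype() == core.VarDesc.VarType.INT16
assert numpy.array_equal(numpy.array(tensor), array)
```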
@@ -435,16 +439,18 @@ inline framework::Tensor *_sliceTensor(const framework::Tensor &self,
       return _sliceAndConcat<float>(self, obj, dim);
     case framework::proto::VarType::FP64:
       return _sliceAndConcat<double>(self, obj, dim);
+    case framework::proto::VarType::INT8:
+      return _sliceAndConcat<int8_t>(self, obj, dim);
+    case framework::proto::VarType::INT16:
+      return _sliceAndConcat<int16_t>(self, obj, dim);
     case framework::proto::VarType::INT32:
       return _sliceAndConcat<int>(self, obj, dim);
     case framework::proto::VarType::INT64:
       return _sliceAndConcat<int64_t>(self, obj, dim);
     case framework::proto::VarType::BOOL:
       return _sliceAndConcat<bool>(self, obj, dim);
-    case framework::proto::VarType::INT16:
-      return _sliceAndConcat<bool>(self, obj, dim);
     case framework::proto::VarType::UINT8:
-      return _sliceAndConcat<bool>(self, obj, dim);
+      return _sliceAndConcat<uint8_t>(self, obj, dim);
     default:
       PADDLE_THROW("Not support type %d", src_type);
   }
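
The INT16 and UINT8 cases above are what the "fix dtype error in slice" item in the commit message refers to: previously both fell through to `_sliceAndConcat<bool>`. Below is a small sketch of the user-visible effect, patterned after `run_slice_tensor` in the updated test; the imports are assumed as in that test and the `numpy.arange` data is only illustrative.

```python
import numpy
import paddle.fluid as fluid
import paddle.fluid.core as core

# Fill a 3x3x3 int16 tensor, as run_slice_tensor below does for each dtype.
array = numpy.arange(27).reshape([3, 3, 3]).astype("int16")
tensor = fluid.Tensor()
tensor._set_dims([3, 3, 3])
tensor.set(array, core.CPUPlace())

# Indexing a Tensor goes through _sliceTensor; with the cases fixed above,
# the slice keeps its int16 elements instead of being read back as bool.
n1 = tensor[1]
assert numpy.array_equal(numpy.array(n1), array[1])
```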
......
@@ -22,6 +22,12 @@ import numbers


 class TestTensor(unittest.TestCase):
+    def setUp(self):
+        self.support_dtypes = [
+            'bool', 'uint8', 'int8', 'int16', 'int32', 'int64', 'float16',
+            'float32', 'float64'
+        ]
+
     def test_int_tensor(self):
         scope = core.Scope()
         var = scope.var("test_tensor")
@@ -184,15 +190,15 @@ class TestTensor(unittest.TestCase):
         tensor_array = numpy.array(tensor)
         self.assertEqual((0, 1), tensor_array.shape)

-    def run_sliece_tensor(self, place):
+    def run_slice_tensor(self, place, dtype):

         tensor = fluid.Tensor()
         shape = [3, 3, 3]
         tensor._set_dims(shape)

-        tensor_array = numpy.array([[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
-                                    [[10, 11, 12], [13, 14, 15], [16, 17, 18]],
-                                    [[19, 20, 21], [22, 23, 24], [25, 26, 27]]])
+        tensor_array = numpy.array(
+            [[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
+             [[10, 11, 12], [13, 14, 15], [16, 17, 18]],
+             [[19, 20, 21], [22, 23, 24], [25, 26, 27]]]).astype(dtype)
         tensor.set(tensor_array, place)

         n1 = tensor[1]
@@ -227,14 +233,15 @@ class TestTensor(unittest.TestCase):
         t8 = tensor_array[0::1, 0::-1, 2:]
         self.assertTrue((numpy.array(n8) == numpy.array(t8)).all())

-    def test_sliece_tensor(self):
-        # run cpu first
-        place = core.CPUPlace()
-        self.run_sliece_tensor(place)
+    def test_slice_tensor(self):
+        for dtype in self.support_dtypes:
+            # run cpu first
+            place = core.CPUPlace()
+            self.run_slice_tensor(place, dtype)

-        if core.is_compiled_with_cuda():
-            place = core.CUDAPlace(0)
-            self.run_sliece_tensor(place)
+            if core.is_compiled_with_cuda():
+                place = core.CUDAPlace(0)
+                self.run_slice_tensor(place, dtype)

     def test_print_tensor(self):
         scope = core.Scope()
@@ -299,6 +306,25 @@ class TestTensor(unittest.TestCase):
             self.assertEqual(tensor._dtype(), core.VarDesc.VarType.FP16)
             self.assertTrue(numpy.array_equal(numpy.array(tensor), array))

+    def test_tensor_set_int16(self):
+        array = numpy.random.randint(100, size=(300, 500)).astype("int16")
+        tensor = fluid.Tensor()
+        place = core.CPUPlace()
+        tensor.set(array, place)
+        self.assertEqual(tensor._dtype(), core.VarDesc.VarType.INT16)
+        self.assertTrue(numpy.array_equal(numpy.array(tensor), array))
+
+        if core.is_compiled_with_cuda():
+            place = core.CUDAPlace(0)
+            tensor.set(array, place)
+            self.assertEqual(tensor._dtype(), core.VarDesc.VarType.INT16)
+            self.assertTrue(numpy.array_equal(numpy.array(tensor), array))
+
+            place = core.CUDAPinnedPlace()
+            tensor.set(array, place)
+            self.assertEqual(tensor._dtype(), core.VarDesc.VarType.INT16)
+            self.assertTrue(numpy.array_equal(numpy.array(tensor), array))
+
     def test_tensor_set_from_array_list(self):
         array = numpy.random.randint(1000, size=(200, 300))
         list_array = [array, array]
......