未验证 提交 822e5b36 编写于 作者: L Leo Chen 提交者: GitHub

Support int16 for Tensor (#22423)

* add int16 support, test=develop

* add test, test=develop

* fix typo, test=develop

* fix dtype error in slice, test=develop
上级 e1b0d7cb
......@@ -106,9 +106,10 @@ DECLARE_VALID_DTYPE_TO_PY_ARRAY(float);
DECLARE_VALID_DTYPE_TO_PY_ARRAY(double);
DECLARE_VALID_DTYPE_TO_PY_ARRAY(bool);
DECLARE_VALID_DTYPE_TO_PY_ARRAY(int8_t);
DECLARE_VALID_DTYPE_TO_PY_ARRAY(uint8_t);
DECLARE_VALID_DTYPE_TO_PY_ARRAY(int16_t);
DECLARE_VALID_DTYPE_TO_PY_ARRAY(int);
DECLARE_VALID_DTYPE_TO_PY_ARRAY(int64_t);
DECLARE_VALID_DTYPE_TO_PY_ARRAY(uint8_t);
inline std::string TensorDTypeToPyDTypeStr(
framework::proto::VarType::Type type) {
......@@ -218,13 +219,16 @@ void SetTensorFromPyArray(framework::Tensor *self, const py::object &obj,
SetTensorFromPyArrayT<double, P>(self, array, place, zero_copy);
} else if (py::isinstance<py::array_t<int8_t>>(array)) {
SetTensorFromPyArrayT<int8_t, P>(self, array, place, zero_copy);
} else if (py::isinstance<py::array_t<int16_t>>(array)) {
SetTensorFromPyArrayT<int16_t, P>(self, array, place, zero_copy);
} else if (py::isinstance<py::array_t<uint8_t>>(array)) {
SetTensorFromPyArrayT<uint8_t, P>(self, array, place, zero_copy);
} else if (py::isinstance<py::array_t<paddle::platform::float16>>(array)) {
SetTensorFromPyArrayT<paddle::platform::float16, P>(self, array, place,
zero_copy);
} else if (py::isinstance<py::array_t<uint16_t>>(array)) {
// TODO(cql): temporary keeping uint16, should be deprecated later
// TODO(cql): temporarily keeping uint16, which is used for casting float16
// before. It should be deprecated later.
SetTensorFromPyArrayT<paddle::platform::float16, P>(self, array, place,
zero_copy);
} else if (py::isinstance<py::array_t<bool>>(array)) {
......@@ -234,7 +238,7 @@ void SetTensorFromPyArray(framework::Tensor *self, const py::object &obj,
"Incompatible data or style type: tensor.set() supports bool, float16, "
"float32, "
"float64, "
"int8, int32, int64 and uint8, uint16, but got %s!",
"int8, int16, int32, int64 and uint8, uint16, but got %s!",
array.dtype());
}
}
......@@ -435,16 +439,18 @@ inline framework::Tensor *_sliceTensor(const framework::Tensor &self,
return _sliceAndConcat<float>(self, obj, dim);
case framework::proto::VarType::FP64:
return _sliceAndConcat<double>(self, obj, dim);
case framework::proto::VarType::INT8:
return _sliceAndConcat<int8_t>(self, obj, dim);
case framework::proto::VarType::INT16:
return _sliceAndConcat<int16_t>(self, obj, dim);
case framework::proto::VarType::INT32:
return _sliceAndConcat<int>(self, obj, dim);
case framework::proto::VarType::INT64:
return _sliceAndConcat<int64_t>(self, obj, dim);
case framework::proto::VarType::BOOL:
return _sliceAndConcat<bool>(self, obj, dim);
case framework::proto::VarType::INT16:
return _sliceAndConcat<bool>(self, obj, dim);
case framework::proto::VarType::UINT8:
return _sliceAndConcat<bool>(self, obj, dim);
return _sliceAndConcat<uint8_t>(self, obj, dim);
default:
PADDLE_THROW("Not support type %d", src_type);
}
......
......@@ -22,6 +22,12 @@ import numbers
class TestTensor(unittest.TestCase):
def setUp(self):
self.support_dtypes = [
'bool', 'uint8', 'int8', 'int16', 'int32', 'int64', 'float16',
'float32', 'float64'
]
def test_int_tensor(self):
scope = core.Scope()
var = scope.var("test_tensor")
......@@ -184,15 +190,15 @@ class TestTensor(unittest.TestCase):
tensor_array = numpy.array(tensor)
self.assertEqual((0, 1), tensor_array.shape)
def run_sliece_tensor(self, place):
def run_slice_tensor(self, place, dtype):
tensor = fluid.Tensor()
shape = [3, 3, 3]
tensor._set_dims(shape)
tensor_array = numpy.array([[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
[[10, 11, 12], [13, 14, 15], [16, 17, 18]],
[[19, 20, 21], [22, 23, 24], [25, 26, 27]]])
tensor_array = numpy.array(
[[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
[[10, 11, 12], [13, 14, 15], [16, 17, 18]],
[[19, 20, 21], [22, 23, 24], [25, 26, 27]]]).astype(dtype)
tensor.set(tensor_array, place)
n1 = tensor[1]
......@@ -227,14 +233,15 @@ class TestTensor(unittest.TestCase):
t8 = tensor_array[0::1, 0::-1, 2:]
self.assertTrue((numpy.array(n8) == numpy.array(t8)).all())
def test_sliece_tensor(self):
# run cpu first
place = core.CPUPlace()
self.run_sliece_tensor(place)
def test_slice_tensor(self):
    """Exercise run_slice_tensor for every supported dtype.

    Runs on CPU first, then on GPU when the build has CUDA support.

    NOTE(review): the original body carried a stale duplicate CUDA branch
    calling ``run_sliece_tensor`` (old misspelled name, missing the
    ``dtype`` argument) left over from the rename; that branch is removed
    here so only the dtype-aware helper is invoked.
    """
    for dtype in self.support_dtypes:
        # run cpu first
        place = core.CPUPlace()
        self.run_slice_tensor(place, dtype)

        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            self.run_slice_tensor(place, dtype)
def test_print_tensor(self):
scope = core.Scope()
......@@ -299,6 +306,25 @@ class TestTensor(unittest.TestCase):
self.assertEqual(tensor._dtype(), core.VarDesc.VarType.FP16)
self.assertTrue(numpy.array_equal(numpy.array(tensor), array))
def test_tensor_set_int16(self):
    """Tensor.set with an int16 numpy array must preserve dtype and values.

    Checks the CPU place always; when the build has CUDA support, also
    checks the GPU place and the CUDA-pinned place, in that order.
    """
    array = numpy.random.randint(100, size=(300, 500)).astype("int16")
    tensor = fluid.Tensor()

    def _check(place):
        # Round-trip: set on the given place, read back as numpy.
        tensor.set(array, place)
        self.assertEqual(tensor._dtype(), core.VarDesc.VarType.INT16)
        self.assertTrue(numpy.array_equal(numpy.array(tensor), array))

    _check(core.CPUPlace())
    if core.is_compiled_with_cuda():
        _check(core.CUDAPlace(0))
        _check(core.CUDAPinnedPlace())
def test_tensor_set_from_array_list(self):
array = numpy.random.randint(1000, size=(200, 300))
list_array = [array, array]
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册