diff --git a/paddle/fluid/pybind/tensor_py.h b/paddle/fluid/pybind/tensor_py.h
index cc44ad9a2deb556d2d9e275caa029025e1c5f533..9e5dc638516a958cc829eb034d0766e91771906d 100644
--- a/paddle/fluid/pybind/tensor_py.h
+++ b/paddle/fluid/pybind/tensor_py.h
@@ -106,9 +106,10 @@ DECLARE_VALID_DTYPE_TO_PY_ARRAY(float);
 DECLARE_VALID_DTYPE_TO_PY_ARRAY(double);
 DECLARE_VALID_DTYPE_TO_PY_ARRAY(bool);
 DECLARE_VALID_DTYPE_TO_PY_ARRAY(int8_t);
-DECLARE_VALID_DTYPE_TO_PY_ARRAY(uint8_t);
+DECLARE_VALID_DTYPE_TO_PY_ARRAY(int16_t);
 DECLARE_VALID_DTYPE_TO_PY_ARRAY(int);
 DECLARE_VALID_DTYPE_TO_PY_ARRAY(int64_t);
+DECLARE_VALID_DTYPE_TO_PY_ARRAY(uint8_t);
 
 inline std::string TensorDTypeToPyDTypeStr(
     framework::proto::VarType::Type type) {
@@ -218,13 +219,16 @@ void SetTensorFromPyArray(framework::Tensor *self, const py::object &obj,
     SetTensorFromPyArrayT<double, P>(self, array, place, zero_copy);
   } else if (py::isinstance<py::array_t<int8_t>>(array)) {
     SetTensorFromPyArrayT<int8_t, P>(self, array, place, zero_copy);
+  } else if (py::isinstance<py::array_t<int16_t>>(array)) {
+    SetTensorFromPyArrayT<int16_t, P>(self, array, place, zero_copy);
   } else if (py::isinstance<py::array_t<uint8_t>>(array)) {
     SetTensorFromPyArrayT<uint8_t, P>(self, array, place, zero_copy);
   } else if (py::isinstance<py::array_t<paddle::platform::float16>>(array)) {
     SetTensorFromPyArrayT<paddle::platform::float16, P>(self, array, place,
                                                         zero_copy);
   } else if (py::isinstance<py::array_t<uint16_t>>(array)) {
-    // TODO(cql): temporary keeping uint16, should be depracated later
+    // TODO(cql): temporarily keeping uint16, which was used for casting
+    // float16 before. It should be deprecated later.
     SetTensorFromPyArrayT<paddle::platform::float16, P>(self, array, place,
                                                         zero_copy);
   } else if (py::isinstance<py::array_t<bool>>(array)) {
@@ -234,7 +238,7 @@ void SetTensorFromPyArray(framework::Tensor *self, const py::object &obj,
         "Incompatible data or style type: tensor.set() supports bool, float16, "
         "float32, "
         "float64, "
-        "int8, int32, int64 and uint8, uint16, but got %s!",
+        "int8, int16, int32, int64 and uint8, uint16, but got %s!",
         array.dtype());
   }
 }
@@ -435,16 +439,18 @@ inline framework::Tensor *_sliceTensor(const framework::Tensor &self,
       return _sliceAndConcat<float>(self, obj, dim);
     case framework::proto::VarType::FP64:
       return _sliceAndConcat<double>(self, obj, dim);
+    case framework::proto::VarType::INT8:
+      return _sliceAndConcat<int8_t>(self, obj, dim);
+    case framework::proto::VarType::INT16:
+      return _sliceAndConcat<int16_t>(self, obj, dim);
     case framework::proto::VarType::INT32:
       return _sliceAndConcat<int>(self, obj, dim);
     case framework::proto::VarType::INT64:
       return _sliceAndConcat<int64_t>(self, obj, dim);
     case framework::proto::VarType::BOOL:
       return _sliceAndConcat<bool>(self, obj, dim);
-    case framework::proto::VarType::INT16:
-      return _sliceAndConcat<bool>(self, obj, dim);
     case framework::proto::VarType::UINT8:
-      return _sliceAndConcat<bool>(self, obj, dim);
+      return _sliceAndConcat<uint8_t>(self, obj, dim);
     default:
       PADDLE_THROW("Not support type %d", src_type);
   }
diff --git a/python/paddle/fluid/tests/unittests/test_tensor.py b/python/paddle/fluid/tests/unittests/test_tensor.py
index 71dd49504a58ee290351e81fed0355e192059daf..24be25fda2e357f5aa0f94eb4cf0197387b33d30 100644
--- a/python/paddle/fluid/tests/unittests/test_tensor.py
+++ b/python/paddle/fluid/tests/unittests/test_tensor.py
@@ -22,6 +22,12 @@ import numbers
 
 
 class TestTensor(unittest.TestCase):
+    def setUp(self):
+        self.support_dtypes = [
+            'bool', 'uint8', 'int8', 'int16', 'int32', 'int64', 'float16',
+            'float32', 'float64'
+        ]
+
     def test_int_tensor(self):
         scope = core.Scope()
         var = scope.var("test_tensor")
@@ -184,15 +190,15 @@
         tensor_array = numpy.array(tensor)
         self.assertEqual((0, 1), tensor_array.shape)
 
-    def run_sliece_tensor(self, place):
-
+    def run_slice_tensor(self, place, dtype):
         tensor = fluid.Tensor()
         shape = [3, 3, 3]
         tensor._set_dims(shape)
 
-        tensor_array = numpy.array([[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
-                                    [[10, 11, 12], [13, 14, 15], [16, 17, 18]],
-                                    [[19, 20, 21], [22, 23, 24], [25, 26, 27]]])
+        tensor_array = numpy.array(
+            [[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
+             [[10, 11, 12], [13, 14, 15], [16, 17, 18]],
+             [[19, 20, 21], [22, 23, 24], [25, 26, 27]]]).astype(dtype)
         tensor.set(tensor_array, place)
 
         n1 = tensor[1]
@@ -227,14 +233,15 @@
         t8 = tensor_array[0::1, 0::-1, 2:]
         self.assertTrue((numpy.array(n8) == numpy.array(t8)).all())
 
-    def test_sliece_tensor(self):
-        # run cpu first
-        place = core.CPUPlace()
-        self.run_sliece_tensor(place)
+    def test_slice_tensor(self):
+        for dtype in self.support_dtypes:
+            # run cpu first
+            place = core.CPUPlace()
+            self.run_slice_tensor(place, dtype)
 
-        if core.is_compiled_with_cuda():
-            place = core.CUDAPlace(0)
-            self.run_sliece_tensor(place)
+            if core.is_compiled_with_cuda():
+                place = core.CUDAPlace(0)
+                self.run_slice_tensor(place, dtype)
 
     def test_print_tensor(self):
         scope = core.Scope()
@@ -299,6 +306,25 @@
             self.assertEqual(tensor._dtype(), core.VarDesc.VarType.FP16)
             self.assertTrue(numpy.array_equal(numpy.array(tensor), array))
 
+    def test_tensor_set_int16(self):
+        array = numpy.random.randint(100, size=(300, 500)).astype("int16")
+        tensor = fluid.Tensor()
+        place = core.CPUPlace()
+        tensor.set(array, place)
+        self.assertEqual(tensor._dtype(), core.VarDesc.VarType.INT16)
+        self.assertTrue(numpy.array_equal(numpy.array(tensor), array))
+
+        if core.is_compiled_with_cuda():
+            place = core.CUDAPlace(0)
+            tensor.set(array, place)
+            self.assertEqual(tensor._dtype(), core.VarDesc.VarType.INT16)
+            self.assertTrue(numpy.array_equal(numpy.array(tensor), array))
+
+            place = core.CUDAPinnedPlace()
+            tensor.set(array, place)
+            self.assertEqual(tensor._dtype(), core.VarDesc.VarType.INT16)
+            self.assertTrue(numpy.array_equal(numpy.array(tensor), array))
+
     def test_tensor_set_from_array_list(self):
         array = numpy.random.randint(1000, size=(200, 300))
         list_array = [array, array]