Commit 2c3c579b authored by Leo Chen, committed by Zeng Jinle

tensor.set() supports array list and remove unused code, test=develop (#20959)

Parent eec4fa90
......@@ -140,7 +140,8 @@ void TensorSetElement(framework::Tensor *self, size_t offset, T elem) {
template <typename T, typename P>
void SetTensorFromPyArrayT(
framework::Tensor *self,
py::array_t<T, py::array::c_style | py::array::forcecast> array, P place) {
const py::array_t<T, py::array::c_style | py::array::forcecast> &array,
const P &place) {
std::vector<int64_t> dims;
dims.reserve(array.ndim());
for (decltype(array.ndim()) i = 0; i < array.ndim(); ++i) {
......@@ -171,8 +172,9 @@ void SetTensorFromPyArrayT(
}
template <typename P>
void SetTensorFromPyArray(framework::Tensor *self, pybind11::array array,
P place) {
void SetTensorFromPyArray(framework::Tensor *self, const py::object &obj,
const P &place) {
auto array = obj.cast<py::array>();
if (py::isinstance<py::array_t<float>>(array)) {
SetTensorFromPyArrayT<float, P>(self, array, place);
} else if (py::isinstance<py::array_t<int>>(array)) {
......@@ -202,42 +204,6 @@ void SetTensorFromPyArray(framework::Tensor *self, pybind11::array array,
}
}
// Copies a NumPy ndarray into a CPU tensor.
//
// The tensor is resized to the array's shape and the raw buffer is copied
// with a single memcpy; the array is guaranteed C-contiguous because the
// parameter type requests py::array::c_style | py::array::forcecast.
//
// @param self   destination tensor, resized in place.
// @param array  source ndarray with element type T.
// @param place  CPU place used to allocate the destination buffer.
template <typename T>
void PyCPUTensorSetFromArray(
    framework::Tensor *self,
    pybind11::array_t<T, pybind11::array::c_style | pybind11::array::forcecast>
        array,
    paddle::platform::CPUPlace place) {
  std::vector<int64_t> dims;
  dims.reserve(array.ndim());
  for (decltype(array.ndim()) i = 0; i < array.ndim(); ++i) {
    // Keep the full 64-bit extent: the previous static_cast<int> silently
    // truncated dimensions larger than INT_MAX before storing them in an
    // int64_t vector.
    dims.push_back(static_cast<int64_t>(array.shape()[i]));
  }
  self->Resize(framework::make_ddim(dims));
  auto *dst = self->mutable_data<T>(place);
  std::memcpy(dst, array.data(), sizeof(T) * array.size());
}
template <>
// This specialization maps uint16_t in the parameter type to
// platform::float16: NumPy has no native float16 type id pybind11 exposes
// here, so fp16 data arrives as raw uint16_t words and is stored into a
// float16-typed tensor buffer of identical width.
inline void PyCPUTensorSetFromArray(
    framework::Tensor *self,
    pybind11::array_t<uint16_t,
                      pybind11::array::c_style | pybind11::array::forcecast>
        array,
    paddle::platform::CPUPlace place) {
  std::vector<int64_t> dims;
  dims.reserve(array.ndim());
  for (decltype(array.ndim()) i = 0; i < array.ndim(); ++i) {
    // Avoid narrowing to int: preserve 64-bit shape extents.
    dims.push_back(static_cast<int64_t>(array.shape()[i]));
  }
  self->Resize(framework::make_ddim(dims));
  auto *dst = self->mutable_data<platform::float16>(place);
  // sizeof(uint16_t) == sizeof(platform::float16); copy the bits verbatim.
  std::memcpy(dst, array.data(), sizeof(uint16_t) * array.size());
}
template <typename T, size_t D>
void _sliceCompute(const framework::Tensor *in, framework::Tensor *out,
const platform::CPUDeviceContext &ctx,
......@@ -485,84 +451,6 @@ inline framework::Tensor *PySliceTensor(const framework::Tensor &self,
}
}
#ifdef PADDLE_WITH_CUDA
// Copies a NumPy ndarray into a tensor allocated on a CUDA device.
//
// Resizes the tensor to the array's shape and performs a synchronous
// host-to-device copy of the contiguous buffer.
//
// @param self   destination tensor, resized in place.
// @param array  source ndarray with element type T (host memory).
// @param place  CUDA place used to allocate the device buffer.
template <typename T>
void PyCUDATensorSetFromArray(
    framework::Tensor *self,
    pybind11::array_t<T, pybind11::array::c_style | pybind11::array::forcecast>
        array,
    paddle::platform::CUDAPlace place) {
  std::vector<int64_t> dims;
  dims.reserve(array.ndim());
  for (decltype(array.ndim()) i = 0; i < array.ndim(); ++i) {
    // Keep full 64-bit extents; the old static_cast<int> could truncate.
    dims.push_back(static_cast<int64_t>(array.shape()[i]));
  }
  self->Resize(framework::make_ddim(dims));
  auto *dst = self->mutable_data<T>(place);
  // Synchronous copy: the source is ordinary pageable host memory.
  paddle::platform::GpuMemcpySync(dst, array.data(), sizeof(T) * array.size(),
                                  cudaMemcpyHostToDevice);
}
template <>
// This specialization maps uint16_t in the parameter type to
// platform::float16: fp16 NumPy data arrives as raw uint16_t words and is
// copied bit-for-bit into a float16-typed device buffer.
inline void PyCUDATensorSetFromArray(
    framework::Tensor *self,
    pybind11::array_t<uint16_t,
                      pybind11::array::c_style | pybind11::array::forcecast>
        array,
    paddle::platform::CUDAPlace place) {
  std::vector<int64_t> dims;
  dims.reserve(array.ndim());
  for (decltype(array.ndim()) i = 0; i < array.ndim(); ++i) {
    // Avoid narrowing to int: preserve 64-bit shape extents.
    dims.push_back(static_cast<int64_t>(array.shape()[i]));
  }
  self->Resize(framework::make_ddim(dims));
  auto *dst = self->mutable_data<platform::float16>(place);
  // sizeof(uint16_t) == sizeof(platform::float16); synchronous H2D copy.
  paddle::platform::GpuMemcpySync(dst, array.data(),
                                  sizeof(uint16_t) * array.size(),
                                  cudaMemcpyHostToDevice);
}
// Copies a NumPy ndarray into a tensor allocated in CUDA pinned (page-locked)
// host memory. Pinned memory is host-addressable, so a plain memcpy suffices.
//
// @param self   destination tensor, resized in place.
// @param array  source ndarray with element type T.
// @param place  CUDA pinned place used to allocate the buffer.
template <typename T>
void PyCUDAPinnedTensorSetFromArray(
    framework::Tensor *self,
    pybind11::array_t<T, pybind11::array::c_style | pybind11::array::forcecast>
        array,
    const paddle::platform::CUDAPinnedPlace &place) {
  std::vector<int64_t> dims;
  dims.reserve(array.ndim());
  for (decltype(array.ndim()) i = 0; i < array.ndim(); ++i) {
    // Keep full 64-bit extents; the old static_cast<int> could truncate.
    dims.push_back(static_cast<int64_t>(array.shape()[i]));
  }
  self->Resize(framework::make_ddim(dims));
  auto *dst = self->mutable_data<T>(place);
  std::memcpy(dst, array.data(), sizeof(T) * array.size());
}
template <>
// This specialization maps uint16_t in the parameter type to
// platform::float16: fp16 NumPy data arrives as raw uint16_t words and is
// copied bit-for-bit into a float16-typed pinned-memory buffer.
inline void PyCUDAPinnedTensorSetFromArray(
    framework::Tensor *self,
    pybind11::array_t<uint16_t,
                      pybind11::array::c_style | pybind11::array::forcecast>
        array,
    const paddle::platform::CUDAPinnedPlace &place) {
  std::vector<int64_t> dims;
  dims.reserve(array.ndim());
  for (decltype(array.ndim()) i = 0; i < array.ndim(); ++i) {
    // Avoid narrowing to int: preserve 64-bit shape extents.
    dims.push_back(static_cast<int64_t>(array.shape()[i]));
  }
  self->Resize(framework::make_ddim(dims));
  auto *dst = self->mutable_data<platform::float16>(place);
  // Pinned memory is host-addressable, so memcpy the raw fp16 bits.
  std::memcpy(dst, array.data(), sizeof(uint16_t) * array.size());
}
#endif
inline py::array TensorToPyArray(const framework::Tensor &tensor) {
if (!tensor.IsInitialized()) {
return py::array();
......
......@@ -280,6 +280,45 @@ class TestTensor(unittest.TestCase):
isinstance(
tensor._mutable_data(places[0], dtype), numbers.Integral))
def test_tensor_set_fp16(self):
    """Setting a float16 ndarray must keep dtype FP16 and round-trip the values
    on every available place (CPU, and CUDA / CUDA-pinned when compiled in)."""
    src = numpy.random.random((300, 500)).astype("float16")
    tensor = fluid.Tensor()

    # Exercise every place this build supports.
    target_places = [core.CPUPlace()]
    if core.is_compiled_with_cuda():
        target_places.append(core.CUDAPlace(0))
        target_places.append(core.CUDAPinnedPlace())

    for target in target_places:
        tensor.set(src, target)
        self.assertEqual(tensor._dtype(), core.VarDesc.VarType.FP16)
        self.assertTrue(numpy.array_equal(numpy.array(tensor), src))
def test_tensor_set_from_array_list(self):
    """tensor.set() must accept a Python list of ndarrays, stacking them along
    a new leading axis, on every available place."""
    block = numpy.random.randint(1000, size=(200, 300))
    stacked_input = [block, block]
    tensor = fluid.Tensor()

    # Exercise every place this build supports.
    candidate_places = [core.CPUPlace()]
    if core.is_compiled_with_cuda():
        candidate_places.extend([core.CUDAPlace(0), core.CUDAPinnedPlace()])

    for target in candidate_places:
        tensor.set(stacked_input, target)
        self.assertEqual([2, 200, 300], tensor.shape())
        self.assertTrue(numpy.array_equal(numpy.array(tensor), stacked_input))
# Entry point: run all test cases in this module when executed as a script.
if __name__ == '__main__':
    unittest.main()
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
To comment, please register or sign in.