Commit 20a6ae7f authored by Yan Chunwei, committed by GitHub

Feature/tensor array add python binding (#4616)

Parent 4c96008a
...@@ -26,6 +26,9 @@ namespace framework {
 * in original lod-tensor.
 */
struct DySeqMeta {
DySeqMeta(size_t begin, size_t end, size_t ori_idx)
: begin(begin), end(end), ori_idx(ori_idx) {}
  size_t begin;
  size_t end;  // not included
  size_t ori_idx;
......
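For reference, each DySeqMeta describes one sub-sequence as a half-open range [begin, end) in the packed tensor plus its index in the original LoDTensor before reordering. The binding below marshals it to Python as a plain 3-element list per sub-sequence; a minimal sketch (values taken from the test at the end of this diff):

# One entry per sub-sequence, as produced by unpack() / consumed by pack():
# [begin, end, ori_idx] -- end is not included, ori_idx is the position in
# the original LoDTensor before length-descending reordering.
meta_info = [[5, 10, 2], [2, 5, 1], [0, 2, 0]]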
if(WITH_PYTHON)
  cc_library(paddle_pybind SHARED
    SRCS pybind.cc exception.cc protobuf.cc
    DEPS pybind python backward proto_desc tensor_array
    ${GLOB_OP_LIB})
endif(WITH_PYTHON)
...@@ -16,6 +16,7 @@ limitations under the License. */
#include "paddle/framework/backward.h"
#include "paddle/framework/lod_tensor.h"
#include "paddle/framework/tensor_array.h"
#include "paddle/operators/cond_op.h" #include "paddle/operators/cond_op.h"
#include "paddle/operators/net_op.h" #include "paddle/operators/net_op.h"
#include "paddle/operators/recurrent_op.h" #include "paddle/operators/recurrent_op.h"
...@@ -286,6 +287,56 @@ All parameter, weight, gradient are variables in Paddle. ...@@ -286,6 +287,56 @@ All parameter, weight, gradient are variables in Paddle.
self->CompleteAddOp(); self->CompleteAddOp();
}); });
py::class_<framework::TensorArray>(m, "TensorArray")
.def("__init__",
[](TensorArray &instance) { new (&instance) TensorArray(); })
.def("read",
[](TensorArray &self, size_t index) { return self.Read(index); })
.def("write", [](TensorArray &self, size_t index,
LoDTensor &value) { self.Write(index, value); })
.def("write_shared",
[](TensorArray &self, size_t index, const LoDTensor &value) {
self.WriteShared(index, value);
})
.def("size", [](TensorArray &self) { return self.size(); })
.def("pack",
[](TensorArray &self, size_t level,
const std::vector<std::vector<size_t>> &meta_info,
const std::vector<std::vector<size_t>> &lod) {
std::vector<DySeqMeta> meta;
for (auto &info : meta_info) {
PADDLE_ENFORCE_EQ(info.size(), 3UL);
meta.emplace_back(info[0], info[1], info[2]);
}
#ifndef PADDLE_WITH_CUDA
return self.Pack(level, meta, lod);
#else
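            // On CUDA builds framework::LoD is not a plain
            // std::vector<std::vector<size_t>>, so copy the levels from the
            // Python-provided vector into a LoD before packing.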
LoD new_lod;
new_lod.reserve(lod.size());
std::copy(lod.begin(), lod.end(), std::back_inserter(new_lod));
return self.Pack(level, meta, new_lod);
#endif
})
.def("unpack",
[](TensorArray &self, const LoDTensor &source, int level,
bool length_descend) {
auto metas = self.Unpack(source, level, length_descend);
std::vector<std::vector<size_t>> meta_info;
for (auto meta : metas) {
meta_info.emplace_back(
std::vector<size_t>({meta.begin, meta.end, meta.ori_idx}));
}
return meta_info;
})
.def("stack", [](TensorArray &self) { return self.Stack(); })
.def("unstack",
[](TensorArray &self, const LoDTensor &source) {
return self.Unstack(source);
})
.def("unstack_shared", [](TensorArray &self, const LoDTensor &source) {
return self.UnstackShared(source);
});
  // recurrent_op
  py::class_<operators::RecurrentOp, OperatorBase>(m, "RecurrentOp")
      .def_static(
......
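Taken together, the binding above lets Python drive the unpack/pack cycle directly. A minimal sketch of the intended flow, assuming the module is imported as paddle.v2.framework.core as in the test below, and that lod_tensor is any LoDTensor prepared the same way the test prepares self.tensor:

import paddle.v2.framework.core as core

ta = core.TensorArray()
# Split lod_tensor into step tensors along lod level 0,
# longest sequences first (length_descend=True).
meta = ta.unpack(lod_tensor, 0, True)    # list of [begin, end, ori_idx]
step = ta.read(0)                        # fetch one step tensor
ta.write(0, step)                        # write a step back (copy semantics)
# Reassemble a LoDTensor from the steps using the recorded meta and lod.
packed = ta.pack(0, meta, lod_tensor.lod())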
import logging
import paddle.v2.framework.core as core
import unittest
import numpy as np


class TestTensorArray(unittest.TestCase):
    def setUp(self):
        self.ta = core.TensorArray()

        self.batch_size = 10
        self.dim = 2

        # create a LoDTensor of shape [batch_size, dim] whose first column
        # holds 0..batch_size-1
        self.scope = core.Scope()
        var = self.scope.new_var("test_tensor")
        self.place = core.CPUPlace()
        tensor = var.get_tensor()
        tensor.set_dims([self.batch_size, self.dim])
        tensor.alloc_float(self.place)
        tensor_array = np.array(tensor)
        for i in range(self.batch_size):
            tensor_array[i, 0] = i

        lod_py = [[0, 2, 5, 10]]
        lod_tensor = core.LoDTensor(lod_py)
        lod_tensor.set(tensor_array, self.place)

        self.py_seq_meta = [[5, 10, 2], [2, 5, 1], [0, 2, 0]]

        self.tensor = lod_tensor

    def test_unstack(self):
        self.ta.unstack(self.tensor)
        self.assertEqual(self.tensor.get_dims()[0], self.ta.size())

    def test_read(self):
        self.ta.unstack(self.tensor)
        # read() should succeed for every valid index
        for i in range(self.batch_size):
            tensor = self.ta.read(i)

    def test_write(self):
        self.ta.unstack(self.tensor)

        # create a tensor with shape of [1, self.dim]
        var = self.scope.new_var("hell")
        tensor = var.get_tensor()
        tensor.set_dims([1, self.dim])
        tensor.alloc_float(self.place)
        tensor_array = np.array(tensor)
        for i in range(self.dim):
            tensor_array[0, i] = i
        tensor.set(tensor_array, self.place)

        self.ta.write(2, tensor)

        ta_tensor = self.ta.read(2)
        ta_tensor_array = np.array(ta_tensor)
        self.assertEqual(ta_tensor.get_dims(), [1, self.dim])
        self.assertTrue((tensor_array == ta_tensor_array).all())

    def test_write_shared(self):
        self.ta.unstack(self.tensor)

        # create a tensor with shape of [1, self.dim]
        var = self.scope.new_var("hell")
        tensor = var.get_tensor()
        tensor.set_dims([1, self.dim])
        tensor.alloc_float(self.place)
        tensor_array = np.array(tensor)
        for i in range(self.dim):
            tensor_array[0, i] = i
        tensor.set(tensor_array, self.place)

        self.ta.write_shared(2, tensor)

        ta_tensor = self.ta.read(2)
        ta_tensor_array = np.array(ta_tensor)
        self.assertEqual(ta_tensor.get_dims(), [1, self.dim])
        self.assertTrue((tensor_array == ta_tensor_array).all())

    def test_unpack(self):
        meta = self.ta.unpack(self.tensor, 0, True)
        self.assertEqual(self.ta.size(), 5)
        self.assertEqual(meta, self.py_seq_meta)

    def test_pack(self):
        meta = self.ta.unpack(self.tensor, 0, True)
        print "meta", meta
        tensor = self.ta.pack(0, meta, self.tensor.lod())
        print np.array(self.tensor)
        print np.array(tensor)
        self.assertTrue((np.array(self.tensor) == np.array(tensor)).all())
        self.assertEqual(tensor.lod(), self.tensor.lod())


if __name__ == '__main__':
    unittest.main()