diff --git a/paddle/framework/executor.cc b/paddle/framework/executor.cc
index c1a009f131c803f7881faf2e322eecf3e6e8ea05..2fcf41d69f0011b0d9a3d89c97fcebacb0703e97 100644
--- a/paddle/framework/executor.cc
+++ b/paddle/framework/executor.cc
@@ -23,6 +23,7 @@ limitations under the License. */
 #include "paddle/framework/feed_fetch_type.h"
 #include "paddle/framework/lod_rank_table.h"
 #include "paddle/framework/lod_tensor.h"
+#include "paddle/framework/lod_tensor_array.h"
 #include "paddle/framework/op_registry.h"
 #include "paddle/framework/scope.h"
 
@@ -73,6 +74,8 @@ static void CreateTensor(Variable* var, VarDesc::VarType var_type) {
     var->GetMutable<std::vector<framework::Scope>>();
   } else if (var_type == VarDesc::LOD_RANK_TABLE) {
     var->GetMutable<LoDRankTable>();
+  } else if (var_type == VarDesc::LOD_TENSOR_ARRAY) {
+    var->GetMutable<LoDTensorArray>();
   } else {
     PADDLE_THROW(
         "Variable type %d is not in "
diff --git a/paddle/framework/framework.proto b/paddle/framework/framework.proto
index 54ce461ce81955c9c3b550998e4a1df32c201ab3..f1fc4529e15502927560eefd74110f6ca7eab4a9 100644
--- a/paddle/framework/framework.proto
+++ b/paddle/framework/framework.proto
@@ -109,6 +109,11 @@ message LoDTensorDesc {
   optional int32 lod_level = 2 [ default = 0 ];
 }
 
+message LoDTensorArrayDesc {
+  required TensorDesc tensor = 1;
+  optional int32 lod_level = 2 [ default = 0 ];
+}
+
 message VarDesc {
   enum VarType {
     LOD_TENSOR = 1;
@@ -117,11 +122,13 @@ message VarDesc {
     FETCH_LIST = 4;
     STEP_SCOPES = 5;
     LOD_RANK_TABLE = 6;
+    LOD_TENSOR_ARRAY = 7;
   }
   required string name = 1;
   required VarType type = 2;
   optional LoDTensorDesc lod_tensor = 3;
   optional TensorDesc selected_rows = 4;
+  optional LoDTensorArrayDesc tensor_array = 6;
   optional bool persistable = 5 [ default = false ];
 }
 
diff --git a/paddle/framework/lod_tensor_array.h b/paddle/framework/lod_tensor_array.h
new file mode 100644
index 0000000000000000000000000000000000000000..13f0608d24be97d8bba149b74f1a4deb57deeb48
--- /dev/null
+++ b/paddle/framework/lod_tensor_array.h
@@ -0,0 +1,23 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License. */
+
+#pragma once
+#include <vector>
+#include "paddle/framework/lod_tensor.h"
+
+namespace paddle {
+namespace framework {
+using LoDTensorArray = std::vector<LoDTensor>;
+}
+}  // namespace paddle
diff --git a/paddle/framework/var_desc.cc b/paddle/framework/var_desc.cc
index 8e92c81d1137472737230be79d71824593d3256f..16aca192d41a64003de85ce45f7697bf45c556ed 100644
--- a/paddle/framework/var_desc.cc
+++ b/paddle/framework/var_desc.cc
@@ -37,13 +37,27 @@ std::vector<int64_t> VarDescBind::Shape() const {
 DataType VarDescBind::GetDataType() const { return tensor_desc().data_type(); }
 
 void VarDescBind::SetLoDLevel(int32_t lod_level) {
-  PADDLE_ENFORCE(desc_.type() == VarDesc::LOD_TENSOR);
-  desc_.mutable_lod_tensor()->set_lod_level(lod_level);
+  switch (desc_.type()) {
+    case VarDesc::LOD_TENSOR:
+      desc_.mutable_lod_tensor()->set_lod_level(lod_level);
+      break;
+    case VarDesc::LOD_TENSOR_ARRAY:
+      desc_.mutable_tensor_array()->set_lod_level(lod_level);
+      break;
+    default:
+      PADDLE_THROW("Tensor type=%d does not support LoDLevel", desc_.type());
+  }
 }
 
 int32_t VarDescBind::GetLodLevel() const {
-  PADDLE_ENFORCE(desc_.type() == VarDesc::LOD_TENSOR);
-  return desc_.lod_tensor().lod_level();
+  switch (desc_.type()) {
+    case VarDesc::LOD_TENSOR:
+      return desc_.lod_tensor().lod_level();
+    case VarDesc::LOD_TENSOR_ARRAY:
+      return desc_.tensor_array().lod_level();
+    default:
+      PADDLE_THROW("Tensor type=%d does not support LoDLevel", desc_.type());
+  }
 }
 
 const TensorDesc &VarDescBind::tensor_desc() const {
@@ -53,6 +67,8 @@ const TensorDesc &VarDescBind::tensor_desc() const {
       return desc_.selected_rows();
     case VarDesc::LOD_TENSOR:
       return desc_.lod_tensor().tensor();
+    case VarDesc::LOD_TENSOR_ARRAY:
+      return desc_.tensor_array().tensor();
     default:
       PADDLE_THROW("Unexpected branch.");
   }
@@ -66,6 +82,8 @@ TensorDesc *VarDescBind::mutable_tensor_desc() {
       return desc_.mutable_selected_rows();
     case VarDesc::LOD_TENSOR:
       return desc_.mutable_lod_tensor()->mutable_tensor();
+    case VarDesc::LOD_TENSOR_ARRAY:
+      return desc_.mutable_tensor_array()->mutable_tensor();
     default:
       PADDLE_THROW("Unexpected branch.");
   }
diff --git a/paddle/pybind/protobuf.cc b/paddle/pybind/protobuf.cc
index d3fc544ec712a753304de8568534c78c154774a5..5462e6c6c7a840f75f8c15195c6d6910f30ef733 100644
--- a/paddle/pybind/protobuf.cc
+++ b/paddle/pybind/protobuf.cc
@@ -239,7 +239,8 @@ void BindVarDsec(py::module &m) {
       .value("FEED_MINIBATCH", VarDesc::FEED_MINIBATCH)
      .value("FETCH_LIST", VarDesc::FETCH_LIST)
       .value("STEP_SCOPES", VarDesc::STEP_SCOPES)
-      .value("LOD_RANK_TABLE", VarDesc::LOD_RANK_TABLE);
+      .value("LOD_RANK_TABLE", VarDesc::LOD_RANK_TABLE)
+      .value("LOD_TENSOR_ARRAY", VarDesc::LOD_TENSOR_ARRAY);
 }
 
 void BindOpDesc(py::module &m) {
diff --git a/paddle/pybind/pybind.cc b/paddle/pybind/pybind.cc
index 78dc7943b3644e7014da427e6aca640d94746f68..0c528174b2b2b3a27869ed0083fc96b8d90e723b 100644
--- a/paddle/pybind/pybind.cc
+++ b/paddle/pybind/pybind.cc
@@ -23,6 +23,7 @@ limitations under the License. */
 #include "paddle/framework/framework.pb.h"
 #include "paddle/framework/lod_rank_table.h"
 #include "paddle/framework/lod_tensor.h"
+#include "paddle/framework/lod_tensor_array.h"
 #include "paddle/framework/prune.h"
 #include "paddle/framework/selected_rows.h"
 #include "paddle/framework/tensor_array.h"
@@ -233,6 +234,9 @@ All parameter, weight, gradient are variables in Paddle.
             return self.GetMutable<LoDRankTable>();
           },
           py::return_value_policy::reference)
+      .def("get_lod_tensor_array",
+           [](Variable &self) { return self.GetMutable<LoDTensorArray>(); },
+           py::return_value_policy::reference)
 #ifdef PADDLE_WITH_CUDA
       .def("get_communicator",
            [](Variable &self) -> platform::Communicator * {
@@ -505,6 +509,23 @@ All parameter, weight, gradient are variables in Paddle.
         return res;
       });
 
+  py::class_<LoDTensorArray>(m, "LoDTensorArray")
+      .def("__getitem__",
+           [](LoDTensorArray &self, size_t i) { return &self.at(i); },
+           py::return_value_policy::reference)
+      .def("__len__", [](LoDTensorArray &self) { return self.size(); })
+      .def("__setitem__",
+           [](LoDTensorArray &self, size_t i, const LoDTensor &t) {
+             PADDLE_ENFORCE_LT(i, self.size());
+             self[i].ShareDataWith(t);
+             self[i].set_lod(t.lod());
+           })
+      .def("append", [](LoDTensorArray &self, const LoDTensor &t) {
+        self.emplace_back();
+        self.back().ShareDataWith(t);
+        self.back().set_lod(t.lod());
+      });
+
   m.def("op_support_gpu", OpSupportGPU);
 #ifdef PADDLE_WITH_CUDA
   m.def("get_cuda_device_count", platform::GetCUDADeviceCount);
diff --git a/python/paddle/v2/framework/tests/test_lod_tensor_array.py b/python/paddle/v2/framework/tests/test_lod_tensor_array.py
new file mode 100644
index 0000000000000000000000000000000000000000..a433bcf622b14a1d2d33b5b98d555e1a21e4b9e8
--- /dev/null
+++ b/python/paddle/v2/framework/tests/test_lod_tensor_array.py
@@ -0,0 +1,38 @@
+import unittest
+import paddle.v2.framework.core as core
+import numpy
+
+
+class TestLoDTensorArray(unittest.TestCase):
+    def test_get_set(self):
+        scope = core.Scope()
+        arr = scope.var('tmp_lod_tensor_array')
+        tensor_array = arr.get_lod_tensor_array()
+        self.assertEqual(0, len(tensor_array))
+        cpu = core.CPUPlace()
+        for i in xrange(10):
+            t = core.LoDTensor()
+            t.set(numpy.array([i], dtype='float32'), cpu)
+            t.set_lod([[0, 1]])
+            tensor_array.append(t)
+
+        self.assertEqual(10, len(tensor_array))
+
+        for i in xrange(10):
+            t = tensor_array[i]
+            self.assertEqual(numpy.array(t), numpy.array([i], dtype='float32'))
+            self.assertEqual([[0, 1]], t.lod())
+
+            t = core.LoDTensor()
+            t.set(numpy.array([i + 10], dtype='float32'), cpu)
+            t.set_lod([[0, 2]])
+            tensor_array[i] = t
+            t = tensor_array[i]
+            self.assertEqual(
+                numpy.array(t), numpy.array(
+                    [i + 10], dtype='float32'))
+            self.assertEqual([[0, 2]], t.lod())
+
+
+if __name__ == '__main__':
+    unittest.main()
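
A minimal C++ usage sketch, not part of the patch itself: it shows how framework-side code could populate the new LoDTensorArray variable type, mirroring what the Python "append" binding above does. The helper name AppendToArray is a hypothetical illustration; the calls it relies on (Scope::Var, Variable::GetMutable, LoDTensor::ShareDataWith, LoDTensor::set_lod) all appear in this diff or in the files it touches.

#include <string>

#include "paddle/framework/lod_tensor_array.h"
#include "paddle/framework/scope.h"

namespace paddle {
namespace framework {

// Hypothetical helper: append `src` to the LoDTensorArray held by the
// variable `name`, creating the variable on first use (the same
// GetMutable<LoDTensorArray>() call that CreateTensor performs above).
void AppendToArray(Scope *scope, const std::string &name,
                   const LoDTensor &src) {
  auto *array = scope->Var(name)->GetMutable<LoDTensorArray>();
  array->emplace_back();
  array->back().ShareDataWith(src);  // share the underlying data buffer
  array->back().set_lod(src.lod());  // copy the LoD metadata
}

}  // namespace framework
}  // namespace paddle

Because ShareDataWith aliases rather than copies, appending is cheap; the same design choice is visible in the pybind "__setitem__" and "append" lambdas, which share data and then copy only the LoD.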