Unverified commit 2be4c3cb, authored by Yu Yang, committed by GitHub

Feature/lod tensor array (#5379)

* Use stable_sort in lod_rank_table

It is easier to debug and test when using `stable_sort`, and the time
complexity is unchanged.

* Add LoDTensorArray
Parent: cb0118f3
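The point of the first bullet is that `std::stable_sort` keeps items that compare equal in their original relative order, so repeated runs produce an identical rank table at no extra asymptotic cost. A minimal, self-contained illustration (not the `lod_rank_table` code itself; the pair layout here is made up for the example):

```cpp
#include <algorithm>
#include <cstdio>
#include <utility>
#include <vector>

// Sort (length, index) pairs by length, descending. With
// std::stable_sort, entries of equal length keep their original
// relative order, so the output is fully deterministic -- the
// property the commit message relies on for debugging and testing.
int main() {
  std::vector<std::pair<size_t, size_t>> items = {
      {3, 0}, {5, 1}, {3, 2}, {5, 3}};
  std::stable_sort(items.begin(), items.end(),
                   [](const std::pair<size_t, size_t>& a,
                      const std::pair<size_t, size_t>& b) {
                     return a.first > b.first;
                   });
  for (const auto& it : items) {
    // Prints (5,1) (5,3) (3,0) (3,2): ties stay in insertion order.
    std::printf("(%zu,%zu) ", it.first, it.second);
  }
  return 0;
}
```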
@@ -23,6 +23,7 @@ limitations under the License. */
 #include "paddle/framework/feed_fetch_type.h"
 #include "paddle/framework/lod_rank_table.h"
 #include "paddle/framework/lod_tensor.h"
+#include "paddle/framework/lod_tensor_array.h"
 #include "paddle/framework/op_registry.h"
 #include "paddle/framework/scope.h"
@@ -73,6 +74,8 @@ static void CreateTensor(Variable* var, VarDesc::VarType var_type) {
     var->GetMutable<std::vector<framework::Scope>>();
   } else if (var_type == VarDesc::LOD_RANK_TABLE) {
     var->GetMutable<LoDRankTable>();
+  } else if (var_type == VarDesc::LOD_TENSOR_ARRAY) {
+    var->GetMutable<LoDTensorArray>();
   } else {
     PADDLE_THROW(
         "Variable type %d is not in "
...
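`CreateTensor` dispatches on the `VarDesc::VarType` tag and relies on `Variable::GetMutable<T>()` to lazily default-construct the payload on first access. A simplified sketch of that holder mechanism, assuming a type-erased design along these lines (an illustrative model, not Paddle's actual `Variable`):

```cpp
#include <cassert>
#include <memory>
#include <typeindex>
#include <vector>

// Simplified model of a type-erased variable holder: the first
// GetMutable<T>() call default-constructs a T; later calls must ask
// for the same T again.
class Variable {
 public:
  template <typename T>
  T* GetMutable() {
    if (holder_ == nullptr) {
      holder_.reset(new Holder<T>());
    }
    assert(holder_->Type() == std::type_index(typeid(T)) &&
           "Variable already holds a different type");
    return static_cast<T*>(holder_->Ptr());
  }

 private:
  struct HolderBase {
    virtual ~HolderBase() {}
    virtual void* Ptr() = 0;
    virtual std::type_index Type() const = 0;
  };
  template <typename T>
  struct Holder : HolderBase {
    void* Ptr() override { return &value_; }
    std::type_index Type() const override { return typeid(T); }
    T value_;
  };
  std::unique_ptr<HolderBase> holder_;
};

// Usage:
//   Variable v;
//   auto* arr = v.GetMutable<std::vector<int>>();  // constructs the vector
//   arr->push_back(42);
```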
@@ -109,6 +109,11 @@ message LoDTensorDesc {
   optional int32 lod_level = 2 [ default = 0 ];
 }

+message LoDTensorArrayDesc {
+  required TensorDesc tensor = 1;
+  optional int32 lod_level = 2 [ default = 0 ];
+}
+
 message VarDesc {
   enum VarType {
     LOD_TENSOR = 1;
@@ -117,11 +122,13 @@ message VarDesc {
     FETCH_LIST = 4;
     STEP_SCOPES = 5;
     LOD_RANK_TABLE = 6;
+    LOD_TENSOR_ARRAY = 7;
   }

   required string name = 1;
   required VarType type = 2;
   optional LoDTensorDesc lod_tensor = 3;
   optional TensorDesc selected_rows = 4;
+  optional LoDTensorArrayDesc tensor_array = 6;
   optional bool persistable = 5 [ default = false ];
 }
...
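With the new descriptor, a `LOD_TENSOR_ARRAY` variable is described by `tensor_array` (note it takes the previously unused field tag 6, while `persistable` keeps tag 5). A sketch of filling one in through the generated protobuf C++ API; `DescribeArrayVar` is a hypothetical helper, and the `FP32`/`dims` setters are assumed from the full `TensorDesc` message:

```cpp
#include "paddle/framework/framework.pb.h"

// Hypothetical helper: describe a variable holding an array of LoD
// tensors. Setter names follow the generated API for the messages
// above; the data_type/dims fields are assumed from TensorDesc.
void DescribeArrayVar(paddle::framework::VarDesc* var) {
  var->set_name("array_var");  // required string name = 1
  var->set_type(paddle::framework::VarDesc::LOD_TENSOR_ARRAY);
  auto* arr = var->mutable_tensor_array();
  arr->set_lod_level(1);  // one level of sequence information
  auto* tensor = arr->mutable_tensor();
  tensor->set_data_type(paddle::framework::DataType::FP32);  // assumed enum
  tensor->add_dims(-1);  // batch dimension, resolved at run time
  tensor->add_dims(128);
}
```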
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once
#include <vector>
#include "paddle/framework/lod_tensor.h"

namespace paddle {
namespace framework {
using LoDTensorArray = std::vector<LoDTensor>;
}  // namespace framework
}  // namespace paddle
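Since `LoDTensorArray` is just `std::vector<LoDTensor>`, ordinary vector operations apply and each element carries its own shape, LoD, and buffer. A small usage sketch (assuming the usual `make_ddim`/`mutable_data` tensor API):

```cpp
#include "paddle/framework/lod_tensor_array.h"
#include "paddle/platform/place.h"

// Sketch: fill an array with three one-element float tensors. Each
// element is an independent LoDTensor; push_back copies the tensor
// header while the underlying buffer is reference-counted.
void FillArray(paddle::framework::LoDTensorArray* array) {
  namespace fw = paddle::framework;
  for (int i = 0; i < 3; ++i) {
    fw::LoDTensor t;
    t.Resize(fw::make_ddim({1}));
    *t.mutable_data<float>(paddle::platform::CPUPlace()) =
        static_cast<float>(i);
    array->push_back(t);
  }
}
```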
@@ -37,13 +37,27 @@ std::vector<int64_t> VarDescBind::Shape() const {

 DataType VarDescBind::GetDataType() const { return tensor_desc().data_type(); }

 void VarDescBind::SetLoDLevel(int32_t lod_level) {
-  PADDLE_ENFORCE(desc_.type() == VarDesc::LOD_TENSOR);
-  desc_.mutable_lod_tensor()->set_lod_level(lod_level);
+  switch (desc_.type()) {
+    case VarDesc::LOD_TENSOR:
+      desc_.mutable_lod_tensor()->set_lod_level(lod_level);
+      break;
+    case VarDesc::LOD_TENSOR_ARRAY:
+      desc_.mutable_tensor_array()->set_lod_level(lod_level);
+      break;
+    default:
+      PADDLE_THROW("Tensor type=%d does not support LoDLevel", desc_.type());
+  }
 }

 int32_t VarDescBind::GetLodLevel() const {
-  PADDLE_ENFORCE(desc_.type() == VarDesc::LOD_TENSOR);
-  return desc_.lod_tensor().lod_level();
+  switch (desc_.type()) {
+    case VarDesc::LOD_TENSOR:
+      return desc_.lod_tensor().lod_level();
+    case VarDesc::LOD_TENSOR_ARRAY:
+      return desc_.tensor_array().lod_level();
+    default:
+      PADDLE_THROW("Tensor type=%d does not support LoDLevel", desc_.type());
+  }
 }

 const TensorDesc &VarDescBind::tensor_desc() const {
@@ -53,6 +67,8 @@ const TensorDesc &VarDescBind::tensor_desc() const {
       return desc_.selected_rows();
     case VarDesc::LOD_TENSOR:
       return desc_.lod_tensor().tensor();
+    case VarDesc::LOD_TENSOR_ARRAY:
+      return desc_.tensor_array().tensor();
     default:
       PADDLE_THROW("Unexpected branch.");
   }
@@ -66,6 +82,8 @@ TensorDesc *VarDescBind::mutable_tensor_desc() {
       return desc_.mutable_selected_rows();
     case VarDesc::LOD_TENSOR:
       return desc_.mutable_lod_tensor()->mutable_tensor();
+    case VarDesc::LOD_TENSOR_ARRAY:
+      return desc_.mutable_tensor_array()->mutable_tensor();
     default:
       PADDLE_THROW("Unexpected branch.");
   }
...
...@@ -239,7 +239,8 @@ void BindVarDsec(py::module &m) { ...@@ -239,7 +239,8 @@ void BindVarDsec(py::module &m) {
.value("FEED_MINIBATCH", VarDesc::FEED_MINIBATCH) .value("FEED_MINIBATCH", VarDesc::FEED_MINIBATCH)
.value("FETCH_LIST", VarDesc::FETCH_LIST) .value("FETCH_LIST", VarDesc::FETCH_LIST)
.value("STEP_SCOPES", VarDesc::STEP_SCOPES) .value("STEP_SCOPES", VarDesc::STEP_SCOPES)
.value("LOD_RANK_TABLE", VarDesc::LOD_RANK_TABLE); .value("LOD_RANK_TABLE", VarDesc::LOD_RANK_TABLE)
.value("LOD_TENSOR_ARRAY", VarDesc::LOD_TENSOR_ARRAY);
} }
void BindOpDesc(py::module &m) { void BindOpDesc(py::module &m) {
......
@@ -23,6 +23,7 @@ limitations under the License. */
 #include "paddle/framework/framework.pb.h"
 #include "paddle/framework/lod_rank_table.h"
 #include "paddle/framework/lod_tensor.h"
+#include "paddle/framework/lod_tensor_array.h"
 #include "paddle/framework/prune.h"
 #include "paddle/framework/selected_rows.h"
 #include "paddle/framework/tensor_array.h"
@@ -233,6 +234,9 @@ All parameter, weight, gradient are variables in Paddle.
            return self.GetMutable<SelectedRows>();
          },
          py::return_value_policy::reference)
+      .def("get_lod_tensor_array",
+           [](Variable &self) { return self.GetMutable<LoDTensorArray>(); },
+           py::return_value_policy::reference)
 #ifdef PADDLE_WITH_CUDA
       .def("get_communicator",
            [](Variable &self) -> platform::Communicator * {
@@ -505,6 +509,23 @@ All parameter, weight, gradient are variables in Paddle.
         return res;
       });

+  py::class_<LoDTensorArray>(m, "LoDTensorArray")
+      .def("__getitem__",
+           [](LoDTensorArray &self, size_t i) { return &self.at(i); },
+           py::return_value_policy::reference)
+      .def("__len__", [](LoDTensorArray &self) { return self.size(); })
+      .def("__setitem__",
+           [](LoDTensorArray &self, size_t i, const LoDTensor &t) {
+             PADDLE_ENFORCE_LT(i, self.size());
+             self[i].ShareDataWith(t);
+             self[i].set_lod(t.lod());
+           })
+      .def("append", [](LoDTensorArray &self, const LoDTensor &t) {
+        self.emplace_back();
+        self.back().ShareDataWith(t);
+        self.back().set_lod(t.lod());
+      });
+
   m.def("op_support_gpu", OpSupportGPU);
 #ifdef PADDLE_WITH_CUDA
   m.def("get_cuda_device_count", platform::GetCUDADeviceCount);
...
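Note that both `append` and `__setitem__` go through `ShareDataWith`, so the stored element aliases the source tensor's buffer instead of copying it. A sketch of that aliasing behaviour (the same pattern as the binding above, assuming the CPU tensor API):

```cpp
#include "paddle/framework/lod_tensor_array.h"
#include "paddle/platform/place.h"

// ShareDataWith makes the array element point at the source tensor's
// buffer, so a later write through the source is visible through the
// array element as well.
void AliasingDemo() {
  namespace fw = paddle::framework;
  fw::LoDTensor t;
  t.Resize(fw::make_ddim({1}));
  float* data = t.mutable_data<float>(paddle::platform::CPUPlace());
  data[0] = 1.0f;

  fw::LoDTensorArray arr;
  arr.emplace_back();
  arr.back().ShareDataWith(t);  // same pattern as `append` above
  arr.back().set_lod(t.lod());

  data[0] = 2.0f;  // mutates the shared buffer...
  // ...so arr.back().data<float>()[0] now reads 2.0f as well.
}
```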
import unittest
import paddle.v2.framework.core as core
import numpy


class TestLoDTensorArray(unittest.TestCase):
    def test_get_set(self):
        scope = core.Scope()
        arr = scope.var('tmp_lod_tensor_array')
        tensor_array = arr.get_lod_tensor_array()
        self.assertEqual(0, len(tensor_array))
        cpu = core.CPUPlace()
        for i in xrange(10):
            t = core.LoDTensor()
            t.set(numpy.array([i], dtype='float32'), cpu)
            t.set_lod([[0, 1]])
            tensor_array.append(t)

        self.assertEqual(10, len(tensor_array))

        for i in xrange(10):
            t = tensor_array[i]
            self.assertEqual(numpy.array(t), numpy.array([i], dtype='float32'))
            self.assertEqual([[0, 1]], t.lod())

            t = core.LoDTensor()
            t.set(numpy.array([i + 10], dtype='float32'), cpu)
            t.set_lod([[0, 2]])
            tensor_array[i] = t
            t = tensor_array[i]
            self.assertEqual(
                numpy.array(t), numpy.array(
                    [i + 10], dtype='float32'))
            self.assertEqual([[0, 2]], t.lod())


if __name__ == '__main__':
    unittest.main()