Commit f03811b0 authored by qijun

init refine LODTensor

Parent 01f10e64
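This commit starts refactoring LODTensor: LOD becomes a free-standing alias `std::vector<Vector<size_t>>`, level/element slicing is split into free functions plus in-place member methods, and LODTensor now references a `Tensor*` it does not own instead of inheriting from Tensor. A minimal usage sketch of the refactored API, mirroring the setup in the test file below; the include path and the `Example` function name are assumptions, not part of the commit:

    // Sketch only: exercises the new LOD/LODTensor API as used in the tests below.
    #include "paddle/framework/lod_tensor.h"  // assumed header path

    using namespace paddle::framework;

    void Example() {
      // Three LOD levels over a batch of 20 units (same offsets as the test fixture).
      LOD lod;
      lod.push_back(std::vector<size_t>{0, 10, 20});
      lod.push_back(std::vector<size_t>{0, 5, 10, 15, 20});
      lod.push_back(std::vector<size_t>{0, 2, 5, 7, 10, 12, 15, 17, 20});

      Tensor tensor;
      tensor.Resize({20 /*batch size*/, 128 /*dim*/});
      paddle::platform::CPUPlace place;
      tensor.mutable_data<float>(place);

      // LODTensor only references the tensor; it does not own it.
      LODTensor lod_tensor(lod, &tensor);

      // Slicing now mutates the LOD in place; the tensor data stays shared.
      LODTensor sliced = lod_tensor;      // copy is cheap: LOD offsets + a pointer
      sliced.SliceLevels(1, 3);           // keep levels [1, 3)
      sliced.SliceInLevel(0, 0, 2);       // keep elements [0, 2) of its level 0
    }

The non-owning pointer means a sliced LODTensor is just a new set of offsets over the same underlying data, which is what the tests below assert by comparing `tensor_->data<float>()` pointers.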
@@ -19,25 +19,24 @@
 namespace paddle {
 namespace framework {
 
-LODTensor::LOD LODTensor::LOD::SliceLevels(size_t level_begin,
-                                           size_t level_end) const {
+LOD SliceLevels(const LOD& in, size_t level_begin, size_t level_end) {
   LOD new_lod;
   new_lod.reserve(level_end - level_begin);
   for (size_t i = level_begin; i < level_end; i++) {
-    new_lod.emplace_back(at(i));
+    new_lod.emplace_back(in.at(i));
   }
   return new_lod;
 }
 
-LODTensor::LOD LODTensor::LOD::SliceInLevel(size_t level, size_t elem_begin,
-                                            size_t elem_end) const {
+LOD SliceInLevel(const LOD& in, size_t level, size_t elem_begin,
+                 size_t elem_end) {
   // slice the lod.
   LOD new_lod;
-  new_lod.reserve(size() - level);
-  auto start = this->at(level)[elem_begin];
-  auto end = this->at(level)[elem_end];
-  for (auto it = this->begin() + level; it != this->end(); it++) {
+  new_lod.reserve(in.size() - level);
+  auto start = in.at(level)[elem_begin];
+  auto end = in.at(level)[elem_end];
+  for (auto it = in.begin() + level; it != in.end(); it++) {
     auto it_begin = std::find(it->begin(), it->end(), start);
     auto it_end = std::find(it_begin, it->end(), end);
     PADDLE_ENFORCE(it_begin != it->end(), "error in parsing lod info");
@@ -49,11 +48,11 @@ LODTensor::LOD LODTensor::LOD::SliceInLevel(size_t level, size_t elem_begin,
                    [start](int v) { return v - start; });
     PADDLE_ENFORCE_EQ(new_lod.back().front(), 0, "error in slice LOD");
   }
-  PADDLE_ENFORCE_LE(new_lod.size(), this->size());
+  PADDLE_ENFORCE_LE(new_lod.size(), in.size());
   return new_lod;
 }
 
-bool operator==(const LODTensor::LOD& a, const LODTensor::LOD& b) {
+bool operator==(const LOD& a, const LOD& b) {
   if (a.size() != b.size()) {
     return false;
   }
@@ -70,9 +69,27 @@ bool operator==(const LODTensor::LOD& a, const LODTensor::LOD& b) {
       }
     }
   }
   return true;
 }
 
+void LODTensor::SliceLevels(size_t level_begin, size_t level_end) {
+  auto new_lod = framework::SliceLevels(lod_, level_begin, level_end);
+  lod_ = new_lod;
+}
+
+void LODTensor::SliceInLevel(size_t level, size_t elem_begin, size_t elem_end) {
+  PADDLE_ENFORCE(level < NumLevels(), "level [%d] out of range [%d]", level,
+                 NumLevels());
+  PADDLE_ENFORCE(elem_begin < NumElements(level),
+                 "element begin [%d] out of range [%d]", elem_begin,
+                 NumElements(level));
+  PADDLE_ENFORCE(elem_end < NumElements(level) + 1,
+                 "element end [%d] out of range [%d]", elem_end,
+                 NumElements(level));
+  auto new_lod = framework::SliceInLevel(lod_, level, elem_begin, elem_end);
+  lod_ = new_lod;
+}
+
 }  // namespace framework
 }  // namespace paddle
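For reference, SliceInLevel works purely on offsets: it reads the start and end offsets of the selected elements in the chosen level, keeps the offsets falling inside that range for every finer level, and rebases them so each kept slice starts at 0. A standalone sketch of that offset logic using plain std::vector; the names SimpleLOD and SliceInLevelSketch are illustrative only and not part of the Paddle code:

    // Standalone sketch of the offset-rebasing idea (not the Paddle implementation).
    #include <algorithm>
    #include <cstddef>
    #include <vector>

    using SimpleLOD = std::vector<std::vector<size_t>>;

    SimpleLOD SliceInLevelSketch(const SimpleLOD& in, size_t level,
                                 size_t elem_begin, size_t elem_end) {
      SimpleLOD out;
      // Offsets bounding the selected elements in the chosen level.
      size_t start = in.at(level)[elem_begin];
      size_t end = in.at(level)[elem_end];
      for (size_t lvl = level; lvl < in.size(); ++lvl) {
        const auto& offsets = in[lvl];
        // Assumes start/end are present in every finer level (valid LOD).
        auto first = std::find(offsets.begin(), offsets.end(), start);
        auto last = std::find(first, offsets.end(), end);
        std::vector<size_t> kept(first, last + 1);
        for (auto& v : kept) v -= start;  // rebase so each slice starts at 0
        out.push_back(kept);
      }
      return out;
    }

    // With the test's LOD {{0,10,20},{0,5,10,15,20},{0,2,5,7,10,12,15,17,20}},
    // SliceInLevelSketch(lod, 1, 0, 2) yields {{0,5,10},{0,2,5,7,10}}.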
@@ -15,7 +15,7 @@
 #pragma once
 #include <memory>
-#if !defined(PADDLE_ONLY_CPU)
+#ifndef PADDLE_ONLY_CPU
 #include <thrust/device_vector.h>
 #include <thrust/host_vector.h>
 #endif
@@ -27,33 +27,31 @@
 namespace paddle {
 namespace framework {
 
+#ifdef PADDLE_ONLY_CPU
+template <typename T>
+using Vector = std::vector<T>;
+#else
+template <typename T>
+using Vector = thrust::host_vector<T>;
+#endif
+
+using LOD = std::vector<Vector<size_t>>;
+
+LOD SliceLevels(const LOD& in, size_t level_begin, size_t level_end);
+LOD SliceInLevel(const LOD& in, size_t level, size_t elem_begin,
+                 size_t elem_end);
+
+bool operator==(const LOD& a, const LOD& b);
+
 /*
  * LODTensor (Level of details Tensor)
  * see https://en.wikipedia.org/wiki/Level_of_details for reference.
  */
-class LODTensor : public Tensor {
+struct LODTensor {
  public:
-  // Level save offsets of each unit.
-#ifdef PADDLE_ONLY_CPU
-  template <typename T>
-  using Vector = std::vector<T>;
-#else
-  template <typename T>
-  using Vector = thrust::host_vector<T>;
-#endif
-  // LoD stores offsets of each level of units, the largest units level first,
-  // then the smaller units level. Each Level stores the offsets of units in
-  // Tesor.
-  class LOD : public std::vector<Vector<size_t>> {
-   public:
-    LOD SliceLevels(size_t level_begin, size_t level_end) const;
-    LOD SliceInLevel(size_t level, size_t elem_begin, size_t elem_end) const;
-  };
-
   LODTensor() {}
-  explicit LODTensor(const LOD &lod) : lod_(lod) {}
-
-  virtual Tensor *Clone() const { return new LODTensor(lod_); }
+  LODTensor(const LOD& lod, Tensor* t) : lod_(lod), tensor_(t) {}
 
   /*
    * Get a element from LOD.
@@ -79,71 +77,23 @@ class LODTensor : public Tensor {
     PADDLE_ENFORCE(level < NumLevels(), "level [%d] out of range [%d]", level,
                    NumLevels());
     // the last offset is the end of last element
-    return lod_[level].size() - 1;
+    return (lod_)[level].size() - 1;
   }
 
   /*
-   * Slice of levels[level_begin:level_end], with tensor shared.
+   * Slice of levels[level_begin:level_end]
    */
-  template <typename T>
-  LODTensor SliceLevels(size_t level_begin, size_t level_end) const;
+  void SliceLevels(size_t level_begin, size_t level_end);
 
   /*
-   * Slice of elements of a level, [elem_begin: elem_end], with tensor shared.
+   * Slice of elements of a level, [elem_begin: elem_end]
    * @note: low performance in slice lod_.
    */
-  template <typename T>
-  LODTensor SliceInLevel(size_t level, size_t elem_begin,
-                         size_t elem_end) const;
+  void SliceInLevel(size_t level, size_t elem_begin, size_t elem_end);
 
-  /*
-   * Copy other's lod_'s content, free to mutate.
-   */
-  void CopyLOD(const LODTensor &other) { lod_ = other.lod_; }
-
-  /*
-   * Determine whether LODTensor has a valid LOD info.
-   */
-  const LOD &lod() const { return lod_; }
-  LOD *mutable_lod() { return &lod_; }
-
-  virtual ~LODTensor() {}
-
- private:
+ public:
   LOD lod_;
+  Tensor* tensor_;  // not owned
 };
 
-bool operator==(const LODTensor::LOD &a, const LODTensor::LOD &b);
-
-template <typename T>
-LODTensor LODTensor::SliceLevels(size_t level_begin, size_t level_end) const {
-  auto new_lod = lod_.SliceLevels(level_begin, level_end);
-  // slice levels just need to update LOD info, each level will contains the
-  // whole tensor_, so no need to modify tensor_.
-  LODTensor new_tensor(new_lod);
-  new_tensor.ShareDataWith<T>(*this);
-  return new_tensor;
-}
-
-template <typename T>
-LODTensor LODTensor::SliceInLevel(size_t level, size_t elem_begin,
-                                  size_t elem_end) const {
-  PADDLE_ENFORCE(level < NumLevels(), "level [%d] out of range [%d]", level,
-                 NumLevels());
-  PADDLE_ENFORCE(elem_begin < NumElements(level),
-                 "element begin [%d] out of range [%d]", elem_begin,
-                 NumElements(level));
-  PADDLE_ENFORCE(elem_end < NumElements(level) + 1,
-                 "element end [%d] out of range [%d]", elem_end,
-                 NumElements(level));
-  auto new_lod = lod_.SliceInLevel(level, elem_begin, elem_end);
-  // slice elements just need to update LOD info, because offsets are not
-  // changed, so the original tensor_ can be reused.
-  LODTensor new_tensor(new_lod);
-  new_tensor.ShareDataWith<T>(*this);
-  return new_tensor;
-}
 
 }  // namespace framework
 }  // namespace paddle
@@ -21,16 +21,34 @@
 namespace paddle {
 namespace framework {
 
+// TEST(LODTensor, test) {
+//   LOD lod;
+//   lod.push_back(std::vector<size_t>{0, 10, 20});
+//   lod.push_back(std::vector<size_t>{0, 5, 10, 15, 20});
+//   lod.push_back(std::vector<size_t>{0, 2, 5, 7, 10, 12, 15, 17, 20});
+//   ASSERT_EQ(lod.size(), 3UL);
+//   Tensor tensor;
+//   tensor.Resize({20 /*batch size*/, 128 /*dim*/});
+//   // malloc memory
+//   platform::CPUPlace place;
+//   tensor.mutable_data<float>(place);
+//   LODTensor lod_tensor(lod, &tensor);
+//   ASSERT_EQ(lod_tensor.NumLevels(), 3UL);
+// }
+
 class LODTensorTester : public ::testing::Test {
  public:
   virtual void SetUp() override {
-    lod_tensor.reset(new LODTensor);
     // tensor's batch_size: 30
     // 3 levels
     // 0 10 20
     // 0 5 10 15 20
     // 0 2 5 7 10 12 15 20
-    LODTensor::LOD lod;
+    LOD lod;
     lod.push_back(std::vector<size_t>{0, 10, 20});
     lod.push_back(std::vector<size_t>{0, 5, 10, 15, 20});
     lod.push_back(std::vector<size_t>{0, 2, 5, 7, 10, 12, 15, 17, 20});
@@ -41,75 +59,65 @@ class LODTensorTester : public ::testing::Test {
     // malloc memory
     tensor.mutable_data<float>(place);
 
-    lod_tensor.reset(new LODTensor(lod));
-    lod_tensor->Resize({20 /*batch size*/, 128 /*dim*/});
-    lod_tensor->ShareDataWith<float>(tensor);
-    // lod_tensor->ShareDataWith<Tensor>(tensor);
+    lod_tensor.lod_ = lod;
+    lod_tensor.tensor_ = &tensor;
   }
 
  protected:
-  std::unique_ptr<LODTensor> lod_tensor;
   platform::CPUPlace place;
   Tensor tensor;
+  LODTensor lod_tensor;
 };
 
-TEST_F(LODTensorTester, NumLevels) { ASSERT_EQ(lod_tensor->NumLevels(), 3UL); }
+TEST_F(LODTensorTester, NumLevels) { ASSERT_EQ(lod_tensor.NumLevels(), 3UL); }
 
 TEST_F(LODTensorTester, NumElements) {
-  ASSERT_EQ(lod_tensor->NumElements(0), 2UL);
-  ASSERT_EQ(lod_tensor->NumElements(1), 4UL);
-  ASSERT_EQ(lod_tensor->NumElements(2), 8UL);
+  ASSERT_EQ(lod_tensor.NumElements(0), 2UL);
+  ASSERT_EQ(lod_tensor.NumElements(1), 4UL);
+  ASSERT_EQ(lod_tensor.NumElements(2), 8UL);
 }
 
 TEST_F(LODTensorTester, SliceLevels) {
   // slice 1 level
   for (size_t level = 0; level < 3UL; ++level) {
-    auto new_lod_tensor = lod_tensor->SliceLevels<float>(level, level + 1);
+    LODTensor new_lod_tensor = lod_tensor;
+    new_lod_tensor.SliceLevels(level, level + 1);
     ASSERT_EQ(new_lod_tensor.NumLevels(), 1UL);
-    ASSERT_EQ(new_lod_tensor.NumElements(0UL), lod_tensor->NumElements(level));
-    // ASSERT_EQ(new_lod_tensor, *lod_tensor);
+    ASSERT_EQ(new_lod_tensor.NumElements(0), lod_tensor.NumElements(level));
+    ASSERT_EQ(new_lod_tensor.tensor_->data<float>(),
+              lod_tensor.tensor_->data<float>());
   }
   // slice 2 level
   for (size_t level = 0; level < 2UL; ++level) {
-    auto new_lod_tensor = lod_tensor->SliceLevels<float>(level, level + 2);
+    LODTensor new_lod_tensor = lod_tensor;
+    new_lod_tensor.SliceLevels(level, level + 2);
     ASSERT_EQ(new_lod_tensor.NumLevels(), 2UL);
-    ASSERT_EQ(new_lod_tensor.NumElements(0), lod_tensor->NumElements(level));
-    ASSERT_EQ(new_lod_tensor.NumElements(1),
-              lod_tensor->NumElements(level + 1));
-    ASSERT_EQ(new_lod_tensor.data<float>(), lod_tensor->data<float>());
+    ASSERT_EQ(new_lod_tensor.NumElements(0), lod_tensor.NumElements(level));
+    ASSERT_EQ(new_lod_tensor.NumElements(1), lod_tensor.NumElements(level + 1));
+    ASSERT_EQ(new_lod_tensor.tensor_->data<float>(),
+              lod_tensor.tensor_->data<float>());
  }
 }
 
 TEST_F(LODTensorTester, SliceInLevel) {
   size_t level = 0;
-  auto new_lod_tensor = lod_tensor->SliceInLevel<float>(level, 0, 2);
+  LODTensor new_lod_tensor = lod_tensor;
+  new_lod_tensor.SliceInLevel(level, 0, 2);
   EXPECT_EQ(new_lod_tensor.NumLevels(), 3UL);
   EXPECT_EQ(new_lod_tensor.NumElements(0), 2UL);
   EXPECT_EQ(new_lod_tensor.NumElements(1), 4UL);
   EXPECT_EQ(new_lod_tensor.NumElements(2), 8UL);
-  ASSERT_EQ(new_lod_tensor.data<float>(), lod_tensor->data<float>());
+  ASSERT_EQ(new_lod_tensor.tensor_->data<float>(),
+            lod_tensor.tensor_->data<float>());
 
   level = 1;
-  new_lod_tensor = lod_tensor->SliceInLevel<float>(level, 0, 2);
+  new_lod_tensor = lod_tensor;
+  new_lod_tensor.SliceInLevel(level, 0, 2);
   ASSERT_EQ(new_lod_tensor.NumLevels(), 2UL);
   ASSERT_EQ(new_lod_tensor.NumElements(0), 2UL);
   ASSERT_EQ(new_lod_tensor.NumElements(1), 4UL);
-  ASSERT_EQ(new_lod_tensor.data<float>(), lod_tensor->data<float>());
-}
-
-TEST_F(LODTensorTester, ShareLOD) {
-  LODTensor new_lod_tensor;
-  new_lod_tensor.CopyLOD(*lod_tensor);
-  ASSERT_EQ(new_lod_tensor.lod(), lod_tensor->lod());
-}
-
-TEST_F(LODTensorTester, CopyLOD) {
-  LODTensor new_lod_tensor;
-  new_lod_tensor.CopyLOD(*lod_tensor);
-  bool equals = std::equal(lod_tensor->lod().begin(), lod_tensor->lod().end(),
-                           new_lod_tensor.lod().begin());
-  ASSERT_TRUE(equals);
-}
+  ASSERT_EQ(new_lod_tensor.tensor_->data<float>(),
+            lod_tensor.tensor_->data<float>());
+}
 
 }  // namespace framework
......