Commit 7000cb61 authored by Yu Yang, committed by GitHub

Merge pull request #863 from reyoung/feature/clean_sparse_row_matrix

Extract RowBuffer class for SparseRowMatrix.
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <vector>
#include "MemoryHandle.h"
#include "paddle/utils/Util.h"
namespace paddle {
/**
 * @brief The RowBuffer class
 * Represents the data of a SparseRow matrix.
 *
 * If no memory handle is set, the buffer can grow automatically.
 */
class RowBuffer {
public:
  /**
   * @brief RowBuffer creates an auto-growth row buffer. The row length is width.
   * @param width the length of each row, a.k.a. the matrix width.
   */
  explicit RowBuffer(size_t width) : width_(width) {}

  /**
   * @brief RowBuffer creates a row buffer that cannot grow automatically.
   * @param mem the pre-allocated memory.
   * @param width the length of each row, a.k.a. the matrix width.
   */
  RowBuffer(const CpuMemHandlePtr& mem, size_t width)
      : preallocatedBuf_(mem), width_(width) {}

  /**
   * @brief resize the buffer to rowCnt rows.
   * @param rowCnt number of rows, i.e., the matrix height.
   */
  inline void resize(int rowCnt) {
    if (preallocatedBuf_) {
      CHECK(preallocatedBuf_->getSize() >= rowCnt * width_ * sizeof(real));
    } else {
      rowStore_.resize(rowCnt * width_);
    }
  }

  /**
   * @brief get a row buffer by row index.
   * @param row the index of the row.
   * @return row buffer.
   */
  inline real* get(int row) const {
    if (preallocatedBuf_) {
      CHECK_LE((row + 1) * width_ * sizeof(real), preallocatedBuf_->getSize());
      return reinterpret_cast<real*>(preallocatedBuf_->getBuf()) + row * width_;
    } else {
      CHECK_LE((row + 1) * width_, rowStore_.size());
      return const_cast<real*>(rowStore_.data() + row * width_);
    }
  }

  /**
   * @brief get a row buffer by row index. If the row index is beyond the
   * local buffer, the local buffer grows to hold it.
   * @param row the index of the row.
   * @return row buffer.
   */
  inline real* getWithAutoGrowth(int row) {
    if (preallocatedBuf_) {
      return get(row);
    } else {
      if (rowStore_.size() <= row * width_) {
        rowStore_.resize((row + 1) * width_);
      }
      return rowStore_.data() + row * width_;
    }
  }

  /**
   * @return raw data buffer.
   */
  inline real* data() {
    if (preallocatedBuf_) {
      return reinterpret_cast<real*>(preallocatedBuf_->getBuf());
    } else {
      return rowStore_.data();
    }
  }

  /**
   * @brief clear the local buffer. It only affects the auto-growth buffer.
   */
  inline void clear() { rowStore_.clear(); }

  /**
   * @brief get the current number of rows.
   * @return number of rows.
   */
  inline size_t getRowCount() const {
    if (preallocatedBuf_) {
      return preallocatedBuf_->getSize() / sizeof(real) / width_;
    } else {
      return rowStore_.size() / width_;
    }
  }

  /**
   * @brief whether this buffer can grow automatically.
   * @return true if it can grow automatically.
   */
  inline bool isAutoGrowth() const { return !preallocatedBuf_; }

  /**
   * @brief return the width of the matrix, a.k.a. the length of each row.
   * @return width of the matrix.
   */
  inline size_t getWidth() const { return width_; }

private:
  //! TODO(yuyang18): Add resize method to CpuMemHandlePtr, then we can get rid
  //! of std::vector here.
  CpuMemHandlePtr preallocatedBuf_;
  std::vector<real, AlignedAllocator<real, 32>> rowStore_;
  size_t width_;
};
} // namespace paddle
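A minimal usage sketch of the new class (not part of the commit; the include path, the project-wide `real` typedef, and the `CpuMemoryHandle` size constructor are assumed from the PaddlePaddle tree, as also exercised by the unit test added below):

#include <memory>
#include "paddle/math/RowBuffer.h"

// Sketch only: RowBuffer in its two modes (auto-growth vs. pre-allocated).
void rowBufferSketch() {
  // Auto-growth mode: no memory handle; rows are materialized on demand.
  paddle::RowBuffer autoBuf(/*width=*/8);
  real* r0 = autoBuf.getWithAutoGrowth(0);  // buffer now holds one row
  r0[0] = 1.0f;

  // Pre-allocated mode: capacity is fixed by the CpuMemoryHandle size.
  auto mem = std::make_shared<paddle::CpuMemoryHandle>(2 * 8 * sizeof(real));
  paddle::RowBuffer fixedBuf(mem, /*width=*/8);
  fixedBuf.get(1)[0] = 2.0f;  // getRowCount() == 2; going past it fails a CHECK
}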
@@ -18,6 +18,7 @@ limitations under the License. */
 #include <string.h>
 #include <algorithm>
 #include "Matrix.h"
+#include "RowBuffer.h"
 #include "paddle/utils/Util.h"

 DECLARE_bool(allow_inefficient_sparse_update);
@@ -45,12 +46,9 @@ public:
                      IndexDictPtr indexDictHandle = nullptr,
                      bool trans = false)
       : CpuMatrix(nullptr, height, width, trans),
-        storeMat_(dataHandle,
-                  dataHandle ? dataHandle->getSize() / sizeof(real) / width : 0,
-                  width,
-                  trans),
         indexDictHandle_(indexDictHandle) {
     init(height, width);
+    buf_.reset(new RowBuffer(dataHandle, width));
   }

   virtual ~SparseRowCpuMatrix() {}
@@ -71,25 +69,16 @@ public:
    *
    * @param row row id in local storage
    */
-  real* getLocalRow(size_t row) {
-    if (storeMat_.getData()) return storeMat_.rowBuf(row);
-    if (rowStore_.size() <= row * width_) {
-      rowStore_.resize((row + 1) * width_);
-    }
-    return rowStore_.data() + row * width_;
-  }
+  real* getLocalRow(size_t row) { return buf_->getWithAutoGrowth(row); }

   /**
-   * reserve the storage for rows according to current size of indexDictHandle.
+   * reserve the storage for rows according to current size of
+   * indexDictHandle.
    *
    * This is only used when SparseRowCpuMatrix is constructed with
    * indexDictHandle.
    */
-  void reserveStore() {
-    if (!storeMat_.getData() && !localIndices_->empty()) {
-      rowStore_.resize(localIndices_->size() * width_);
-    }
-  }
+  void reserveStore() { buf_->resize(localIndices_->size()); }

   // row is the row id in the original matrix
   virtual real* getRowBuf(size_t row) { return getRow(row); }
@@ -117,7 +106,8 @@ public:
    *
    * If L1 decay set use L1, else if L2 set use L2, otherwise no decay atall.
    *
-   * t0 is a int vector used by L1/L2 decay, size = height of parameter matrix,
+   * t0 is a int vector used by L1/L2 decay, size = height of parameter
+   * matrix,
    * store the time that each weight row last updated.
    *
    * Time is batchId, currentTime is current batchId.
...@@ -176,8 +166,7 @@ public: ...@@ -176,8 +166,7 @@ public:
protected: protected:
template <typename Func> template <typename Func>
void apply(Func f) { void apply(Func f) {
real* data = storeMat_.getData() ? storeMat_.getData() : rowStore_.data(); f(buf_->data(), localIndices_->size() * width_);
f(data, localIndices_->size() * width_);
} }
void init(size_t height, size_t width); void init(size_t height, size_t width);
...@@ -188,25 +177,24 @@ protected: ...@@ -188,25 +177,24 @@ protected:
globalIndices_[id] = kUnusedId_; globalIndices_[id] = kUnusedId_;
} }
localIndices_->clear(); localIndices_->clear();
rowStore_.clear(); buf_->clear();
} }
inline void checkStoreSize() { inline void checkStoreSize() {
if (storeMat_.getData()) { if (buf_->isAutoGrowth()) {
CHECK_LE(localIndices_->size(), storeMat_.getHeight()); if (buf_->getRowCount() > 0.5 * height_) {
} else if (!FLAGS_allow_inefficient_sparse_update) {
if (localIndices_->size() > 0.5 * height_) {
LOG(WARNING) LOG(WARNING)
<< "There are more than 0.5*height (" << localIndices_->size() << "There are more than 0.5*height (" << localIndices_->size()
<< ") rows are used for sparse " << ") rows are used for sparse "
<< "update, which is not efficient. Considering not use " << "update, which is not efficient. Considering not use "
<< "sparse_update or set --allow_inefficient_sparse_update=true"; << "sparse_update or set --allow_inefficient_sparse_update=true";
} }
} else {
CHECK_LE(localIndices_->size(), buf_->getRowCount());
} }
} }
CpuMatrix storeMat_; std::unique_ptr<RowBuffer> buf_;
std::vector<real, AlignedAllocator<real, 32>> rowStore_;
IndexDictPtr indexDictHandle_; IndexDictPtr indexDictHandle_;
std::vector<unsigned int>* localIndices_; // =&indexDictHandle_->localIndices std::vector<unsigned int>* localIndices_; // =&indexDictHandle_->localIndices
unsigned int* globalIndices_; // =indexDictHandle_->globalIndices.data(); unsigned int* globalIndices_; // =indexDictHandle_->globalIndices.data();
......
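For readability, the net effect of the checkStoreSize() change above: with an auto-growth buffer the matrix only warns when more than half of its rows are used, while with a pre-allocated buffer it checks that the local rows fit the capacity. A small standalone sketch of the same decision using RowBuffer directly (the function and variable names here are illustrative, not from the commit):

#include <cstdio>
#include "paddle/math/RowBuffer.h"

// Illustrative only: mirrors the branch structure of the new checkStoreSize().
void checkStoreSizeSketch(const paddle::RowBuffer& buf,
                          size_t numLocalRows,
                          size_t height) {
  if (buf.isAutoGrowth()) {
    if (buf.getRowCount() > 0.5 * height) {
      std::printf("warning: %zu rows used for sparse update, inefficient\n",
                  numLocalRows);
    }
  } else {
    // Pre-allocated storage must hold every locally indexed row.
    if (numLocalRows > buf.getRowCount()) {
      std::printf("error: local rows exceed pre-allocated capacity\n");
    }
  }
}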
@@ -4,6 +4,7 @@ add_simple_unittest(test_ExecViaCpu)
 add_simple_unittest(test_SIMDFunctions)
 add_simple_unittest(test_TrainingAlgorithm)
 add_simple_unittest(test_SparseMatrix)
+add_simple_unittest(test_RowBuffer)

 # TODO(yuyang18): Refactor TestUtil.cpp. Remove this cross module reference.
 add_unittest(test_matrixCompare
...
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <gtest/gtest.h>
#include "paddle/math/RowBuffer.h"
TEST(RowBuffer, testAutoGrow) {
  paddle::RowBuffer buf(128);
  ASSERT_EQ(128, buf.getWidth());
  ASSERT_TRUE(buf.isAutoGrowth());
  buf.resize(2);
  ASSERT_EQ(2, buf.getRowCount());
  for (size_t i = 0; i < buf.getWidth() * 2; ++i) {
    buf.data()[i] = i;
  }
  for (size_t i = 0; i < buf.getRowCount(); ++i) {
    for (size_t j = 0; j < buf.getWidth(); ++j) {
      ASSERT_NEAR(i * buf.getWidth() + j, buf.get(i)[j], 1e-5);
    }
  }

  auto data = buf.getWithAutoGrowth(2);
  for (size_t i = 0; i < buf.getWidth(); ++i) {
    data[i] = i;
  }

  ASSERT_EQ(3, buf.getRowCount());
  for (size_t i = 0; i < buf.getRowCount() - 1; ++i) {
    for (size_t j = 0; j < buf.getWidth(); ++j) {
      ASSERT_NEAR(i * buf.getWidth() + j, buf.get(i)[j], 1e-5);
    }
  }
  for (size_t i = 0; i < buf.getWidth(); ++i) {
    ASSERT_NEAR(i, buf.get(2)[i], 1e-5);
  }
}

TEST(RowBuffer, testWithMemBuf) {
  paddle::CpuMemHandlePtr mem =
      std::make_shared<paddle::CpuMemoryHandle>(128 * 2 * sizeof(real));
  paddle::RowBuffer buf(mem, 128);
  ASSERT_TRUE(!buf.isAutoGrowth());
  ASSERT_EQ(2, buf.getRowCount());
  for (size_t i = 0; i < buf.getWidth() * 2; ++i) {
    buf.data()[i] = i;
  }
  for (size_t i = 0; i < buf.getRowCount(); ++i) {
    for (size_t j = 0; j < buf.getWidth(); ++j) {
      ASSERT_NEAR(i * buf.getWidth() + j, buf.getWithAutoGrowth(i)[j], 1e-5);
    }
  }

  ASSERT_DEATH_IF_SUPPORTED(buf.getWithAutoGrowth(3), ".*");
}