From b4aa0eca4426a9f8358a15cd22577554e6051818 Mon Sep 17 00:00:00 2001
From: dzhwinter
Date: Mon, 5 Jun 2017 22:05:55 +0800
Subject: [PATCH] "modify update interface"

---
 paddle/optimizer/CMakeLists.txt         |  1 +
 paddle/optimizer/Tensor.h               | 28 ++++++++++++--------
 paddle/optimizer/Tensor_test.cpp        | 21 +++++++++++++++
 paddle/optimizer/optimizer.cc           |  5 ++--
 paddle/optimizer/parameter_optimizer.cc | 34 ++++++++++++-------------
 paddle/optimizer/sgd_optmizer.cc        |  3 ++-
 6 files changed, 62 insertions(+), 30 deletions(-)
 create mode 100644 paddle/optimizer/Tensor_test.cpp

diff --git a/paddle/optimizer/CMakeLists.txt b/paddle/optimizer/CMakeLists.txt
index 06f6d83ef..95d7ad720 100644
--- a/paddle/optimizer/CMakeLists.txt
+++ b/paddle/optimizer/CMakeLists.txt
@@ -27,3 +27,4 @@ add_dependencies(optimizer gen_proto_cpp)
 
 add_simple_unittest(optimizer_test)
 add_simple_unittest(optimizer_factory_test)
+add_simple_unittest(Tensor_test)
diff --git a/paddle/optimizer/Tensor.h b/paddle/optimizer/Tensor.h
index d779bb507..fbfba4806 100644
--- a/paddle/optimizer/Tensor.h
+++ b/paddle/optimizer/Tensor.h
@@ -5,34 +5,42 @@
  */
 
 #include <string.h>
-#include "paddle/math/BaseMatrix.h"
+#include "paddle/utils/Common.h"
+#include "paddle/utils/Logging.h"
 
 namespace paddle {
 namespace optimizer {
 
 template <class T>
-using TensorBase = BaseMatrixT<T>;
-
-template <class T>
-class TensorT : public TensorBase<T> {
+class TensorT {
 public:
-  TensorT(T* data, int size) : TensorBase<T>(1, size, 0, data, false, false) {}
+  TensorT(size_t h, size_t w, T* data) : height_(h), width_(w), data_(data_) {}
+  TensorT(T* data, int size) : height_(1), width_(size), data_(data) {}
   TensorT(const TensorT& t)
-      : TensorBase<T>(1, t.size(), 0, t.get_buffer(), false, false) {}
+      : TensorT(1, t.size(), 0, t.get_buffer(), false, false) {}
   TensorT& operator=(const TensorT& t) {
-    this->size_ = t.size();
+    this->width_ = t.size();
     this->data_ = t.get_buffer();
   }
   T* get_buffer() { return this->data_; }
   T& operator[](const int idx) {
     CHECK(idx >= 0 && idx < this->width_) << "out of index range";
-    return this->data_[idx];
+    return data_[idx];
+  }
+  T& operator[](const int idx) const {
+    CHECK(idx >= 0 && idx < this->width_) << "out of index range";
+    return data_[idx];
   }
   // TODO: replace with tensorshape
   size_t size() const { return this->width_; }
+
+protected:
+  size_t height_;
+  size_t width_;
+  T* data_;
 };
 
-// TODO(zhihong): design problem of dynamic datatype, need to fix
+// TODO(zhihong): design problem of dynamic datatype, need to fix it
 typedef TensorT<real> Tensor;
 
 }  // namespace optimizer
diff --git a/paddle/optimizer/Tensor_test.cpp b/paddle/optimizer/Tensor_test.cpp
new file mode 100644
index 000000000..4b7059f99
--- /dev/null
+++ b/paddle/optimizer/Tensor_test.cpp
@@ -0,0 +1,21 @@
+#include "Tensor.h"
+#include <iostream>
+#include "gtest/gtest.h"
+
+using namespace paddle;
+using namespace paddle::optimizer;
+
+TEST(Tensor, indexer) {
+  real* ptr = new real[3];
+  Tensor t(ptr, 3);
+  for (auto i = 0; i < t.size(); ++i) {
+    t[i] = i;
+  }
+  ASSERT_EQ(t[2], 2);
+  ASSERT_EQ(t[1], 1);
+}
+
+int main(int argc, char** argv) {
+  testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
+}
diff --git a/paddle/optimizer/optimizer.cc b/paddle/optimizer/optimizer.cc
index fb2e543bf..10b3339c2 100644
--- a/paddle/optimizer/optimizer.cc
+++ b/paddle/optimizer/optimizer.cc
@@ -2,6 +2,7 @@
 #include <string>
 #include "parameter_optimizer.h"
 
+using namespace paddle;
 using namespace paddle::optimizer;
 
 template <paddle_element_type VALUE>
@@ -50,8 +51,8 @@ int paddle_update_parameter(paddle_optimizer* o,
                             const void* grad_buffer,
                             int num_bytes) {
   // TOOD(zhihong): datatype not work. need to add the runtime datatype
-  auto grad = reinterpret_cast<const real*>(grad_buffer);
-  Tensor gradient(const_cast<real*>(grad), num_bytes);
+  auto grad_type = reinterpret_cast<const real*>(grad_buffer);
+  Tensor* gradient = new Tensor(const_cast<real*>(grad_type), num_bytes);
   o->impl->update(gradient);
   return PADDLE_SUCCESS;
 }
diff --git a/paddle/optimizer/parameter_optimizer.cc b/paddle/optimizer/parameter_optimizer.cc
index 6d9fa5c80..7e4aa42c4 100644
--- a/paddle/optimizer/parameter_optimizer.cc
+++ b/paddle/optimizer/parameter_optimizer.cc
@@ -1,7 +1,7 @@
 #include <glog/logging.h>
-#include "adadelta_optimizer.h"
-#include "adagrad_optimizer.h"
-#include "adam_optimizer.h"
+// #include "adadelta_optimizer.h"
+// #include "adagrad_optimizer.h"
+// #include "adam_optimizer.h"
 #include "lr_policy.h"
 #include "sgd_optimizer.h"
 
@@ -36,20 +36,20 @@ ParameterOptimizer *ParameterOptimizer::create(
                                       config.sgd().nesterov(),
                                       lr);
   }
-  if (s == "Adadelta") {
-    return new AdagradOptimizer(
-        config.adagrad().epsilon(), config.adagrad().decay(), lr);
-  }
-  if (s == "Adagrad") {
-    return new AdagradOptimizer(
-        config.adagrad().epsilon(), config.adagrad().decay(), lr);
-  }
-  if (s == "Adam") {
-    return new AdadeltaOptimizer(config.adadelta().rho(),
-                                 config.adadelta().epsilon(),
-                                 config.adadelta().decay(),
-                                 lr);
-  }
+  // if (s == "Adadelta") {
+  //   return new AdagradOptimizer(
+  //       config.adagrad().epsilon(), config.adagrad().decay(), lr);
+  // }
+  // if (s == "Adagrad") {
+  //   return new AdagradOptimizer(
+  //       config.adagrad().epsilon(), config.adagrad().decay(), lr);
+  // }
+  // if (s == "Adam") {
+  //   return new AdadeltaOptimizer(config.adadelta().rho(),
+  //                                config.adadelta().epsilon(),
+  //                                config.adadelta().decay(),
+  //                                lr);
+  // }
   // default
   return new SGDOptimizer(config.sgd().momentum(),
                           config.sgd().decay(),
diff --git a/paddle/optimizer/sgd_optmizer.cc b/paddle/optimizer/sgd_optmizer.cc
index 03ddc8145..020867b93 100644
--- a/paddle/optimizer/sgd_optmizer.cc
+++ b/paddle/optimizer/sgd_optmizer.cc
@@ -16,7 +16,8 @@ void SGDOptimizer::set_weight(Tensor *p) {
 void SGDOptimizer::update(const Tensor &gradient) {
   num_sample_passed += 1;
   double learning_rate = lr_policy->get_learning_rate(num_sample_passed);
-  double velocity = 0.0;
+  real velocity = 0.0;
+  Tensor &param = *parameter_;
   for (size_t i = 0; i < parameter_->size(); ++i) {
     if (momentum == 0.0) {
       velocity =
-- 
GitLab
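
Reading aid for the interface this patch rewires: paddle_update_parameter() wraps a raw gradient buffer in the new Tensor view and hands it to the optimizer, whose SGD path applies a momentum update (the sgd_optmizer.cc hunk above shows only the head of that loop). The standalone sketch below mirrors that flow outside the Paddle tree; TensorView, sgd_momentum_step and the exact momentum/decay formula are illustrative assumptions, not the library's code.

// Standalone sketch of the update flow implied by this patch (not Paddle sources).
// Assumed update rule: v = momentum * v - lr * (g + decay * w); w += v.
#include <cstddef>
#include <iostream>
#include <vector>

using real = float;

// Minimal stand-in for the patched TensorT<real>: a non-owning (height, width, data) view.
struct TensorView {
  size_t height;
  size_t width;
  real* data;
  TensorView(real* d, size_t n) : height(1), width(n), data(d) {}
  real& operator[](size_t i) { return data[i]; }
  size_t size() const { return width; }
};

// One momentum-SGD step over a parameter, in the spirit of SGDOptimizer::update().
void sgd_momentum_step(TensorView param, TensorView grad, std::vector<real>& velocity,
                       real lr, real momentum, real decay) {
  for (size_t i = 0; i < param.size(); ++i) {
    real g = grad[i] + decay * param[i];
    if (momentum == 0.0f) {
      param[i] -= lr * g;  // plain SGD when momentum is disabled
    } else {
      velocity[i] = momentum * velocity[i] - lr * g;
      param[i] += velocity[i];
    }
  }
}

int main() {
  std::vector<real> w = {1.0f, 2.0f, 3.0f};  // parameter
  std::vector<real> g = {0.1f, 0.1f, 0.1f};  // gradient
  std::vector<real> v(w.size(), 0.0f);       // velocity state
  // Wrap the raw buffers, analogous to the Tensor built inside paddle_update_parameter().
  TensorView param(w.data(), w.size());
  TensorView grad(g.data(), g.size());
  sgd_momentum_step(param, grad, v, /*lr=*/0.01f, /*momentum=*/0.9f, /*decay=*/0.0f);
  for (real x : w) std::cout << x << ' ';    // prints values slightly below 1 2 3
  std::cout << '\n';
  return 0;
}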