diff --git a/paddle/optimizer/CMakeLists.txt b/paddle/optimizer/CMakeLists.txt
index 06f6d83efe1155c8cbe76644d6b9efc91349b006..95d7ad720f3c6ab01ef3c8cf0987b710fa39c6ab 100644
--- a/paddle/optimizer/CMakeLists.txt
+++ b/paddle/optimizer/CMakeLists.txt
@@ -27,3 +27,4 @@
 add_dependencies(optimizer gen_proto_cpp)
 add_simple_unittest(optimizer_test)
 add_simple_unittest(optimizer_factory_test)
+add_simple_unittest(Tensor_test)
diff --git a/paddle/optimizer/Tensor.h b/paddle/optimizer/Tensor.h
index d779bb50709153dee19dc2709608674875000006..fbfba4806a1c9dba2376ca81950974193f698059 100644
--- a/paddle/optimizer/Tensor.h
+++ b/paddle/optimizer/Tensor.h
@@ -5,34 +5,42 @@
  */
 
 #include
-#include "paddle/math/BaseMatrix.h"
+#include "paddle/utils/Common.h"
+#include "paddle/utils/Logging.h"
 
 namespace paddle {
 namespace optimizer {
 
 template <class T>
-using TensorBase = BaseMatrixT<T>;
-
-template <class T>
-class TensorT : public TensorBase<T> {
+class TensorT {
 public:
-  TensorT(T* data, int size) : TensorBase(1, size, 0, data, false, false) {}
+  TensorT(size_t h, size_t w, T* data) : height_(h), width_(w), data_(data_) {}
+  TensorT(T* data, int size) : height_(1), width_(size), data_(data) {}
   TensorT(const TensorT& t)
-      : TensorBase(1, t.size(), 0, t.get_buffer(), false, false) {}
+      : TensorT(1, t.size(), 0, t.get_buffer(), false, false) {}
   TensorT& operator=(const TensorT& t) {
-    this->size_ = t.size();
+    this->width_ = t.size();
     this->data_ = t.get_buffer();
   }
   T* get_buffer() { return this->data_; }
   T& operator[](const int idx) {
     CHECK(idx >= 0 && idx < this->width_) << "out of index range";
-    return this->data_[idx];
+    return data_[idx];
+  }
+  T& operator[](const int idx) const {
+    CHECK(idx >= 0 && idx < this->width_) << "out of index range";
+    return data_[idx];
   }
   // TODO: replace with tensorshape
   size_t size() const { return this->width_; }
+
+protected:
+  size_t height_;
+  size_t width_;
+  T* data_;
 };
 
-// TODO(zhihong): design problem of dynamic datatype, need to fix
+// TODO(zhihong): design problem of dynamic datatype, need to fix it
 typedef TensorT<real> Tensor;
 
 }  // namespace optimizer
diff --git a/paddle/optimizer/Tensor_test.cpp b/paddle/optimizer/Tensor_test.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..4b7059f9943a9432fd36566141893216df5b1fca
--- /dev/null
+++ b/paddle/optimizer/Tensor_test.cpp
@@ -0,0 +1,21 @@
+#include "Tensor.h"
+#include
+#include "gtest/gtest.h"
+
+using namespace paddle;
+using namespace paddle::optimizer;
+
+TEST(Tensor, indexer) {
+  real* ptr = new real[3];
+  Tensor t(ptr, 3);
+  for (auto i = 0; i < t.size(); ++i) {
+    t[i] = i;
+  }
+  ASSERT_EQ(t[2], 2);
+  ASSERT_EQ(t[1], 1);
+}
+
+int main(int argc, char** argv) {
+  testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
+}
diff --git a/paddle/optimizer/optimizer.cc b/paddle/optimizer/optimizer.cc
index fb2e543bf32570c9744296c50256663aee7da4ff..10b3339c2d5a6f57a45f3e93d7badd52bc08c42b 100644
--- a/paddle/optimizer/optimizer.cc
+++ b/paddle/optimizer/optimizer.cc
@@ -2,6 +2,7 @@
 #include
 #include "parameter_optimizer.h"
 
+using namespace paddle;
 using namespace paddle::optimizer;
 
 template
@@ -50,8 +51,8 @@ int paddle_update_parameter(paddle_optimizer* o,
                             const void* grad_buffer,
                             int num_bytes) {
   // TOOD(zhihong): datatype not work. need to add the runtime datatype
-  auto grad = reinterpret_cast<const real*>(grad_buffer);
-  Tensor gradient(const_cast<real*>(grad), num_bytes);
+  auto grad_type = reinterpret_cast<const real*>(grad_buffer);
+  Tensor* gradient = new Tensor(const_cast<real*>(grad_type), num_bytes);
   o->impl->update(gradient);
   return PADDLE_SUCCESS;
 }
diff --git a/paddle/optimizer/parameter_optimizer.cc b/paddle/optimizer/parameter_optimizer.cc
index 6d9fa5c8024013250e6e0b76707c9f0b467f5646..7e4aa42c4bf380fea846837254327f87cc899806 100644
--- a/paddle/optimizer/parameter_optimizer.cc
+++ b/paddle/optimizer/parameter_optimizer.cc
@@ -1,7 +1,7 @@
 #include
-#include "adadelta_optimizer.h"
-#include "adagrad_optimizer.h"
-#include "adam_optimizer.h"
+// #include "adadelta_optimizer.h"
+// #include "adagrad_optimizer.h"
+// #include "adam_optimizer.h"
 #include "lr_policy.h"
 #include "sgd_optimizer.h"
 
@@ -36,20 +36,20 @@ ParameterOptimizer *ParameterOptimizer::create(
                             config.sgd().nesterov(),
                             lr);
   }
-  if (s == "Adadelta") {
-    return new AdagradOptimizer(
-        config.adagrad().epsilon(), config.adagrad().decay(), lr);
-  }
-  if (s == "Adagrad") {
-    return new AdagradOptimizer(
-        config.adagrad().epsilon(), config.adagrad().decay(), lr);
-  }
-  if (s == "Adam") {
-    return new AdadeltaOptimizer(config.adadelta().rho(),
-                                 config.adadelta().epsilon(),
-                                 config.adadelta().decay(),
-                                 lr);
-  }
+  // if (s == "Adadelta") {
+  //   return new AdagradOptimizer(
+  //       config.adagrad().epsilon(), config.adagrad().decay(), lr);
+  // }
+  // if (s == "Adagrad") {
+  //   return new AdagradOptimizer(
+  //       config.adagrad().epsilon(), config.adagrad().decay(), lr);
+  // }
+  // if (s == "Adam") {
+  //   return new AdadeltaOptimizer(config.adadelta().rho(),
+  //                                config.adadelta().epsilon(),
+  //                                config.adadelta().decay(),
+  //                                lr);
+  // }
   // default
   return new SGDOptimizer(config.sgd().momentum(),
                           config.sgd().decay(),
diff --git a/paddle/optimizer/sgd_optmizer.cc b/paddle/optimizer/sgd_optmizer.cc
index 03ddc81451715b458b33afdeb10d9bfd24f1b354..020867b93d5df96a292cb4e845703d6c7b1e62f9 100644
--- a/paddle/optimizer/sgd_optmizer.cc
+++ b/paddle/optimizer/sgd_optmizer.cc
@@ -16,7 +16,8 @@ void SGDOptimizer::set_weight(Tensor *p) {
 void SGDOptimizer::update(const Tensor &gradient) {
   num_sample_passed += 1;
   double learning_rate = lr_policy->get_learning_rate(num_sample_passed);
-  double velocity = 0.0;
+  real velocity = 0.0;
+  Tensor &param = *parameter_;
   for (size_t i = 0; i < parameter_->size(); ++i) {
     if (momentum == 0.0) {
       velocity =