From 5a1e678ba40f45752ce91701688fa1640ae13f36 Mon Sep 17 00:00:00 2001 From: dzhwinter Date: Tue, 6 Jun 2017 23:52:01 +0800 Subject: [PATCH] "update macro and fix some part" --- paddle/optimizer/Tensor.h | 9 +++++++-- paddle/optimizer/Tensor_test.cpp | 4 +--- paddle/optimizer/adadelta_optimizer.cc | 9 +++------ paddle/optimizer/adagrad_optimizer.cc | 3 +-- paddle/optimizer/adam_optimizer.cc | 6 ++---- paddle/optimizer/parameter_optimizer_test.cpp | 6 ++---- paddle/optimizer/sgd_optmizer.cc | 7 +++---- 7 files changed, 19 insertions(+), 25 deletions(-) diff --git a/paddle/optimizer/Tensor.h b/paddle/optimizer/Tensor.h index 3dbb3ca05de..9f68877e3ba 100644 --- a/paddle/optimizer/Tensor.h +++ b/paddle/optimizer/Tensor.h @@ -14,10 +14,15 @@ namespace optimizer { template class TensorT { public: - TensorT(size_t h, size_t w, T* data) : height_(h), width_(w), data_(data_) {} - TensorT(T* data, int size) : height_(1), width_(size), data_(data) {} + TensorT(size_t size) : height_(1), width_(size) { data_ = new T[size]; } + TensorT(T* data, size_t size) : height_(1), width_(size), data_(data) {} + TensorT(T* data, size_t h, size_t w) : height_(h), width_(w), data_(data) {} TensorT(const TensorT& t) : TensorT(1, t.size(), 0, t.get_buffer(), false, false) {} + ~TensorT() { + if (data_) delete[] data_; + } + TensorT& operator=(const TensorT& t) { this->width_ = t.size(); this->data_ = t.get_buffer(); diff --git a/paddle/optimizer/Tensor_test.cpp b/paddle/optimizer/Tensor_test.cpp index 3a21b6d3032..b6a808d6e84 100644 --- a/paddle/optimizer/Tensor_test.cpp +++ b/paddle/optimizer/Tensor_test.cpp @@ -6,14 +6,12 @@ using namespace paddle; using namespace paddle::optimizer; TEST(Tensor, indexer) { - real* ptr = new real[3]; - Tensor t(ptr, 3); + Tensor t(3); for (auto i = 0; i < t.size(); ++i) { t[i] = i; } ASSERT_EQ(t[2], 2); ASSERT_EQ(t[1], 1); - delete ptr; } int main(int argc, char** argv) { diff --git a/paddle/optimizer/adadelta_optimizer.cc 
b/paddle/optimizer/adadelta_optimizer.cc index 7381f9d40e7..64672da0c04 100644 --- a/paddle/optimizer/adadelta_optimizer.cc +++ b/paddle/optimizer/adadelta_optimizer.cc @@ -8,12 +8,9 @@ namespace optimizer { void AdadeltaOptimizer::set_weight(Tensor* p) { parameter_ = p; size_t size = p->size(); - real* gptr = new real[size]; - accum_gradient_ = new Tensor(gptr, size); - real* dptr = new real[size]; - accum_delta_ = new Tensor(dptr, size); - real* dptr_current = new real[size]; - update_delta_ = new Tensor(dptr_current, size); + accum_gradient_ = new Tensor(size); + accum_delta_ = new Tensor(size); + update_delta_ = new Tensor(size); } void AdadeltaOptimizer::Update(const Tensor* gradient) { diff --git a/paddle/optimizer/adagrad_optimizer.cc b/paddle/optimizer/adagrad_optimizer.cc index e3a9960e150..1698c2abdbb 100644 --- a/paddle/optimizer/adagrad_optimizer.cc +++ b/paddle/optimizer/adagrad_optimizer.cc @@ -8,8 +8,7 @@ namespace optimizer { void AdagradOptimizer::set_weight(Tensor* p) { parameter_ = p; size_t size = p->size(); - real* gptr = new real[size]; - accum_gradient_ = new Tensor(gptr, size); + accum_gradient_ = new Tensor(size); } void AdagradOptimizer::Update(const Tensor* gradient) { diff --git a/paddle/optimizer/adam_optimizer.cc b/paddle/optimizer/adam_optimizer.cc index ae96b30b948..d052ac8f9a1 100644 --- a/paddle/optimizer/adam_optimizer.cc +++ b/paddle/optimizer/adam_optimizer.cc @@ -7,10 +7,8 @@ namespace optimizer { void AdamOptimizer::set_weight(Tensor *p) { parameter_ = p; size_t size = p->size(); - real *mptr = new real[size]; - momentums_ = new Tensor(mptr, size); - real *vptr = new real[size]; - velocitys_ = new Tensor(vptr, size); + momentums_ = new Tensor(size); + velocitys_ = new Tensor(size); } void AdamOptimizer::Update(const Tensor *gradient) { diff --git a/paddle/optimizer/parameter_optimizer_test.cpp b/paddle/optimizer/parameter_optimizer_test.cpp index 2b3ad84ca95..d9f2ed8e950 100644 --- 
a/paddle/optimizer/parameter_optimizer_test.cpp +++ b/paddle/optimizer/parameter_optimizer_test.cpp @@ -11,8 +11,7 @@ using namespace paddle; using namespace paddle::optimizer; Tensor* FillTensor(size_t size) { - real* ptr = new real[size]; - Tensor* param = new Tensor(ptr, size); + Tensor* param = new Tensor(size); Tensor& p = *param; for (auto i = 0; i < p.size(); ++i) { p[i] = (float)rand() / (float)RAND_MAX; @@ -21,8 +20,7 @@ Tensor* FillTensor(size_t size) { } Tensor* FixedTensor(size_t size) { - real* ptr = new real[size]; - Tensor* param = new Tensor(ptr, size); + Tensor* param = new Tensor(size); Tensor& p = *param; for (auto i = 0; i < p.size(); ++i) { p[i] = i; diff --git a/paddle/optimizer/sgd_optmizer.cc b/paddle/optimizer/sgd_optmizer.cc index a222672815b..b40bf7c1020 100644 --- a/paddle/optimizer/sgd_optmizer.cc +++ b/paddle/optimizer/sgd_optmizer.cc @@ -6,11 +6,10 @@ namespace optimizer { void SGDOptimizer::set_weight(Tensor *p) { parameter_ = p; - size_t size = p->size(); - // TODO: fix it with align aware allocator bind to Tensor if (momentum_ != 0.0) { - real *ptr = new real[size]; - momentums_ = new Tensor(ptr, size); + size_t size = p->size(); + // TODO: fix it with align aware allocator bind to Tensor + momentums_ = new Tensor(size); } } -- GitLab