Commit 5a1e678b authored by D dzhwinter

"update macro and fix some part"

Parent beb26978
......@@ -14,10 +14,15 @@ namespace optimizer {
 template <class T>
 class TensorT {
  public:
-  TensorT(size_t h, size_t w, T* data) : height_(h), width_(w), data_(data_) {}
-  TensorT(T* data, int size) : height_(1), width_(size), data_(data) {}
+  TensorT(size_t size) : height_(1), width_(size) { data_ = new T[size]; }
+  TensorT(T* data, size_t size) : height_(1), width_(size), data_(data) {}
+  TensorT(T* data, size_t h, size_t w) : height_(h), width_(w), data_(data) {}
+  TensorT(const TensorT& t)
+      : TensorT(1, t.size(), 0, t.get_buffer(), false, false) {}
+  ~TensorT() {
+    if (data_) delete[] data_;  // array delete to match new T[size]
+  }
   TensorT& operator=(const TensorT& t) {
     this->width_ = t.size();
     this->data_ = t.get_buffer();
......
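The new TensorT(size_t size) constructor allocates its own buffer, while the pointer-taking constructors wrap memory owned elsewhere, yet the destructor frees data_ unconditionally. A minimal ownership-aware sketch for comparison; the owns_ flag is an assumption, not part of this commit:

#include <cstddef>

// Sketch only: the committed TensorT deletes data_ unconditionally, so a
// wrapped external buffer must not also be freed by its original owner.
template <class T>
class OwnershipAwareTensorT {
 public:
  explicit OwnershipAwareTensorT(size_t size)
      : height_(1), width_(size), data_(new T[size]), owns_(true) {}
  OwnershipAwareTensorT(T* data, size_t size)
      : height_(1), width_(size), data_(data), owns_(false) {}
  ~OwnershipAwareTensorT() {
    if (owns_) delete[] data_;  // only free what this object allocated
  }
  // Copy operations deleted for brevity; the committed class shallow-copies.
  OwnershipAwareTensorT(const OwnershipAwareTensorT&) = delete;
  OwnershipAwareTensorT& operator=(const OwnershipAwareTensorT&) = delete;

  T& operator[](size_t i) { return data_[i]; }
  size_t size() const { return height_ * width_; }

 private:
  size_t height_;
  size_t width_;
  T* data_;
  bool owns_;
};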
......@@ -6,14 +6,12 @@ using namespace paddle;
 using namespace paddle::optimizer;
 TEST(Tensor, indexer) {
-  real* ptr = new real[3];
-  Tensor t(ptr, 3);
+  Tensor t(3);
   for (auto i = 0; i < t.size(); ++i) {
     t[i] = i;
   }
   ASSERT_EQ(t[2], 2);
   ASSERT_EQ(t[1], 1);
-  delete ptr;
 }
 int main(int argc, char** argv) {
......
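The test now builds Tensor t(3) instead of wrapping a manually allocated pointer. With the destructor freeing data_, the old pattern would release the buffer twice; a fragment illustrating the hazard, assuming the real typedef and Tensor alias used by these tests:

// Hazard illustration only, not part of the commit.
real* ptr = new real[3];
{
  Tensor t(ptr, 3);  // t.data_ aliases ptr
}                    // ~Tensor frees the buffer here
delete[] ptr;        // second free of the same memory: undefined behavior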
......@@ -8,12 +8,9 @@ namespace optimizer {
 void AdadeltaOptimizer::set_weight(Tensor* p) {
   parameter_ = p;
   size_t size = p->size();
-  real* gptr = new real[size];
-  accum_gradient_ = new Tensor(gptr, size);
-  real* dptr = new real[size];
-  accum_delta_ = new Tensor(dptr, size);
-  real* dptr_current = new real[size];
-  update_delta_ = new Tensor(dptr_current, size);
+  accum_gradient_ = new Tensor(size);
+  accum_delta_ = new Tensor(size);
+  update_delta_ = new Tensor(size);
 }
 void AdadeltaOptimizer::Update(const Tensor* gradient) {
......
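The three buffers allocated above are the accumulators of the standard Adadelta rule. A hedged sketch of the element-wise step they typically back; rho and epsilon are assumed hyperparameter names, and this is not the committed AdadeltaOptimizer::Update:

#include <cmath>
#include <cstddef>

void AdadeltaStep(float* param, const float* grad, float* accum_grad,
                  float* accum_delta, float* update_delta, size_t size,
                  float rho, float epsilon) {
  for (size_t i = 0; i < size; ++i) {
    // E[g^2] <- rho * E[g^2] + (1 - rho) * g^2
    accum_grad[i] = rho * accum_grad[i] + (1 - rho) * grad[i] * grad[i];
    // delta = sqrt((E[dx^2] + eps) / (E[g^2] + eps)) * g
    update_delta[i] = std::sqrt((accum_delta[i] + epsilon) /
                                (accum_grad[i] + epsilon)) * grad[i];
    // E[dx^2] <- rho * E[dx^2] + (1 - rho) * delta^2
    accum_delta[i] = rho * accum_delta[i] +
                     (1 - rho) * update_delta[i] * update_delta[i];
    param[i] -= update_delta[i];
  }
}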
......@@ -8,8 +8,7 @@ namespace optimizer {
 void AdagradOptimizer::set_weight(Tensor* p) {
   parameter_ = p;
   size_t size = p->size();
-  real* gptr = new real[size];
-  accum_gradient_ = new Tensor(gptr, size);
+  accum_gradient_ = new Tensor(size);
 }
 void AdagradOptimizer::Update(const Tensor* gradient) {
......
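The single accumulator matches the standard Adagrad rule; a sketch (not the committed AdagradOptimizer::Update) with learning_rate and epsilon as assumed parameters:

#include <cmath>
#include <cstddef>

void AdagradStep(float* param, const float* grad, float* accum_grad,
                 size_t size, float learning_rate, float epsilon) {
  for (size_t i = 0; i < size; ++i) {
    accum_grad[i] += grad[i] * grad[i];  // G <- G + g^2
    param[i] -= learning_rate * grad[i] /
                (std::sqrt(accum_grad[i]) + epsilon);
  }
}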
......@@ -7,10 +7,8 @@ namespace optimizer {
 void AdamOptimizer::set_weight(Tensor *p) {
   parameter_ = p;
   size_t size = p->size();
-  real *mptr = new real[size];
-  momentums_ = new Tensor(mptr, size);
-  real *vptr = new real[size];
-  velocitys_ = new Tensor(vptr, size);
+  momentums_ = new Tensor(size);
+  velocitys_ = new Tensor(size);
 }
 void AdamOptimizer::Update(const Tensor *gradient) {
......
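momentums_ and velocitys_ hold Adam's first and second moment estimates. A sketch of the standard step (not the committed AdamOptimizer::Update); all hyperparameter names and the step counter t are assumptions:

#include <cmath>
#include <cstddef>

void AdamStep(float* param, const float* grad, float* momentums,
              float* velocitys, size_t size, float learning_rate,
              float beta1, float beta2, float epsilon, int t) {
  const float bias1 = 1.0f - std::pow(beta1, static_cast<float>(t));
  const float bias2 = 1.0f - std::pow(beta2, static_cast<float>(t));
  for (size_t i = 0; i < size; ++i) {
    momentums[i] = beta1 * momentums[i] + (1 - beta1) * grad[i];
    velocitys[i] = beta2 * velocitys[i] + (1 - beta2) * grad[i] * grad[i];
    const float m_hat = momentums[i] / bias1;  // bias-corrected moments
    const float v_hat = velocitys[i] / bias2;
    param[i] -= learning_rate * m_hat / (std::sqrt(v_hat) + epsilon);
  }
}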
......@@ -11,8 +11,7 @@ using namespace paddle;
 using namespace paddle::optimizer;
 Tensor* FillTensor(size_t size) {
-  real* ptr = new real[size];
-  Tensor* param = new Tensor(ptr, size);
+  Tensor* param = new Tensor(size);
   Tensor& p = *param;
   for (auto i = 0; i < p.size(); ++i) {
     p[i] = (float)rand() / (float)RAND_MAX;
......@@ -21,8 +20,7 @@ Tensor* FillTensor(size_t size) {
 }
 Tensor* FixedTensor(size_t size) {
-  real* ptr = new real[size];
-  Tensor* param = new Tensor(ptr, size);
+  Tensor* param = new Tensor(size);
   Tensor& p = *param;
   for (auto i = 0; i < p.size(); ++i) {
     p[i] = i;
......
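Both helpers heap-allocate the returned Tensor, so the caller owns it and, through the new destructor, its buffer. A usage fragment for a test body; the std::unique_ptr wrapper is an assumption on top of the committed tests:

#include <memory>

std::unique_ptr<Tensor> fixed(FixedTensor(5));  // elements 0,1,2,3,4
ASSERT_EQ((*fixed)[3], 3);
// unique_ptr deletes the Tensor, whose destructor frees its buffer.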
......@@ -6,11 +6,10 @@ namespace optimizer {
 void SGDOptimizer::set_weight(Tensor *p) {
   parameter_ = p;
+  size_t size = p->size();
+  // TODO: fix it with align aware allocator bind to Tensor
   if (momentum_ != 0.0) {
-    size_t size = p->size();
-    // TODO: fix it with align aware allocator bind to Tensor
-    real *ptr = new real[size];
-    momentums_ = new Tensor(ptr, size);
+    momentums_ = new Tensor(size);
   }
 }
......
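Since momentums_ is only allocated when momentum_ != 0.0, the update must branch on the same condition. A hedged sketch of the usual momentum SGD step (not the committed SGDOptimizer::Update; learning_rate and decay are assumed parameters):

#include <cstddef>

void SGDStep(float* param, const float* grad, float* momentums, size_t size,
             float learning_rate, float momentum, float decay) {
  for (size_t i = 0; i < size; ++i) {
    const float g = grad[i] + decay * param[i];  // weight decay folded in
    if (momentums != nullptr) {
      // velocity buffer exists only when momentum_ != 0.0 (see set_weight)
      momentums[i] = momentum * momentums[i] - learning_rate * g;
      param[i] += momentums[i];
    } else {
      param[i] -= learning_rate * g;  // plain SGD
    }
  }
}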