Commit e148bc15 authored by dzhwinter

"remove unused tensor line"

Parent 1814fc29
@@ -84,6 +84,7 @@ function(link_paddle_exe TARGET_NAME)
         paddle_parameter
         paddle_proto
         paddle_cuda
+        paddle_optimizer
         ${EXTERNAL_LIBS}
         ${CMAKE_THREAD_LIBS_INIT}
         ${CMAKE_DL_LIBS}
......
@@ -9,9 +9,8 @@ set(OPITMIZER_SRCS
     sgd_optimizer.cc
     )
-add_library(optimizer STATIC ${OPITMIZER_SRCS})
-add_dependencies(optimizer gen_proto_cpp)
+add_library(paddle_optimizer STATIC ${OPITMIZER_SRCS})
+add_dependencies(paddle_optimizer gen_proto_cpp)
 add_simple_unittest(serialization_test)
 add_simple_unittest(parameter_optimizer_test)
-add_dependencies(parameter_optimizer_test optimizer)
@@ -15,13 +15,17 @@ template <class T>
 class TensorT {
 public:
   TensorT(size_t size) : height_(1), width_(size) { data_ = new T[size]; }
   TensorT(T* data, size_t size) : height_(1), width_(size), data_(data) {}
-  TensorT(T* data, size_t h, size_t w) : height_(h), width_(w), data_(data_) {}
+  TensorT(T* data, size_t h, size_t w) : height_(h), width_(w), data_(data) {}
   ~TensorT() {
     if (data_) delete data_;
   }
   T* get_buffer() { return this->data_; }
   T& operator[](const size_t idx) {
     CHECK(idx >= 0 && idx < this->width_) << "out of index range";
     return data_[idx];
......
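The one-character change above fixes a self-initialization bug: data_(data_) initializes the member with its own indeterminate value, so the constructor argument was silently dropped. A minimal standalone sketch of the failure mode (the Buf struct is invented for illustration; compilers typically flag this pattern under -Wuninitialized / -Winit-self):

    // self_init_demo.cc -- hypothetical example, not part of this commit.
    // g++ -Wall -Wuninitialized self_init_demo.cc
    #include <cstdio>

    struct Buf {
      // BUG (pre-fix pattern): data_(data_) reads the member's own
      // indeterminate value; the constructor argument is ignored.
      explicit Buf(int* data) : data_(data_) { (void)data; }
      int* data_;
    };

    int main() {
      int x = 42;
      Buf b(&x);
      // b.data_ holds garbage, not &x; dereferencing it would be UB.
      std::printf("member: %p, expected: %p\n",
                  static_cast<void*>(b.data_), static_cast<void*>(&x));
      return 0;
    }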
@@ -49,8 +49,8 @@ public:
     config_.set_lr_policy(OptimizerConfig::ConstLr);
     config_.mutable_const_lr()->set_learning_rate(0.1);
-    ParameterOptimizer* opt =
-        ParameterOptimizer::Create(config_.SerializeAsString(), parameter);
+    std::string str = config_.SerializeAsString();
+    ParameterOptimizer* opt = ParameterOptimizer::Create(str, parameter);
     opts_.push_back(opt);
     opts_table_[opts_.size()] = OptimizerConfig::SGD;
   }
@@ -64,8 +64,8 @@ public:
     config_.mutable_adam()->set_decay(0.0);
     config_.set_lr_policy(OptimizerConfig::ConstLr);
     config_.mutable_const_lr()->set_learning_rate(0.1);
-    ParameterOptimizer* opt =
-        ParameterOptimizer::Create(config_.SerializeAsString(), parameter);
+    std::string str = config_.SerializeAsString();
+    ParameterOptimizer* opt = ParameterOptimizer::Create(str, parameter);
     opts_.push_back(opt);
     opts_table_[opts_.size()] = OptimizerConfig::Adam;
   }
......
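Both test fixtures above now bind the serialized config to a named std::string before calling ParameterOptimizer::Create. Passing the temporary directly was legal (it binds to a const reference for the duration of the call), but the named form is easier to inspect in a debugger and is safer if a callee ever retains a pointer into the buffer. A hedged sketch of that lifetime pitfall (Parser, Attach, and Serialize are invented names, not this repo's API):

    // lifetime_demo.cc -- illustrative only.
    #include <iostream>
    #include <string>

    struct Parser {
      const char* buf_ = nullptr;               // non-owning view of the bytes
      void Attach(const std::string& s) { buf_ = s.data(); }
    };

    std::string Serialize() { return "serialized-bytes"; }

    int main() {
      Parser p;
      p.Attach(Serialize());   // temporary is destroyed after this statement
      // p.buf_ dangles here; reading it would be undefined behavior.

      std::string held = Serialize();           // named object lives on
      p.Attach(held);
      std::cout << p.buf_ << "\n";              // safe: buffer outlives the use
      return 0;
    }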
@@ -1,4 +1,5 @@
 #pragma once
+#include <iostream>
 #include <sstream>
 #include <string>
 #include <type_traits>
@@ -16,7 +17,7 @@ static void TensorToProto(const Tensor& tensor, TensorProto* proto) {
   for (size_t i = 0; i < tensor.size(); ++i) {
     os << tensor[i];
     proto->add_content(os.str());
-    os.clear();
+    os.str(std::string());
   }
 }
@@ -25,6 +26,7 @@ static void ProtoToTensor(const TensorProto& proto, Tensor* tensor) {
   for (auto i = 0; i < proto.content_size(); ++i) {
     sin << proto.content(i);
     sin >> (*tensor)[i];
+    sin.str(std::string());
     sin.clear();
   }
 }
......
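The two serialization fixes above hinge on the difference between clear() and str() on a string stream: clear() only resets the error/eof state flags and leaves the accumulated buffer intact, while str(std::string()) actually empties it. Without the fix, TensorToProto would append "0", "01", "012", ... instead of one value per proto entry; on the read side, sin still needs clear() as well, to drop eofbit after each extraction. A standalone demo of the two calls:

    // sstream_reset.cc -- demo of the semantics, not repo code.
    #include <iostream>
    #include <sstream>
    #include <string>

    int main() {
      std::ostringstream os;
      os << 1;
      std::cout << os.str() << "\n";  // "1"
      os.clear();                     // resets state flags only; buffer kept
      os << 2;
      std::cout << os.str() << "\n";  // "12" -- values accumulate (the bug)
      os.str(std::string());          // empties the buffer (the fix)
      os << 3;
      std::cout << os.str() << "\n";  // "3"
      return 0;
    }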
@@ -10,6 +10,7 @@ TEST(TensorToProto, Case1) {
     t[i] = i;
     t1[i] = 0;
   }
   TensorProto proto;
   TensorToProto(t, &proto);
   ProtoToTensor(proto, &t1);
......
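The test above is truncated before its assertions; a plausible continuation of the round-trip check in the usual gtest style (hypothetical, since the elided lines are not shown in this diff):

    // Hypothetical continuation -- elementwise comparison after the
    // Tensor -> TensorProto -> Tensor round trip.
    for (size_t i = 0; i < t.size(); ++i) {
      EXPECT_EQ(t[i], t1[i]);
    }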