Commit e148bc15 authored by dzhwinter

"remove unused tensor line"

Parent 1814fc29
@@ -84,6 +84,7 @@ function(link_paddle_exe TARGET_NAME)
         paddle_parameter
         paddle_proto
         paddle_cuda
+        paddle_optimizer
         ${EXTERNAL_LIBS}
         ${CMAKE_THREAD_LIBS_INIT}
         ${CMAKE_DL_LIBS}
......
@@ -9,9 +9,8 @@ set(OPITMIZER_SRCS
     sgd_optimizer.cc
     )
-add_library(optimizer STATIC ${OPITMIZER_SRCS})
-add_dependencies(optimizer gen_proto_cpp)
+add_library(paddle_optimizer STATIC ${OPITMIZER_SRCS})
+add_dependencies(paddle_optimizer gen_proto_cpp)
 add_simple_unittest(serialization_test)
 add_simple_unittest(parameter_optimizer_test)
-add_dependencies(parameter_optimizer_test optimizer)
@@ -15,13 +15,17 @@ template <class T>
 class TensorT {
 public:
   TensorT(size_t size) : height_(1), width_(size) { data_ = new T[size]; }
   TensorT(T* data, size_t size) : height_(1), width_(size), data_(data) {}
-  TensorT(T* data, size_t h, size_t w) : height_(h), width_(w), data_(data_) {}
+  TensorT(T* data, size_t h, size_t w) : height_(h), width_(w), data_(data) {}
+  ~TensorT() {
+    if (data_) delete data_;
+  }
   T* get_buffer() { return this->data_; }
   T& operator[](const size_t idx) {
     CHECK(idx >= 0 && idx < this->width_) << "out of index range";
     return data_[idx];
......
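The tensor.h hunk fixes a genuine bug: the removed constructor initialized data_ from itself (data_(data_)), leaving the member pointing at garbage, while the replacement initializes it from the data parameter. It also adds a destructor. Two caveats remain in the committed code: the size_t constructor allocates with new T[], so the matching release would be delete[] rather than delete, and the pointer-wrapping constructors receive caller-owned memory that the destructor arguably should not free. (The CHECK's idx >= 0 is also vacuous, since size_t is unsigned.) Below is a minimal ownership-aware sketch, not the committed code; the owns_ flag is an illustrative addition.

#include <cstddef>

template <class T>
class TensorT {
public:
  explicit TensorT(size_t size)
      : height_(1), width_(size), data_(new T[size]), owns_(true) {}
  TensorT(T* data, size_t size)
      : height_(1), width_(size), data_(data), owns_(false) {}
  // The fix in this commit: initialize data_ from the parameter `data`,
  // not from itself (data_(data_) reads the uninitialized member).
  TensorT(T* data, size_t h, size_t w)
      : height_(h), width_(w), data_(data), owns_(false) {}
  ~TensorT() {
    if (owns_) delete[] data_;  // delete[] matches new T[]; skip borrowed buffers
  }
  T* get_buffer() { return data_; }
  T& operator[](size_t idx) { return data_[idx]; }

private:
  size_t height_;
  size_t width_;
  T* data_;
  bool owns_;
};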
@@ -49,8 +49,8 @@ public:
     config_.set_lr_policy(OptimizerConfig::ConstLr);
     config_.mutable_const_lr()->set_learning_rate(0.1);
-    ParameterOptimizer* opt =
-        ParameterOptimizer::Create(config_.SerializeAsString(), parameter);
+    std::string str = config_.SerializeAsString();
+    ParameterOptimizer* opt = ParameterOptimizer::Create(str, parameter);
     opts_.push_back(opt);
     opts_table_[opts_.size()] = OptimizerConfig::SGD;
   }
@@ -64,8 +64,8 @@ public:
     config_.mutable_adam()->set_decay(0.0);
     config_.set_lr_policy(OptimizerConfig::ConstLr);
     config_.mutable_const_lr()->set_learning_rate(0.1);
-    ParameterOptimizer* opt =
-        ParameterOptimizer::Create(config_.SerializeAsString(), parameter);
+    std::string str = config_.SerializeAsString();
+    ParameterOptimizer* opt = ParameterOptimizer::Create(str, parameter);
     opts_.push_back(opt);
     opts_table_[opts_.size()] = OptimizerConfig::Adam;
   }
......
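Both test hunks (SGD and Adam) apply the same mechanical refactor: the serialized OptimizerConfig is bound to a named std::string before being handed to ParameterOptimizer::Create, instead of passing the SerializeAsString() temporary inline. This is functionally equivalent as long as Create takes the bytes by value or const reference, but the named variable keeps the statement on one line and makes the serialized bytes easy to inspect or log. A minimal standalone sketch of the pattern, with hypothetical stand-ins for the protobuf config and the factory (not Paddle's real types):

#include <iostream>
#include <string>

// Hypothetical stand-ins for OptimizerConfig and ParameterOptimizer;
// only the call pattern from the diff is the point here.
struct Config {
  std::string SerializeAsString() const { return "serialized-bytes"; }
};

struct Optimizer {
  static Optimizer* Create(const std::string& proto_bytes) {
    // A real implementation would parse proto_bytes and build the optimizer.
    return new Optimizer;
  }
};

int main() {
  Config config;
  // Hoisting the temporary into a named variable, as the diff does, lets
  // the bytes be size-checked or logged before the Create call.
  std::string str = config.SerializeAsString();
  std::cout << "config is " << str.size() << " bytes\n";
  Optimizer* opt = Optimizer::Create(str);
  delete opt;
  return 0;
}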
 #pragma once
 #include <iostream>
+#include <sstream>
 #include <string>
 #include <type_traits>
@@ -16,7 +17,7 @@ static void TensorToProto(const Tensor& tensor, TensorProto* proto) {
   for (size_t i = 0; i < tensor.size(); ++i) {
     os << tensor[i];
     proto->add_content(os.str());
-    os.clear();
+    os.str(std::string());
   }
 }
@@ -25,6 +26,7 @@ static void ProtoToTensor(const TensorProto& proto, Tensor* tensor) {
   for (auto i = 0; i < proto.content_size(); ++i) {
     sin << proto.content(i);
     sin >> (*tensor)[i];
+    sin.str(std::string());
     sin.clear();
   }
 }
......
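The serialization.h hunks fix the stream-reset idiom. clear() only resets a stream's error and EOF flags and never touches the buffered characters, so the old TensorToProto accumulated output across iterations: add_content received "0", then "01", then "012". str(std::string()) is what actually empties the buffer. On the read side, ProtoToTensor keeps both calls, which is the right pairing: str(std::string()) stops the buffer from growing with each appended element, and clear() drops the eofbit left by the preceding extraction. A standalone demonstration of the difference (plain C++, not Paddle code):

#include <iostream>
#include <sstream>
#include <string>

int main() {
  // The bug the diff fixes: clear() resets stream flags, not the buffer,
  // so the contents accumulate across iterations.
  std::ostringstream os;
  for (int v : {1, 2, 3}) {
    os << v;
    std::cout << os.str() << '\n';  // prints 1, 12, 123
    os.clear();                     // flags only; "1", "12" stay buffered
  }

  // The fixed idiom: str(std::string()) actually empties the buffer.
  std::ostringstream os2;
  for (int v : {1, 2, 3}) {
    os2 << v;
    std::cout << os2.str() << '\n';  // prints 1, 2, 3
    os2.str(std::string());
  }
  return 0;
}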
@@ -10,6 +10,7 @@ TEST(TensorToProto, Case1) {
     t[i] = i;
     t1[i] = 0;
   }
+  TensorProto proto;
   TensorToProto(t, &proto);
   ProtoToTensor(proto, &t1);
......
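The visible part of the test hunk only declares the TensorProto and performs the two conversions; any assertions sit below the truncation. A hedged sketch of the full round trip the test exercises, with assumed names: the Tensor typedef, the header path, and the EXPECT_EQ checks are guesses, not the committed code.

#include "gtest/gtest.h"
#include "serialization.h"  // assumed header providing TensorToProto/ProtoToTensor

// Assumes Tensor is a typedef of TensorT<float> with the size_t
// constructor shown in the tensor.h hunk above.
TEST(TensorToProto, RoundTrip) {
  Tensor t(3), t1(3);
  for (size_t i = 0; i < 3; ++i) {
    t[i] = i;
    t1[i] = 0;
  }
  TensorProto proto;
  TensorToProto(t, &proto);   // write each element into proto.content
  ProtoToTensor(proto, &t1);  // read the elements back
  for (size_t i = 0; i < 3; ++i) {
    EXPECT_EQ(t[i], t1[i]);   // values must survive the round trip
  }
}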