diff --git a/paddle/optimizer/sgd_optmizer.cc b/paddle/optimizer/sgd_optmizer.cc
index f4fa7756eabbf6a313cae3eddf1f3db49a83152e..a222672815bfd9a84138dd3db4218a12632aecd7 100644
--- a/paddle/optimizer/sgd_optmizer.cc
+++ b/paddle/optimizer/sgd_optmizer.cc
@@ -5,7 +5,6 @@ namespace paddle {
 namespace optimizer {
 
 void SGDOptimizer::set_weight(Tensor *p) {
-  // ParameterOptimizer::set_weight(p);
   parameter_ = p;
   size_t size = p->size();
   // TODO: fix it with align aware allocator bind to Tensor
@@ -44,8 +43,6 @@ const char *SGDOptimizer::SerializeState() {
   state.set_version(kOptimizerVersion);
   TensorToProto(*parameter_, state.add_data());
   TensorToProto(*momentums_, state.add_data());
-  // state.add_data(param_proto);
-  // state.add_data(momentum_proto);
   state.add_hyperparam(momentum_);
   return state.SerializeAsString().c_str();
 }
diff --git a/proto/OptimizerConfig.proto b/proto/OptimizerConfig.proto
index f492364a5aadf1e352756ad706da6344799e4d53..16b041a9b257d11cbaa2e07c265053ecdedcaa71 100644
--- a/proto/OptimizerConfig.proto
+++ b/proto/OptimizerConfig.proto
@@ -87,7 +87,6 @@ message OptimizerState {
 
 message OptimizerConfig {
   // common config of optimizer
-  // algorithm config, type : string
   enum Optimizer {
     SGD = 1;
     Adadelta = 2;
@@ -100,7 +99,6 @@ message OptimizerConfig {
   optional AdagradConfig adagrad = 5;
   optional AdamConfig adam = 6;
 
-  // learning rate runtime policy config
  enum LrPolicy {
    ConstLr = 0;
    LinearLr = 1;
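
Review note: in the second hunk, `SerializeState()` still ends with `return state.SerializeAsString().c_str();`. `SerializeAsString()` returns a temporary `std::string`, so the `const char *` it hands back dangles as soon as the full expression finishes. Below is a minimal sketch of one way to keep the bytes alive, assuming a hypothetical cached member `serialized_state_` (a `std::string` added to `SGDOptimizer`; it is not present in this diff):

```cpp
const char *SGDOptimizer::SerializeState() {
  OptimizerState state;
  state.set_version(kOptimizerVersion);
  TensorToProto(*parameter_, state.add_data());
  TensorToProto(*momentums_, state.add_data());
  state.add_hyperparam(momentum_);
  // Cache the serialized bytes on the object instead of returning a
  // pointer into a temporary; the returned pointer then stays valid
  // until the next SerializeState() call (assumed member, see above).
  serialized_state_ = state.SerializeAsString();
  return serialized_state_.c_str();
}
```

Alternatively, changing the interface to return `std::string` by value would sidestep the lifetime issue entirely; the sketch above keeps the existing `const char *` signature.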