Commit 886e66a5, authored by 武毅, committed by GitHub

golang pserver use OptimizerConfig.proto (#3358)

* golang pserver optimizer config for user

* update

* update

* update

* update

* update by comments

* fix errors

* fix errors
Parent a277903d
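
For orientation, a minimal sketch (not part of this commit) of building and serializing the V2 OptimizerConfig message that the golang pserver now consumes. The import path is taken from the optimizer.py change below; the field and enum names mirror the C++ conversion code in NewRemoteParameterUpdater; the numeric values are placeholders, and SerializeToString is the standard protobuf Python call (the C++ side uses SerializeAsString).

from paddle.proto.OptimizerConfig_pb2 import OptimizerConfig

conf = OptimizerConfig()
conf.optimizer = OptimizerConfig.SGD        # enum the converter also uses for V1 "momentum"
conf.sgd.momentum = 0.9                     # placeholder value
conf.sgd.decay = 1e-4                       # placeholder value
conf.lr_policy = OptimizerConfig.Const
conf.const_lr.learning_rate = 1e-3          # placeholder value
serialized = conf.SerializeToString()       # byte string handed to paddle_init_param
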
@@ -17,12 +17,10 @@ def main():
# network config
x = paddle.layer.data(name='x', type=paddle.data_type.dense_vector(13))
y_predict = paddle.layer.fc(input=x,
param_attr=paddle.attr.Param(
name='w', learning_rate=1e-3),
param_attr=paddle.attr.Param(name='w'),
size=1,
act=paddle.activation.Linear(),
bias_attr=paddle.attr.Param(
name='b', learning_rate=1e-3))
bias_attr=paddle.attr.Param(name='b'))
y = paddle.layer.data(name='y', type=paddle.data_type.dense_vector(1))
cost = paddle.layer.mse_cost(input=y_predict, label=y)
......
@@ -41,7 +41,7 @@ ParameterUpdater *ParameterUpdater::createNewRemoteUpdater(
config->m->getConfig(), pserverSpec, useEtcd));
return updater;
#else
throw UnsupportError();
throw UnsupportError("not compiled with WITH_GOLANG");
#endif
}
......
@@ -66,28 +66,92 @@ void NewRemoteParameterUpdater::init(
// from parameter server
if (paddle_begin_init_params(parameterClient_)) {
LOG(INFO) << "paddle_begin_init_params start";
// NOTE: convert V1 OptimizationConfig proto to V2 OptimizerConfig.
// This makes the golang pserver compatible with the handy V1 demos.
// TODO: Refine or remove this ugly conversion code
OptimizerConfig optimizerConfigV2;
if (trainerConfig_.learning_method() == "momentum") {
optimizerConfigV2.set_optimizer(paddle::OptimizerConfig::SGD);
} else if (trainerConfig_.learning_method() == "adagrad") {
optimizerConfigV2.set_optimizer(paddle::OptimizerConfig::Adagrad);
optimizerConfigV2.mutable_adagrad()->set_epsilon(
trainerConfig_.ada_epsilon());
} else if (trainerConfig_.learning_method() == "adadelta") {
optimizerConfigV2.set_optimizer(paddle::OptimizerConfig::Adadelta);
optimizerConfigV2.mutable_adadelta()->set_epsilon(
trainerConfig_.ada_epsilon());
optimizerConfigV2.mutable_adadelta()->set_rho(trainerConfig_.ada_rou());
} else if (trainerConfig_.learning_method() == "adam") {
optimizerConfigV2.set_optimizer(paddle::OptimizerConfig::Adam);
optimizerConfigV2.mutable_adam()->set_beta_1(trainerConfig_.adam_beta1());
optimizerConfigV2.mutable_adam()->set_beta_2(trainerConfig_.adam_beta2());
optimizerConfigV2.mutable_adam()->set_epsilon(
trainerConfig_.adam_epsilon());
} else {
LOG(ERROR) << "got unsupported v1 optimizer config: "
<< trainerConfig_.learning_method();
optimizerConfigV2.set_optimizer(paddle::OptimizerConfig::SGD);
}
if (trainerConfig_.learning_rate_schedule() == "constant") {
optimizerConfigV2.set_lr_policy(paddle::OptimizerConfig::Const);
optimizerConfigV2.mutable_const_lr()->set_learning_rate(
trainerConfig_.learning_rate());
} else if (trainerConfig_.learning_rate_schedule() == "linear") {
optimizerConfigV2.set_lr_policy(paddle::OptimizerConfig::Linear);
optimizerConfigV2.mutable_linear_lr()->set_learning_rate(
trainerConfig_.learning_rate());
optimizerConfigV2.mutable_linear_lr()->set_lr_decay_a(
trainerConfig_.learning_rate_decay_a());
optimizerConfigV2.mutable_linear_lr()->set_lr_decay_b(
trainerConfig_.learning_rate_decay_b());
} else {
LOG(ERROR) << "got unsupported v1 learning_rate_schedule config: "
<< trainerConfig_.learning_rate_schedule() << ", set to const";
optimizerConfigV2.set_lr_policy(paddle::OptimizerConfig::Const);
}
// overwrite optimizerConfigV2 with per-parameter (layer) configs
for (int i = 0; i < parameterSize(); ++i) {
auto paramConfig = parameters_[i]->getConfig();
LOG(INFO) << "old param config: " << paramConfig.DebugString();
// FIXME(typhoonzero): convert old paramConfig to optimizerConfig
OptimizerConfig optimizeConfigV2;
auto sgdConfigV2 = optimizeConfigV2.mutable_sgd();
sgdConfigV2->set_momentum(paramConfig.momentum());
sgdConfigV2->set_decay(paramConfig.decay_rate());
optimizeConfigV2.set_lr_policy(paddle::OptimizerConfig::Const);
auto constlr = optimizeConfigV2.mutable_const_lr();
if (paramConfig.has_momentum() &&
trainerConfig_.learning_method() == "momentum") {
optimizerConfigV2.mutable_sgd()->set_momentum(paramConfig.momentum());
}
if (paramConfig.has_learning_rate()) {
constlr->set_learning_rate(paramConfig.learning_rate());
} else {
constlr->set_learning_rate(trainerConfig_.learning_rate());
switch (optimizerConfigV2.lr_policy()) {
case 0:  // Const
optimizerConfigV2.mutable_const_lr()->set_learning_rate(
paramConfig.learning_rate());
break;
case 1:  // Linear
optimizerConfigV2.mutable_linear_lr()->set_learning_rate(
paramConfig.learning_rate());
break;
}
}
if (paramConfig.has_decay_rate()) {
switch (optimizerConfigV2.optimizer()) {
case 1: // SGD
optimizerConfigV2.mutable_sgd()->set_decay(
paramConfig.decay_rate());
break;
case 2: // Adadelta
optimizerConfigV2.mutable_adadelta()->set_decay(
paramConfig.decay_rate());
break;
case 3: // Adagrad
optimizerConfigV2.mutable_adagrad()->set_decay(
paramConfig.decay_rate());
break;
case 4: // Adam
optimizerConfigV2.mutable_adam()->set_decay(
paramConfig.decay_rate());
break;
}
if (trainerConfig_.algorithm() == "sgd") {
optimizeConfigV2.set_optimizer(paddle::OptimizerConfig::SGD);
// FIXME: config all algorithms
} else {
optimizeConfigV2.set_optimizer(paddle::OptimizerConfig::SGD);
}
std::string bytes = optimizeConfigV2.SerializeAsString();
// send param and config to pserver
std::string bytes = optimizerConfigV2.SerializeAsString();
const char *array = bytes.data();
int size = (int)bytes.size();
paddle_init_param(
......
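
To restate the conversion above in one place, here are hypothetical helper tables (not code from the commit); the enum names come from the paddle::OptimizerConfig::* calls in the hunk, and anything unrecognized falls back to SGD / Const with an error logged.

from paddle.proto.OptimizerConfig_pb2 import OptimizerConfig

# V1 learning_method -> V2 optimizer (hypothetical restatement, not in the commit)
V1_LEARNING_METHOD_TO_V2_OPTIMIZER = {
    "momentum": OptimizerConfig.SGD,       # momentum itself goes into the sgd sub-config
    "adagrad": OptimizerConfig.Adagrad,    # adagrad.epsilon <- ada_epsilon
    "adadelta": OptimizerConfig.Adadelta,  # adadelta.epsilon <- ada_epsilon, adadelta.rho <- ada_rou
    "adam": OptimizerConfig.Adam,          # adam.beta_1 / beta_2 / epsilon <- adam_*
}

# V1 learning_rate_schedule -> V2 lr_policy (hypothetical restatement, not in the commit)
V1_LR_SCHEDULE_TO_V2_LR_POLICY = {
    "constant": OptimizerConfig.Const,     # const_lr.learning_rate
    "linear": OptimizerConfig.Linear,      # linear_lr.learning_rate / lr_decay_a / lr_decay_b
}
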
import paddle.trainer_config_helpers.config_parser_utils as config_parser_utils
import paddle.trainer_config_helpers.optimizers as v1_optimizers
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Optimizers(update equation) for SGD method.
TODO(zhihong) : create new optimizer with proto config, add new optimizer here
TODO(yuyang18): Complete comments.
"""
import paddle.trainer_config_helpers.config_parser_utils as config_parser_utils
import paddle.trainer_config_helpers.optimizers as v1_optimizers
from paddle.proto.OptimizerConfig_pb2 import OptimizerConfig
__all__ = [
'Momentum', 'Adam', 'Adamax', 'AdaGrad', 'DecayedAdaGrad', 'AdaDelta',
'RMSProp', 'ModelAverage', 'L2Regularization'
@@ -70,7 +83,8 @@ class Optimizer(object):
gradient_machine.prefetch(in_args)
parameter_updater.getParametersRemote()
:param pserver_spec: pserver location, eg: localhost:3000
:param pserver_spec: pserver location, eg: localhost:3000; if using etcd,
pserver_spec should be the etcd endpoints, eg: http://localhost:2379
:return: parameter_updater
"""
if is_local:
......
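
For concreteness, the two forms of pserver_spec described in the docstring above (placeholder values only; the etcd switch corresponds to the useEtcd argument visible in the createNewRemoteUpdater hunk):

pserver_spec = "localhost:3000"          # plain pserver endpoint, used when etcd is not enabled
pserver_spec = "http://localhost:2379"   # etcd endpoint, used when useEtcd is set
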
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from paddle.proto.ParameterConfig_pb2 import ParameterConfig
import paddle.trainer.config_parser as cp
......