diff --git a/python/paddle/v2/parameters.py b/python/paddle/v2/parameters.py
index 4cfd91882e2d5f0098d27b8897359152ddd94dda..d0b5ff12f2748909745294d4ba96888f2baf2b8d 100644
--- a/python/paddle/v2/parameters.py
+++ b/python/paddle/v2/parameters.py
@@ -101,6 +101,10 @@ class Parameters(object):
 
         self.__param_conf__[param_conf.name] = param_conf
 
+    def update_param_conf(self, model_config):
+        for p in model_config.parameters:
+            self.__param_conf__[p.name] = p
+
     def keys(self):
         """
         keys are the names of each parameter.
diff --git a/python/paddle/v2/topology.py b/python/paddle/v2/topology.py
index 2db66be2505dde38a501edf45984e1f36beb351d..8dbe944aead83f066d8339d7cf3e27aecd07e200 100644
--- a/python/paddle/v2/topology.py
+++ b/python/paddle/v2/topology.py
@@ -19,6 +19,7 @@
 import paddle.trainer_config_helpers as conf_helps
 import layer as v2_layer
 import config_base
 import cPickle
+from paddle.trainer import config_parser as cp
 
 __all__ = ['Topology']
@@ -50,6 +51,31 @@ class Topology(object):
 
         assert isinstance(self.__model_config__, ModelConfig)
 
+    def update_from_default(self):
+        # HACK(typhoonzero): update ParameterConfig(proto) in case optimizers
+        # are defined after layers, or between layers.
+        # Must be called from trainer.__init__().
+        for parameter in self.__model_config__.parameters:
+            if parameter.momentum == 0.0 and cp.g_default_momentum:
+                parameter.momentum = cp.g_default_momentum
+            if parameter.decay_rate == 0.0 and cp.g_default_decay_rate:
+                parameter.decay_rate = cp.g_default_decay_rate
+            if parameter.initial_mean == 0.0:
+                parameter.initial_mean = cp.g_default_initial_mean
+            if parameter.initial_std == 0.01:
+                parameter.initial_std = cp.g_default_initial_std
+            if parameter.initial_strategy == 0:
+                parameter.initial_strategy = cp.g_default_initial_strategy
+            if parameter.initial_smart == False:
+                parameter.initial_smart = cp.g_default_initial_smart
+            if parameter.num_batches_regularization == 1 and cp.g_default_num_batches_regularization:
+                parameter.num_batches_regularization = cp.g_default_num_batches_regularization
+            if parameter.gradient_clipping_threshold == 0.0 and cp.g_default_gradient_clipping_threshold:
+                parameter.gradient_clipping_threshold = cp.g_default_gradient_clipping_threshold
+            if parameter.device == -1 and cp.g_default_device:
+                parameter.device = cp.g_default_device
+        # FIXME(typhoonzero): ignored: update_hooks, g_default_compact_func
+
     def use_sparse_updater(self):
         """
         check if any parameter require to use sparse_update
diff --git a/python/paddle/v2/trainer.py b/python/paddle/v2/trainer.py
index 076e75593991415bc3fbcbd36a108c8c7de66932..d937d182b2cc3afff91afaac78e9e1a9fa2722bc 100644
--- a/python/paddle/v2/trainer.py
+++ b/python/paddle/v2/trainer.py
@@ -64,6 +64,11 @@ class SGD(object):
                             "paddle.v2.optimizer.Optimizer")
         import py_paddle.swig_paddle as api
         topology = Topology(cost, extra_layers=extra_layers)
+        # HACK(typhoonzero): update ParameterConfig(proto) in case optimizers
+        # are defined after layers, or between layers.
+        topology.update_from_default()
+        parameters.update_param_conf(topology.proto())
+
         self.__optimizer__ = update_equation
         self.__topology__ = topology
         self.__parameters__ = parameters
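For context, a minimal usage sketch of the situation this patch addresses, assuming the paddle.v2 API of that era; the network, layer names, and hyperparameters below are illustrative only and not part of this change. The point is that the optimizer is constructed after the layers, so the global defaults it writes into config_parser (momentum, decay_rate from L2 regularization, etc.) are missing from the ParameterConfig protos the layers already produced; SGD.__init__ now folds them back in via update_from_default() and update_param_conf().

import paddle.v2 as paddle

paddle.init(use_gpu=False, trainer_count=1)

# Layers (and hence their ParameterConfig protos) are defined first.
images = paddle.layer.data(
    name='pixel', type=paddle.data_type.dense_vector(784))
label = paddle.layer.data(
    name='label', type=paddle.data_type.integer_value(10))
predict = paddle.layer.fc(
    input=images, size=10, act=paddle.activation.Softmax())
cost = paddle.layer.classification_cost(input=predict, label=label)

parameters = paddle.parameters.create(cost)

# The optimizer is constructed *after* the layers. It sets global defaults
# (g_default_momentum, a decay_rate derived from L2Regularization, ...) in
# config_parser, which the already-built ParameterConfig protos missed.
optimizer = paddle.optimizer.Momentum(
    learning_rate=0.1 / 128.0,
    momentum=0.9,
    regularization=paddle.optimizer.L2Regularization(rate=8e-4))

# With this patch, SGD.__init__ calls topology.update_from_default() and
# parameters.update_param_conf(topology.proto()), so those defaults are
# propagated into both the topology proto and the Parameters object.
trainer = paddle.trainer.SGD(
    cost=cost, parameters=parameters, update_equation=optimizer)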