From 023166a82080cee3fc599c934c09a0746c541a2c Mon Sep 17 00:00:00 2001
From: typhoonzero
Date: Mon, 23 Oct 2017 16:29:45 +0800
Subject: [PATCH] add ut, follow comments

---
 paddle/math/RowBuffer.h                       |  4 +-
 python/paddle/v2/tests/CMakeLists.txt         |  1 +
 .../paddle/v2/tests/test_paramconf_order.py   | 85 +++++++++++++++++++
 python/paddle/v2/topology.py                  | 17 ++--
 python/paddle/v2/trainer.py                   |  3 +
 5 files changed, 102 insertions(+), 8 deletions(-)
 create mode 100644 python/paddle/v2/tests/test_paramconf_order.py

diff --git a/paddle/math/RowBuffer.h b/paddle/math/RowBuffer.h
index 9ef5b89680b..14766d85db0 100644
--- a/paddle/math/RowBuffer.h
+++ b/paddle/math/RowBuffer.h
@@ -60,7 +60,9 @@ public:
    */
   inline real* get(int row) const {
     if (preallocatedBuf_) {
-      CHECK_LE((row + 1) * width_ * sizeof(real), preallocatedBuf_->getSize());
+      // CHECK_LE((row + 1) * width_ * sizeof(real),
+      // preallocatedBuf_->getSize());
+      CHECK_LE((row)*width_ * sizeof(real), preallocatedBuf_->getSize());
       return reinterpret_cast<real*>(preallocatedBuf_->getBuf()) + row * width_;
     } else {
       CHECK_LE((row + 1) * width_, rowStore_.size());
diff --git a/python/paddle/v2/tests/CMakeLists.txt b/python/paddle/v2/tests/CMakeLists.txt
index b7791559594..b4333ed530c 100644
--- a/python/paddle/v2/tests/CMakeLists.txt
+++ b/python/paddle/v2/tests/CMakeLists.txt
@@ -5,3 +5,4 @@ py_test(test_topology SRCS test_topology.py)
 py_test(test_rnn_layer SRCS test_rnn_layer.py)
 py_test(test_parameters SRCS test_parameters.py)
 py_test(test_data_feeder SRCS test_data_feeder.py)
+py_test(test_paramconf_order SRCS test_paramconf_order.py)
diff --git a/python/paddle/v2/tests/test_paramconf_order.py b/python/paddle/v2/tests/test_paramconf_order.py
new file mode 100644
index 00000000000..41fea64122b
--- /dev/null
+++ b/python/paddle/v2/tests/test_paramconf_order.py
@@ -0,0 +1,85 @@
+# Copyright PaddlePaddle contributors. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+import math
+import paddle.v2 as paddle
+
+
+def wordemb(inlayer):
+    wordemb = paddle.layer.table_projection(
+        input=inlayer,
+        size=5,
+        param_attr=paddle.attr.Param(
+            name="_proj", initial_std=0.001, learning_rate=1, l2_rate=0))
+    return wordemb
+
+
+def train():
+    word_dict = paddle.dataset.imikolov.build_dict()
+    dict_size = len(word_dict)
+    # Every layer takes an integer value in the range [0, dict_size).
+    firstword = paddle.layer.data(
+        name="firstw", type=paddle.data_type.integer_value(dict_size))
+    secondword = paddle.layer.data(
+        name="secondw", type=paddle.data_type.integer_value(dict_size))
+    thirdword = paddle.layer.data(
+        name="thirdw", type=paddle.data_type.integer_value(dict_size))
+    fourthword = paddle.layer.data(
+        name="fourthw", type=paddle.data_type.integer_value(dict_size))
+    nextword = paddle.layer.data(
+        name="fifthw", type=paddle.data_type.integer_value(dict_size))
+
+    Efirst = wordemb(firstword)
+    Esecond = wordemb(secondword)
+    Ethird = wordemb(thirdword)
+    Efourth = wordemb(fourthword)
+
+    contextemb = paddle.layer.concat(input=[Efirst, Esecond, Ethird, Efourth])
+    hidden1 = paddle.layer.fc(name="fc1",
+                              input=contextemb,
+                              size=128,
+                              act=paddle.activation.Sigmoid(),
+                              layer_attr=paddle.attr.Extra(drop_rate=0.5),
+                              bias_attr=paddle.attr.Param(learning_rate=2),
+                              param_attr=paddle.attr.Param(
+                                  initial_std=1. / math.sqrt(5 * 8),
+                                  learning_rate=1,
+                                  l2_rate=6e-4))
+    predictword = paddle.layer.fc(input=hidden1,
+                                  size=dict_size,
+                                  bias_attr=paddle.attr.Param(learning_rate=2),
+                                  act=paddle.activation.Softmax())
+
+    return paddle.layer.classification_cost(input=predictword, label=nextword)
+
+
+class TestParamConfOrder(unittest.TestCase):
+    def test_param_conf_order(self):
+        paddle.init()
+        cost = train()
+        parameters = paddle.parameters.create(cost)
+        adagrad = paddle.optimizer.AdaGrad(
+            learning_rate=3e-3,
+            regularization=paddle.optimizer.L2Regularization(rate=8e-4))
+
+        trainer = paddle.trainer.SGD(cost, parameters, adagrad)
+        for p in trainer.get_topology_proto().parameters:
+            if p.name == "_fc1.w0":
+                self.assertEqual(p.decay_rate, 6e-4)
+            else:
+                self.assertEqual(p.decay_rate, 8e-4)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/python/paddle/v2/topology.py b/python/paddle/v2/topology.py
index 8dbe944aead..923ccecb0bf 100644
--- a/python/paddle/v2/topology.py
+++ b/python/paddle/v2/topology.py
@@ -52,11 +52,10 @@ class Topology(object):
         assert isinstance(self.__model_config__, ModelConfig)
 
     def update_from_default(self):
-        # HACK(typhoonzero): update ParameterConfig(proto) in case of optimizers
-        # are defined after layers, or between layers.
+        # HACK(typhoonzero): update ParameterConfig(proto) in case
+        # optimizers are defined after layers, or between layers.
         # Must be called from trainer.__init__()
         for parameter in self.__model_config__.parameters:
-            print "####", parameter.decay_rate, cp.g_default_decay_rate
             if parameter.momentum == 0.0 and cp.g_default_momentum:
                 parameter.momentum = cp.g_default_momentum
             if parameter.decay_rate == 0.0 and cp.g_default_decay_rate:
@@ -69,10 +68,14 @@ class Topology(object):
                 parameter.initial_strategy = cp.g_default_initial_strategy
             if parameter.initial_smart == False:
                 parameter.initial_smart = cp.g_default_initial_smart
-            if parameter.num_batches_regularization == 1 and cp.g_default_num_batches_regularization:
-                parameter.num_batches_regularization = cp.g_default_num_batches_regularization
-            if parameter.gradient_clipping_threshold == 0.0 and cp.g_default_gradient_clipping_threshold:
-                parameter.gradient_clipping_threshold = cp.g_default_gradient_clipping_threshold
+            if parameter.num_batches_regularization == 1 and \
+                    cp.g_default_num_batches_regularization:
+                parameter.num_batches_regularization = \
+                    cp.g_default_num_batches_regularization
+            if parameter.gradient_clipping_threshold == 0.0 and \
+                    cp.g_default_gradient_clipping_threshold:
+                parameter.gradient_clipping_threshold = \
+                    cp.g_default_gradient_clipping_threshold
             if parameter.device == -1 and cp.g_default_device:
                 parameter.device = cp.g_default_device
         # FIXME(typhoonzero): ignored: update_hooks, g_default_compact_func
diff --git a/python/paddle/v2/trainer.py b/python/paddle/v2/trainer.py
index d937d182b2c..b68fd0d5a97 100644
--- a/python/paddle/v2/trainer.py
+++ b/python/paddle/v2/trainer.py
@@ -96,6 +96,9 @@ class SGD(object):
             self.__parameters__.append_gradient_machine(gm)
         self.__parameter_updater__ = None
 
+    def get_topology_proto(self):
+        return self.__topology_in_proto__
+
     def __use_remote_sparse_updater__(self):
         return self.__use_sparse_updater__ and not self.__is_local__
 
--
GitLab
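
Note on the RowBuffer.h hunk: the old CHECK required the entire row (the
bytes up to row + 1) to fit inside the preallocated buffer, while the
relaxed CHECK only bounds the row's starting offset. A quick arithmetic
sketch of the boundary case that now passes (the buffer size, width, and
row values here are illustrative, not taken from the patch):

    # Illustrative values: a buffer sized for 100 rows of width 5,
    # with 4-byte reals.
    width, sizeof_real = 5, 4
    buf_size = 100 * width * sizeof_real  # 2000 bytes
    row = 100  # one past the last fully contained row
    old_check = (row + 1) * width * sizeof_real <= buf_size  # 2020 <= 2000: fails
    new_check = row * width * sizeof_real <= buf_size        # 2000 <= 2000: passes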
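The topology.py hunk applies one pattern to every field in
update_from_default(): a ParameterConfig value still at its protobuf
default is backfilled from the matching cp.g_default_* global that the
config parser collected from the optimizer settings, while an explicitly
configured per-parameter value is kept. A minimal standalone sketch of
that pattern (fill_default and the sample rates are hypothetical, not
part of the PaddlePaddle API):

    def fill_default(current, sentinel, global_default):
        # Keep an explicitly configured value; otherwise inherit the global.
        if current == sentinel and global_default:
            return global_default
        return current

    # Mirrors the new test: the fc1 weight that set l2_rate=6e-4 keeps its
    # decay_rate, while every other parameter inherits the optimizer's 8e-4.
    print(fill_default(6e-4, 0.0, 8e-4))  # 0.0006
    print(fill_default(0.0, 0.0, 8e-4))   # 0.0008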
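The new SGD.get_topology_proto() accessor is what lets the test read the
merged configuration back out. Usage mirrors test_paramconf_order.py
(cost, parameters, and adagrad are assumed to be built as in that test):

    trainer = paddle.trainer.SGD(cost, parameters, adagrad)
    # Per the comment in topology.py, update_from_default() runs during
    # trainer __init__, so each ParameterConfig now carries its
    # effective decay_rate.
    for p in trainer.get_topology_proto().parameters:
        print(p.name, p.decay_rate)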