Commit 023166a8 authored by typhoonzero

add ut, follow comments

Parent 71c2b296
@@ -60,7 +60,9 @@ public:
    */
   inline real* get(int row) const {
     if (preallocatedBuf_) {
-      CHECK_LE((row + 1) * width_ * sizeof(real), preallocatedBuf_->getSize());
+      // CHECK_LE((row + 1) * width_ * sizeof(real),
+      //          preallocatedBuf_->getSize());
+      CHECK_LE((row)*width_ * sizeof(real), preallocatedBuf_->getSize());
       return reinterpret_cast<real*>(preallocatedBuf_->getBuf()) + row * width_;
     } else {
       CHECK_LE((row + 1) * width_, rowStore_.size());
......
@@ -5,3 +5,4 @@ py_test(test_topology SRCS test_topology.py)
 py_test(test_rnn_layer SRCS test_rnn_layer.py)
 py_test(test_parameters SRCS test_parameters.py)
 py_test(test_data_feeder SRCS test_data_feeder.py)
+py_test(test_paramconf_order SRCS test_paramconf_order.py)
# Copyright PaddlePaddle contributors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import math
import paddle.v2 as paddle


def wordemb(inlayer):
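    # The four context words below all reuse this projection through the
    # shared parameter name "_proj", so only one embedding table is created.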
wordemb = paddle.layer.table_projection(
input=inlayer,
size=5,
param_attr=paddle.attr.Param(
name="_proj", initial_std=0.001, learning_rate=1, l2_rate=0))
    return wordemb


def train():
word_dict = paddle.dataset.imikolov.build_dict()
dict_size = len(word_dict)
    # Each data layer takes an integer value in the range [0, dict_size).
firstword = paddle.layer.data(
name="firstw", type=paddle.data_type.integer_value(dict_size))
secondword = paddle.layer.data(
name="secondw", type=paddle.data_type.integer_value(dict_size))
thirdword = paddle.layer.data(
name="thirdw", type=paddle.data_type.integer_value(dict_size))
fourthword = paddle.layer.data(
name="fourthw", type=paddle.data_type.integer_value(dict_size))
nextword = paddle.layer.data(
name="fifthw", type=paddle.data_type.integer_value(dict_size))
Efirst = wordemb(firstword)
Esecond = wordemb(secondword)
Ethird = wordemb(thirdword)
Efourth = wordemb(fourthword)
contextemb = paddle.layer.concat(input=[Efirst, Esecond, Ethird, Efourth])
hidden1 = paddle.layer.fc(name="fc1",
input=contextemb,
size=128,
act=paddle.activation.Sigmoid(),
layer_attr=paddle.attr.Extra(drop_rate=0.5),
bias_attr=paddle.attr.Param(learning_rate=2),
param_attr=paddle.attr.Param(
initial_std=1. / math.sqrt(5 * 8),
learning_rate=1,
l2_rate=6e-4))
predictword = paddle.layer.fc(input=hidden1,
size=dict_size,
bias_attr=paddle.attr.Param(learning_rate=2),
act=paddle.activation.Softmax())
    return paddle.layer.classification_cost(input=predictword, label=nextword)


class TestParamConfOrder(unittest.TestCase):
def test_param_conf_order(self):
paddle.init()
cost = train()
parameters = paddle.parameters.create(cost)
adagrad = paddle.optimizer.AdaGrad(
learning_rate=3e-3,
regularization=paddle.optimizer.L2Regularization(rate=8e-4))
trainer = paddle.trainer.SGD(cost, parameters, adagrad)
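        # fc1's weight sets its own l2_rate (6e-4), so it should keep that
        # value; every other parameter should fall back to the optimizer's
        # global L2 regularization rate (8e-4).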
for p in trainer.get_topology_proto().parameters:
if p.name == "_fc1.w0":
self.assertEqual(p.decay_rate, 6e-4)
else:
                self.assertEqual(p.decay_rate, 8e-4)


if __name__ == '__main__':
unittest.main()
@@ -52,11 +52,10 @@ class Topology(object):
         assert isinstance(self.__model_config__, ModelConfig)

     def update_from_default(self):
-        # HACK(typhoonzero): update ParameterConfig(proto) in case of optimizers
-        # are defined after layers, or between layers.
+        # HACK(typhoonzero): update ParameterConfig(proto) in case of
+        # optimizers are defined after layers, or between layers.
         # Must be called from trainer.__init__()
         for parameter in self.__model_config__.parameters:
-            print "####", parameter.decay_rate, cp.g_default_decay_rate
             if parameter.momentum == 0.0 and cp.g_default_momentum:
                 parameter.momentum = cp.g_default_momentum
             if parameter.decay_rate == 0.0 and cp.g_default_decay_rate:
@@ -69,10 +68,14 @@ class Topology(object):
                 parameter.initial_strategy = cp.g_default_initial_strategy
             if parameter.initial_smart == False:
                 parameter.initial_smart = cp.g_default_initial_smart
-            if parameter.num_batches_regularization == 1 and cp.g_default_num_batches_regularization:
-                parameter.num_batches_regularization = cp.g_default_num_batches_regularization
-            if parameter.gradient_clipping_threshold == 0.0 and cp.g_default_gradient_clipping_threshold:
-                parameter.gradient_clipping_threshold = cp.g_default_gradient_clipping_threshold
+            if parameter.num_batches_regularization == 1 and \
+                    cp.g_default_num_batches_regularization:
+                parameter.num_batches_regularization = \
+                    cp.g_default_num_batches_regularization
+            if parameter.gradient_clipping_threshold == 0.0 and \
+                    cp.g_default_gradient_clipping_threshold:
+                parameter.gradient_clipping_threshold = \
+                    cp.g_default_gradient_clipping_threshold
             if parameter.device == -1 and cp.g_default_device:
                 parameter.device = cp.g_default_device
             # FIXME(typhoonzero): ignored: update_hooks, g_default_compact_func
......
@@ -96,6 +96,9 @@ class SGD(object):
         self.__parameters__.append_gradient_machine(gm)
         self.__parameter_updater__ = None

+    def get_topology_proto(self):
+        return self.__topology_in_proto__
+
     def __use_remote_sparse_updater__(self):
         return self.__use_sparse_updater__ and not self.__is_local__
......