From e3a8929cf8b1311fbccb46e6d46eb451c71dcea5 Mon Sep 17 00:00:00 2001
From: JiabinYang
Date: Fri, 25 Jan 2019 03:31:45 +0000
Subject: [PATCH] little change

---
 paddle/fluid/inference/utils/CMakeLists.txt   |   4 +-
 python/paddle/fluid/imperative/nn.py          |   2 +-
 .../unittests/test_imperative_ptb_rnn.py      | 166 ++++++++++++------
 .../tests/unittests/test_imperative_split.py  |  48 +++++
 4 files changed, 159 insertions(+), 61 deletions(-)
 create mode 100644 python/paddle/fluid/tests/unittests/test_imperative_split.py

diff --git a/paddle/fluid/inference/utils/CMakeLists.txt b/paddle/fluid/inference/utils/CMakeLists.txt
index c43eaf7f9..a7b239731 100644
--- a/paddle/fluid/inference/utils/CMakeLists.txt
+++ b/paddle/fluid/inference/utils/CMakeLists.txt
@@ -1,4 +1,4 @@
 cc_library(benchmark SRCS benchmark.cc DEPS enforce)
 cc_test(test_benchmark SRCS benchmark_tester.cc DEPS benchmark)
-cc_binary(visualizer SRCS visualizer.cc DEPS analysis
-  paddle_pass_builder ir_pass_manager pass graph_viz_pass analysis_passes)
+#cc_binary(visualizer SRCS visualizer.cc DEPS analysis
+#  paddle_pass_builder ir_pass_manager pass graph_viz_pass analysis_passes)
diff --git a/python/paddle/fluid/imperative/nn.py b/python/paddle/fluid/imperative/nn.py
index 381fc4ef1..0fe680b49 100644
--- a/python/paddle/fluid/imperative/nn.py
+++ b/python/paddle/fluid/imperative/nn.py
@@ -295,7 +295,7 @@ class EMBEDDING(layers.Layer):
         self._param_attr = param_attr
         self._dtype = dtype
 
-        self._remote_prefetch = self.is_sparse and (not self.is_distributed)
+        self._remote_prefetch = self._is_sparse and (not self._is_distributed)
         if self._remote_prefetch:
             assert self._is_sparse is True and self._is_distributed is False
 
diff --git a/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py b/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py
index ecd52c8b8..c64d5964e 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py
@@ -18,23 +18,28 @@ import unittest
 import paddle.fluid as fluid
 from paddle.fluid.imperative.nn import EMBEDDING
 import paddle.fluid.framework as framework
-import paddle.fluid.optimizer as optimizer
+from paddle.fluid.optimizer import SGDOptimizer
+from paddle.fluid.imperative.base import to_variable
+import numpy as np
 from paddle.fluid.backward import append_backward
 
 
 class SimpleLSTMRNN(fluid.imperative.Layer):
-    def __init__(self, hidden_size, num_layers=2, init_scale=0.1, dropout=None):
+    def __init__(self,
+                 hidden_size,
+                 num_steps,
+                 num_layers=2,
+                 init_scale=0.1,
+                 dropout=None):
+        super(SimpleLSTMRNN, self).__init__()
         self._hidden_size = hidden_size
         self._num_layers = num_layers
         self._init_scale = init_scale
         self._dropout = dropout
         self.input = None
+        self.num_steps = num_steps
 
-    def _build_once(self,
-                    input_embedding,
-                    seq_len,
-                    init_hidden=None,
-                    init_cell=None):
+    def _build_once(self, input_embedding, init_hidden=None, init_cell=None):
         self.weight_1_arr = []
         self.weight_2_arr = []
         self.bias_arr = []
@@ -57,7 +62,7 @@ class SimpleLSTMRNN(fluid.imperative.Layer):
                 default_initializer=fluid.initializer.Constant(0.0))
             self.bias_arr.append(bias_1)
 
-            pre_hidden = self.layers.slice(
+            pre_hidden = fluid.layers.slice(
                 init_hidden, axes=[0], starts=[i], ends=[i + 1])
             pre_cell = fluid.layers.slice(
                 init_cell, axes=[0], starts=[i], ends=[i + 1])
@@ -64,23 +69,21 @@ class SimpleLSTMRNN(fluid.imperative.Layer):
             pre_hidden = fluid.layers.reshape(
                 pre_hidden, shape=[-1, self._hidden_size])
             pre_cell = fluid.layers.reshape(
                 pre_cell, shape=[-1, self._hidden_size])
-            fluid.hidden_array.append(pre_hidden)
-            fluid.cell_array.append(pre_cell)
-
-    def forward(self,
-                input_embedding,
-                seq_len,
-                init_hidden=None,
-                init_cell=None):
+            self.hidden_array.append(pre_hidden)
+            self.cell_array.append(pre_cell)
+
+    def forward(self, input_embedding, init_hidden=None, init_cell=None):
         res = []
-        for index in range(seq_len):
+        for index in range(self.num_steps):
             self.input = fluid.layers.slice(
                 input_embedding, axes=[1], starts=[index], ends=[index + 1])
             self.input = fluid.layers.reshape(
                 self.input, shape=[-1, self._hidden_size])
             for k in range(self._num_layers):
                 pre_hidden = self.hidden_array[k]
+                print("pre_hidden shape is:{}".format(pre_hidden.shape))
+                print("input shape is:{}".format(self.input.shape))
                 pre_cell = self.cell_array[k]
                 weight_1 = self.weight_1_arr[k]
                 bias = self.bias_arr[k]
@@ -89,38 +92,41 @@ class SimpleLSTMRNN(fluid.imperative.Layer):
                 gate_input = fluid.layers.matmul(x=nn, y=weight_1)
 
                 gate_input = fluid.layers.elementwise_add(gate_input, bias)
-                i, j, f, o = fluid.layers.split(
-                    gate_input, num_or_sections=4, dim=-1)
-
-                c = pre_cell * fluid.layers.sigmoid(f) + fluid.layers.sigmoid(
-                    i) * fluid.layers.tanh(j)
-                m = fluid.layers.tanh(c) * fluid.layers.sigmoid(o)
-
-                self.hidden_array[k] = m
-                self.cell_array[k] = c
-                self.input = m
-
-                if self.dropout is not None and self.dropout > 0.0:
-                    self.input = fluid.layers.dropout(
-                        self.input,
-                        dropout_prob=self.dropout,
-                        dropout_implementation='upscale_in_train')
-
-            res.append(
-                fluid.layers.reshape(
-                    input, shape=[1, -1, self._hidden_size]))
-        real_res = fluid.layers.concat(res, 0)
-        real_res = fluid.layers.transpose(x=real_res, perm=[1, 0, 2])
-        last_hidden = fluid.layers.concat(self.hidden_array, 1)
-        last_hidden = fluid.layers.reshape(
-            last_hidden, shape=[-1, self._num_layers, self._hidden_size])
-        last_hidden = fluid.layers.transpose(x=last_hidden, perm=[1, 0, 2])
-        last_cell = fluid.layers.concat(self.cell_array, 1)
-        last_cell = fluid.layers.reshape(
-            last_cell, shape=[-1, self._num_layers, self._hidden_size])
-        last_cell = fluid.layers.transpose(x=last_cell, perm=[1, 0, 2])
-
-        return real_res, last_hidden, last_cell
+                print("gate_input shape is: {}".format(gate_input.shape))
+                print("gate_input value is :{}".format(gate_input._numpy()))
+                print("gate_input desc is :{}".format(gate_input))
+                # i, j, f, o = fluid.layers.split(gate_input, num_or_sections=4, dim=-1)
+                # #
+                # # c = pre_cell * fluid.layers.sigmoid(f) + fluid.layers.sigmoid(
+                # #     i) * fluid.layers.tanh(j)
+                # # m = fluid.layers.tanh(c) * fluid.layers.sigmoid(o)
+                # #
+                # # self.hidden_array[k] = m
+                # # self.cell_array[k] = c
+                # # self.input = m
+                # #
+                # # if self.dropout is not None and self.dropout > 0.0:
+                # #     self.input = fluid.layers.dropout(
+                # #         self.input,
+                # #         dropout_prob=self.dropout,
+                # #         dropout_implementation='upscale_in_train')
+                # #
+                # # res.append(
+                # #     fluid.layers.reshape(
+                # #         input, shape=[1, -1, self._hidden_size]))
+                # # real_res = fluid.layers.concat(res, 0)
+                # # real_res = fluid.layers.transpose(x=real_res, perm=[1, 0, 2])
+                # # last_hidden = fluid.layers.concat(self.hidden_array, 1)
+                # # last_hidden = fluid.layers.reshape(
+                # #     last_hidden, shape=[-1, self._num_layers, self._hidden_size])
+                # # last_hidden = fluid.layers.transpose(x=last_hidden, perm=[1, 0, 2])
+                # # last_cell = fluid.layers.concat(self.cell_array, 1)
+                # # last_cell = fluid.layers.reshape(
+                # #     last_cell, shape=[-1, self._num_layers, self._hidden_size])
+                # # last_cell = fluid.layers.transpose(x=last_cell, perm=[1, 0, 2])
+                # #
+                # return real_res, last_hidden, last_cell
+        return [1], [2], [3]
 
 
 class PtbModel(fluid.imperative.Layer):
@@ -137,8 +143,10 @@
         self.init_scale = init_scale
         self.num_layers = num_layers
         self.num_steps = num_steps
+        self.dropout = dropout
         self.simple_lstm_rnn = SimpleLSTMRNN(
             hidden_size,
+            num_steps,
             num_layers=num_layers,
             init_scale=init_scale,
             dropout=dropout)
@@ -153,21 +161,23 @@
 
     def _build_once(self, input, label, init_hidden, init_cell):
         self.softmax_weight = fluid.layers.create_parameter(
-            [self._hidden_size, self._vocab_size],
+            [self.hidden_size, self.vocab_size],
             dtype="float32",
             name="softmax_weight",
             default_initializer=fluid.initializer.UniformInitializer(
-                low=-self._init_scale, high=self._init_scale))
+                low=-self.init_scale, high=self.init_scale))
         self.softmax_bias = fluid.layers.create_parameter(
-            [self._vocab_size],
+            [self.vocab_size],
             dtype="float32",
             name='softmax_bias',
             default_initializer=fluid.initializer.UniformInitializer(
-                low=-self._init_scale, high=self._init_scale))
+                low=-self.init_scale, high=self.init_scale))
 
     def forward(self, input, label, init_hidden, init_cell):
+
         init_h = fluid.layers.reshape(
             init_hidden, shape=[self.num_layers, -1, self.hidden_size])
+
         init_c = fluid.layers.reshape(
             init_cell, shape=[self.num_layers, -1, self.hidden_size])
 
@@ -179,6 +189,7 @@ class PtbModel(fluid.imperative.Layer):
             x_emb,
             dropout_prob=self.drop_out,
             dropout_implementation='upscale_in_train')
+        print("init_c is {}".format(init_c))
         rnn_out, last_hidden, last_cell = self.simple_lstm_rnn(x_emb, init_h,
                                                                init_c)
         rnn_out = fluid.layers.reshape(
@@ -202,14 +213,53 @@
 class TestImperativePtbRnn(unittest.TestCase):
     def test_mnist_cpu_float32(self):
         seed = 90
+        hidden_size = 10
+        vocab_size = 1000
+        num_layers = 1
+        num_steps = 3
+        init_scale = 0.1
+        batch_size = 4
         with fluid.imperative.guard():
             fluid.default_startup_program().random_seed = seed
             fluid.default_main_program().random_seed = seed
             # TODO: marsyang1993 Change seed to
             ptb_model = PtbModel(
-                hidden_size=10,
-                vocab_size=1000,
-                num_layers=1,
-                num_steps=3,
-                init_scale=0.1)
+                hidden_size=hidden_size,
+                vocab_size=vocab_size,
+                num_layers=num_layers,
+                num_steps=num_steps,
+                init_scale=init_scale)
+
+            sgd = SGDOptimizer(learning_rate=1e-3)
+            print("q")
+            for i in range(2):
+                x_data = np.arange(12).reshape(4, 3).astype('int64')
+                y_data = np.arange(1, 13).reshape(4, 3).astype('int64')
+                x_data = x_data.reshape((-1, num_steps, 1))
+                y_data = y_data.reshape((-1, 1))
+                init_hidden_data = np.zeros(
+                    (num_layers, batch_size, hidden_size), dtype='float32')
+                init_cell_data = np.zeros(
+                    (num_layers, batch_size, hidden_size), dtype='float32')
+                x = to_variable(x_data)
+                y = to_variable(y_data)
+                init_hidden = to_variable(init_hidden_data)
+                init_cell = to_variable(init_cell_data)
+                dy_loss, last_hidden, last_cell = ptb_model(x, y, init_hidden,
+                                                            init_cell)
+                dy_param_init = dict()
+                if i == 0:
+                    for param in fluid.default_main_program().global_block(
+                    ).all_parameters():
+                        dy_param_init[param.name] = param._numpy()
+                dy_loss._backward()
+                sgd.minimize(dy_loss)
+                dy_param_updated = dict()
+                for param in fluid.default_main_program().global_block(
+                ).all_parameters():
+                    dy_param_updated[param.name] = param._numpy()
+
+
+if __name__ == '__main__':
+    unittest.main()
 
diff --git a/python/paddle/fluid/tests/unittests/test_imperative_split.py b/python/paddle/fluid/tests/unittests/test_imperative_split.py
new file mode 100644
index 000000000..696fb5f78
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/test_imperative_split.py
@@ -0,0 +1,48 @@
+# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import unittest
+import paddle.fluid as fluid
+from paddle.fluid.imperative.nn import EMBEDDING
+import paddle.fluid.framework as framework
+from paddle.fluid.optimizer import SGDOptimizer
+from paddle.fluid.imperative.base import to_variable
+import numpy as np
+
+
+class Split_test(fluid.imperative.Layer):
+    def __init__(self):
+        super(Split_test, self).__init__()
+
+    def _build_once(self, input):
+        pass
+
+    def forward(self, input):
+        out = fluid.layers.split(input, num_or_sections=4, dim=-1)
+        return out
+
+
+class TestImperativePtbRnn(unittest.TestCase):
+    def test_spilt(self):
+        with fluid.imperative.guard():
+            inp = to_variable(np.arange(160).reshape(4, 40).astype('float32'))
+            st = Split_test()
+            out = st(inp)
+            print(out)
+
+
+if __name__ == '__main__':
+    unittest.main()
-- 
GitLab
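For reference, the block this commit comments out in SimpleLSTMRNN.forward (and that the new test_imperative_split.py exercises through fluid.layers.split) is a standard LSTM cell update: gate_input is carved into the four gates i, j, f, o, then c = pre_cell * sigmoid(f) + sigmoid(i) * tanh(j) and m = tanh(c) * sigmoid(o). Below is a minimal NumPy sketch of that update, not the fluid implementation itself; the shapes, the random gate_input, and the local sigmoid helper are illustrative stand-ins.

import numpy as np


def sigmoid(x):
    # stand-in for fluid.layers.sigmoid
    return 1.0 / (1.0 + np.exp(-x))


batch, hidden = 4, 10
rng = np.random.RandomState(0)
# gate_input plays the role of matmul(concat([input, pre_hidden], 1), weight_1) + bias
gate_input = rng.randn(batch, 4 * hidden).astype('float32')
pre_cell = np.zeros((batch, hidden), dtype='float32')

# the same carving as fluid.layers.split(gate_input, num_or_sections=4, dim=-1)
i, j, f, o = np.split(gate_input, 4, axis=-1)

c = pre_cell * sigmoid(f) + sigmoid(i) * np.tanh(j)  # new cell state
m = np.tanh(c) * sigmoid(o)                          # new hidden state
assert m.shape == (batch, hidden)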