#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import paddle.fluid as fluid
import paddle.fluid.layers as layers
import numpy as np
import paddle.fluid.core as core

from paddle.fluid import ParamAttr
from paddle.fluid.framework import Program, grad_var_name
from paddle.fluid.executor import Executor
from paddle.fluid.backward import append_backward

np.random.seed(123)


class PyRNNBase(object):
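    '''
    NumPy reference implementation checked against the fluid StaticRNN:
    subclasses implement step(), and forward() runs the recurrence and
    reduces all step outputs to a scalar mean.
    '''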

    def __init__(self, input_shape, output_shape):
        self.x = np.ones(shape=input_shape).astype("float32")
        self.y = np.zeros(shape=output_shape).astype("float32")

    def step(self, step_id, x):
        raise NotImplementedError

    def forward(self):
        for step_id in range(self.x.shape[0]):
            self.step(step_id, self.x[step_id])
        return np.array([np.mean(self.y)])

    def segment_inputs(self):
        return [self.x[i] for i in range(self.x.shape[0])]


class PySimpleRNN1(PyRNNBase):

    def __init__(self, input_shape, output_shape):
        super(PySimpleRNN1, self).__init__(input_shape, output_shape)

        seq_len, batch_size, input_dim = input_shape
        self.h_boot = np.random.normal(size=(batch_size,
                                             input_dim)).astype("float32")

        self.scale = 1.0 / 2.0
        mem_dim = (seq_len, batch_size, input_dim)
        self.mems = np.zeros(shape=mem_dim).astype("float32")

    def step(self, step_id, x):
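        # Recurrence under test: h_t = (h_{t-1} + x_t) * scale, h_{-1} = h_boot.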
        if step_id == 0:
            pre_mem = self.h_boot
        else:
            pre_mem = self.mems[step_id - 1]
        self.mems[step_id] = (pre_mem + x) * self.scale
        self.y[step_id] = self.mems[step_id]


class PySimpleRNN2(PyRNNBase):

    def __init__(self, input_shape, output_shape):
        super(PySimpleRNN2, self).__init__(input_shape, output_shape)

        seq_len, batch_size, input_dim = input_shape
        self.W = np.ones(shape=(input_dim, input_dim)).astype("float32")
        self.U = np.zeros(shape=(input_dim, input_dim)).astype("float32")
        self.h_boot = np.ones(shape=(batch_size, input_dim)).astype("float32")

        mem_dim = (seq_len, batch_size, input_dim)
        self.mems = np.zeros(shape=mem_dim).astype("float32")

    def step(self, step_id, x):
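        # Recurrence under test: h_t = sigmoid(x_t W + h_{t-1} U), h_{-1} = h_boot.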
        if step_id > 0:
            pre_mem = self.mems[step_id - 1]
        else:
            pre_mem = self.h_boot
        xW = np.matmul(x, self.W).astype("float32")
        hU = np.matmul(pre_mem, self.U).astype("float32")

        def py_sigmoid(x):
            return 1. / (1. + np.exp(-x))

        self.mems[step_id] = py_sigmoid(xW + hU)
        self.y[step_id] = self.mems[step_id]


def create_tensor(np_data, place):
    tensor = core.LoDTensor()
    tensor.set(np_data, place)
    return tensor


class RecurrentOpTest1(unittest.TestCase):
    '''
    Test RNNOp
    equation:
        h_t = ( x_t + h_{t-1} ) / scale
    vars:
        - x
    memories:
        - h
    outputs:
        - h
    '''

    input_dim = 2
    batch_size = 1
    sent_len = 1

    def setup_program(self):
        self.main_program = Program()
        self.startup_program = Program()
        self.place = core.CPUPlace()

    def setUp(self):
        self.setup_program()
        self.feed_data_field = {"x", "h_boot"}
        self.grad_data_field = self.feed_data_field

        self.input_shape = (self.sent_len, self.batch_size, self.input_dim)
        self.output_shape = (self.sent_len, self.batch_size, self.input_dim)
        self.py_rnn = PySimpleRNN1(self.input_shape, self.output_shape)

        with fluid.program_guard(self.main_program, self.startup_program):
            self.output = layers.mean(self.create_rnn_op())

    def create_rnn_op(self):
        x = layers.data(shape=[self.sent_len, self.batch_size, self.input_dim],
                        dtype='float32',
                        name='x',
                        append_batch_size=False)
        x.stop_gradient = False
        h_boot = layers.data(shape=[self.input_dim],
                             dtype='float32',
                             name='h_boot')
        h_boot.stop_gradient = False

        rnn = layers.StaticRNN()
        with rnn.step():
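            # Inside the step block: memory() reads h_{t-1} and step_input()
            # yields the t-th slice of x; update_memory()/output() below set
            # the next state and the per-step output.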
            h_pre = rnn.memory(init=h_boot)
            x_t = rnn.step_input(x)

            h = layers.scale(x=layers.elementwise_add(x=h_pre, y=x_t),
                             scale=self.py_rnn.scale)

            rnn.update_memory(h_pre, h)
            rnn.output(h)

        return rnn()

    def forward(self):
        self.feed_map = {
            x: create_tensor(getattr(self.py_rnn, x), self.place)
            for x in self.feed_data_field
        }
        exe = Executor(self.place)
        out = exe.run(self.main_program,
                      feed=self.feed_map,
                      fetch_list=[self.output])

        return out[0]

    def backward(self):
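        # Run the program (after append_backward has added the gradient ops)
        # and fetch the analytic gradient w.r.t. each grad_data_field var.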
        self.feed_map = {
            x: create_tensor(getattr(self.py_rnn, x), self.place)
            for x in self.feed_data_field
        }
        fetch_list = [
            self.main_program.global_block().var(grad_var_name(x))
            for x in self.grad_data_field
        ]

        exe = Executor(self.place)
        return exe.run(self.main_program,
                       feed=self.feed_map,
                       fetch_list=fetch_list,
                       return_numpy=False)

    def test_backward(self, rtol=0.01):
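        # Check the forward pass first, then compare the analytic gradients
        # from append_backward with numeric central-difference gradients.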
        self.check_forward()

        with fluid.program_guard(self.main_program, self.startup_program):
            append_backward(self.output)

        ana_grad = [np.array(x) for x in self.backward()]

        num_grad = self.get_numerical_gradient()
        for idx, name in enumerate(self.grad_data_field):
            self.assertEqual(num_grad[idx].shape, ana_grad[idx].shape)
            self.assertTrue(
                np.isclose(num_grad[idx], ana_grad[idx], rtol=rtol).all(),
                "num_grad (" + name + ") has diff at " + str(self.place) +
                "\nExpect " + str(num_grad[idx]) + "\n" + "But Got" +
                str(ana_grad[idx]) + " in class " + self.__class__.__name__)

    def check_forward(self):
        pd_output = self.forward()
        py_output = self.py_rnn.forward()
        self.assertEqual(pd_output.shape, py_output.shape)
        self.assertTrue(np.isclose(pd_output, py_output, rtol=0.01).all())

    def get_numerical_gradient(self, delta=0.005):
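        # Elementwise central finite differences:
        #   g ~= (loss(f + delta) - loss(f - delta)) / (2 * delta)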
        dloss_dout = 1.0
        feed_list = [getattr(self.py_rnn, x) for x in self.grad_data_field]
        grad_list = [np.zeros_like(x) for x in feed_list]
        for feed, grad in zip(feed_list, grad_list):
            for f, g in np.nditer([feed, grad], op_flags=['readwrite']):
                o = float(f)
                f[...] = o + delta
                y_pos = self.forward()

                f[...] = o - delta
                y_neg = self.forward()

                f[...] = o
                dout_dfeed = (y_pos - y_neg) / (delta * 2)
                g[...] = dout_dfeed[0]

        return grad_list


class RecurrentOpTest2(RecurrentOpTest1):
    r'''
    Test RNNOp
    equation:
        h_t = \sigma (W x_t + U h_{t-1})
    weights:
        - W
        - U
    vars:
        - x
    memories:
        - h
    outputs:
       - h
    '''

    input_dim = 2
    batch_size = 10
    sent_len = 2

    def setUp(self):
        self.setup_program()

        self.feed_data_field = {"x", "h_boot", "W", "U"}
        self.grad_data_field = self.feed_data_field

        self.input_shape = (self.sent_len, self.batch_size, self.input_dim)
        self.output_shape = (self.sent_len, self.batch_size, self.input_dim)
        self.py_rnn = PySimpleRNN2(self.input_shape, self.output_shape)

        with fluid.program_guard(self.main_program, self.startup_program):
            self.output = layers.mean(self.create_rnn_op())

    def create_rnn_op(self):
        x = layers.data(shape=[self.sent_len, self.batch_size, self.input_dim],
                        dtype='float32',
                        name='x',
                        append_batch_size=False)
        x.stop_gradient = False
        h_boot = layers.data(shape=[self.input_dim],
                             dtype='float32',
                             name='h_boot')
        h_boot.stop_gradient = False

        rnn = layers.StaticRNN()
        with rnn.step():
            h_pre = rnn.memory(init=h_boot)
            x_t = rnn.step_input(x)

            temp_l = layers.fc(
                input=x_t,
                size=self.input_dim,
                param_attr=ParamAttr(
                    name='W',
                    initializer=fluid.initializer.ConstantInitializer(1.0)),
                bias_attr=False)
            temp_r = layers.fc(
                input=h_pre,
                size=self.input_dim,
                param_attr=ParamAttr(
                    name='U',
                    initializer=fluid.initializer.ConstantInitializer(0.0)),
                bias_attr=False)

            h = layers.sigmoid(x=layers.elementwise_add(x=temp_l, y=temp_r))

            rnn.update_memory(h_pre, h)
            rnn.output(h)

        return rnn()

    def test_backward(self):
        super(RecurrentOpTest2, self).test_backward(rtol=0.01)


class RecurrentOpMultipleMemoryTest(RecurrentOpTest1):
    '''
    Test RNNOp with two memories
    equation:
        h_1 = h_pre_1
        h_2 = h_pre_2
        y = h_1 + h_2
    vars:
        - x
    memories:
        - h_1, h_2
    outputs:
       - y
    '''

    class PySimpleRNN3(PyRNNBase):

        def __init__(self, input_shape, output_shape):
            super(RecurrentOpMultipleMemoryTest.PySimpleRNN3,
                  self).__init__(input_shape, output_shape)

            seq_len, batch_size, input_dim = input_shape
            self.h_boot1 = np.random.normal(size=(batch_size,
                                                  input_dim)).astype("float32")
            self.h_boot2 = np.random.normal(size=(batch_size,
                                                  input_dim)).astype("float32")

            mem_dim = (seq_len, batch_size, input_dim)
            self.mems1 = np.zeros(shape=mem_dim).astype("float32")
            self.mems2 = np.zeros(shape=mem_dim).astype("float32")

        def step(self, step_id, x):
            if step_id == 0:
                pre_mem1 = self.h_boot1
                pre_mem2 = self.h_boot2
            else:
                pre_mem1 = self.mems1[step_id - 1]
                pre_mem2 = self.mems2[step_id - 1]
            self.mems1[step_id] = pre_mem1
            self.mems2[step_id] = pre_mem2
            self.y[step_id] = self.mems1[step_id] + self.mems2[step_id] + x

    input_dim = 1
    batch_size = 1
    sent_len = 2

    def setUp(self):
        self.setup_program()

        self.feed_data_field = {"x", "h_boot1", "h_boot2"}
        self.grad_data_field = self.feed_data_field

        self.input_shape = (self.sent_len, self.batch_size, self.input_dim)
        self.output_shape = (self.sent_len, self.batch_size, self.input_dim)
        self.py_rnn = RecurrentOpMultipleMemoryTest.PySimpleRNN3(
            self.input_shape, self.output_shape)

        with fluid.program_guard(self.main_program, self.startup_program):
            self.output = layers.mean(self.create_rnn_op())

    def create_rnn_op(self):
        x = layers.data(shape=[self.sent_len, self.batch_size, self.input_dim],
                        dtype='float32',
                        name='x',
                        append_batch_size=False)
        x.stop_gradient = False
        h_boot1 = layers.data(shape=[self.batch_size, self.input_dim],
                              dtype='float32',
                              name='h_boot1',
                              append_batch_size=False)
        h_boot1.stop_gradient = False
        h_boot2 = layers.data(shape=[self.batch_size, self.input_dim],
                              dtype='float32',
                              name='h_boot2',
                              append_batch_size=False)
        h_boot2.stop_gradient = False

        rnn = layers.StaticRNN()
        with rnn.step():
            h_pre1 = rnn.memory(init=h_boot1)
            h_pre2 = rnn.memory(init=h_boot2)
            x_t = rnn.step_input(x)

            mem1 = layers.scale(x=h_pre1, scale=1.0)
            mem2 = layers.scale(x=h_pre2, scale=1.0)
            out = layers.sums(input=[mem1, x_t, mem2])

            rnn.update_memory(h_pre1, mem1)
            rnn.update_memory(h_pre2, mem2)
            rnn.output(out)

        return rnn()


class RecurrentOpNoMemBootTest(RecurrentOpTest1):
    '''
    Test RNNOp with a single memory and no boot (initial) state
    equation:
        mem = x + mem_pre
        y = mem
    vars:
        - x
    memories:
        - mem
    outputs:
       - y
    '''

    class PySimpleRNN4(PyRNNBase):

        def __init__(self, input_shape, output_shape):
            super(RecurrentOpNoMemBootTest.PySimpleRNN4,
                  self).__init__(input_shape, output_shape)
            mem_dim = input_shape
            self.mems = np.zeros(shape=mem_dim).astype("float32")

        def step(self, step_id, x):
            if step_id == 0:
                pre_mem = np.zeros_like(x)
            else:
                pre_mem = self.mems[step_id - 1]
            self.mems[step_id] = pre_mem + x
            self.y[step_id] = self.mems[step_id]

    input_dim = 1
    batch_size = 1
    sent_len = 2

    def setUp(self):
        self.setup_program()

        self.feed_data_field = {"x"}
        self.grad_data_field = self.feed_data_field

        self.input_shape = (self.sent_len, self.batch_size, self.input_dim)
        self.output_shape = (self.sent_len, self.batch_size, self.input_dim)
        self.py_rnn = RecurrentOpNoMemBootTest.PySimpleRNN4(
            self.input_shape, self.output_shape)

        with fluid.program_guard(self.main_program, self.startup_program):
            self.output = layers.mean(self.create_rnn_op())

    def create_rnn_op(self):
        x = layers.data(shape=[self.sent_len, self.batch_size, self.input_dim],
                        dtype='float32',
                        name='x',
                        append_batch_size=False)
        x.stop_gradient = False

        rnn = layers.StaticRNN()
        with rnn.step():
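            # No init tensor is given, so the memory starts as zeros
            # (init_value defaults to 0.0); batch_ref=x lets the -1
            # dimension follow x's batch size.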
            mem_pre = rnn.memory(shape=[-1, self.input_dim], batch_ref=x)
            x_t = rnn.step_input(x)
            mem = layers.elementwise_add(x=mem_pre, y=x_t)
            rnn.update_memory(mem_pre, mem)
            rnn.output(mem)

        return rnn()


class RecurrentOpSubBlockTest(RecurrentOpTest1):
    r'''
    Test RNNOp with subblock variable
    equation:
        y_ = emb * w1
        h_t = \concat([x, h_{t-1}])
        h_t = h_t * w2
        h_t = \unsqueeze(h_t, 1)
        h_t = \dot_attention(h_t, y_)
        h_t = \squeeze(h_t, 1)
        y = h_t
    vars:
        - x
        - w1
        - w2
    memories:
        - h
    outputs:
       - y
    '''

    class PySimpleRNN5(PyRNNBase):

        def __init__(self, input_shape, output_shape):
            super(RecurrentOpSubBlockTest.PySimpleRNN5,
                  self).__init__(input_shape, output_shape)

            seq_len, batch_size, input_dim = input_shape
            self.w1 = np.random.uniform(-0.1, 0.1,
                                        size=(input_dim,
                                              input_dim)).astype("float32")
            self.w2 = np.random.uniform(-0.1,
                                        0.1,
                                        size=(input_dim * 2,
                                              input_dim)).astype("float32")

            self.emb = np.random.uniform(-0.1,
                                         0.1,
                                         size=(seq_len, batch_size,
                                               input_dim)).astype("float32")

            mem_dim = (seq_len, batch_size, input_dim)
            self.mems = np.zeros(shape=mem_dim).astype("float32")
            self.oy = np.matmul(self.emb, self.w1)

        def step(self, step_id, x):

            def dot_attention(query, memory):
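                # Dot-product attention: softmax(query @ memory^T) @ memory;
                # returns the weighted memory and the attention weights.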
                attn = np.matmul(query, memory.transpose((0, 2, 1)))
                weight = softmax(attn)
                weight_memory = np.matmul(weight, memory)
                return weight_memory, weight

            def softmax(x):
                return np.exp(x) / sum(np.exp(x))

            if step_id == 0:
                pre_mem = np.zeros_like(x)
            else:
                pre_mem = self.mems[step_id - 1]
            concat_in = np.concatenate([x, pre_mem], 1)
            new_mem = np.matmul(concat_in, self.w2)

            new_mem = np.expand_dims(new_mem, 1)
            new_mem, _ = dot_attention(new_mem, self.oy)
            new_mem = np.squeeze(new_mem, 1)

            self.mems[step_id] = new_mem
            self.y[step_id] = self.mems[step_id]

    input_dim = 2
    batch_size = 3
    sent_len = 3

    def setUp(self):
        self.setup_program()

        self.feed_data_field = {"x", "emb", "w1", "w2"}
        self.grad_data_field = self.feed_data_field

        self.input_shape = (self.sent_len, self.batch_size, self.input_dim)
        self.output_shape = (self.sent_len, self.batch_size, self.input_dim)
        self.py_rnn = RecurrentOpSubBlockTest.PySimpleRNN5(
            self.input_shape, self.output_shape)

        with fluid.program_guard(self.main_program, self.startup_program):
            rnn_out = self.create_rnn_op()
            self.output = layers.mean(rnn_out)

    def create_rnn_op(self):
        x = layers.data(shape=[self.sent_len, self.batch_size, self.input_dim],
                        dtype='float32',
                        name='x',
                        append_batch_size=False)
        x.stop_gradient = False

        emb = layers.data(
            name='emb',
            shape=[self.sent_len, self.batch_size, self.input_dim],
            dtype='float32',
            append_batch_size=False)
        emb.stop_gradient = False

        w1 = layers.data(shape=[self.input_dim, self.input_dim],
                         dtype='float32',
                         name='w1',
                         append_batch_size=False)
        w1.stop_gradient = False
        w2 = layers.data(shape=[self.input_dim * 2, self.input_dim],
                         dtype='float32',
                         name='w2',
                         append_batch_size=False)
        w2.stop_gradient = False

        rnn = layers.StaticRNN()

        def dot_attention(query, memory):
            attn = layers.matmul(query, memory, transpose_y=True)
            weight = layers.softmax(attn)
            weight_memory = layers.matmul(weight, memory)

            return weight_memory, weight

        y = layers.matmul(emb, w1)
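        # y is computed in the outer block but referenced inside rnn.step();
        # this outer-scope capture is the "subblock variable" under test.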
        with rnn.step():
            pre_h = rnn.memory(shape=(self.sent_len, self.input_dim),
                               batch_ref=x,
                               init_value=0.0)
            step_in = rnn.step_input(x)
            concat_in = layers.concat([step_in, pre_h], 1)
            new_h = layers.matmul(concat_in, w2)
            new_h = layers.unsqueeze(new_h, [1])
            new_h, _ = dot_attention(new_h, y)
            new_h = layers.squeeze(new_h, [1])

            rnn.update_memory(pre_h, new_h)
            rnn.step_output(new_h)

        return rnn()


class RecurrentOpStopGradientTest(RecurrentOpTest1):
    r"""
    Test RNNOp with stop_gradient = True
    equation:
        h_t = \sigma (W x_t + U h_{t-1})
    weights:
        - W
        - U
    vars:
        - x
    memories:
        - h
    output:
        - h
    """

    input_dim = 2
    batch_size = 10
    sent_len = 2

    def setUp(self):
        self.setup_program()
        self.feed_data_field = {"x", "h_boot", "W", "U"}
        self.grad_data_field = {"x", "W", "U"}

        self.input_shape = (self.sent_len, self.batch_size, self.input_dim)
        self.output_shape = (self.sent_len, self.batch_size, self.input_dim)
        self.py_rnn = PySimpleRNN2(self.input_shape, self.output_shape)

        with fluid.program_guard(self.main_program, self.startup_program):
            self.output = layers.mean(self.create_rnn_op())

    def create_rnn_op(self):
        x = layers.data(shape=[self.sent_len, self.batch_size, self.input_dim],
                        dtype="float32",
                        name="x",
                        append_batch_size=False)
        x.stop_gradient = False
        h_boot = layers.data(shape=[self.input_dim],
                             dtype="float32",
                             name="h_boot")
        h_boot.stop_gradient = True
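        # Because the gradient is cut here, h_boot is left out of
        # grad_data_field in setUp.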

        rnn = layers.StaticRNN()
        with rnn.step():
            h_pre = rnn.memory(init=h_boot)  # init doesn't have gradient
            x_t = rnn.step_input(x)

            temp_l = layers.fc(
                input=x_t,
                size=self.input_dim,
                param_attr=ParamAttr(
                    name="W",
                    initializer=fluid.initializer.ConstantInitializer(1.0)),
                bias_attr=False)
            temp_r = layers.fc(
                input=h_pre,
                size=self.input_dim,
                param_attr=ParamAttr(
                    name="U",
                    initializer=fluid.initializer.ConstantInitializer(0.0)),
                bias_attr=False)

            h = layers.sigmoid(x=layers.elementwise_add(temp_l, temp_r))

            rnn.update_memory(h_pre, h)
            rnn.output(h)

        return rnn()


if __name__ == '__main__':
    unittest.main()