#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import paddle.fluid as fluid
import paddle.fluid.core as core

import numpy as np


class TestMultiheadAttention(unittest.TestCase):
    """Smoke test for fluid.nets.scaled_dot_product_attention.

    Builds a small program that runs multi-head scaled dot-product
    attention (plus an appended backward pass) on random inputs, and
    executes it on CPU and, when available, on a CUDA device.
    """

    def gen_random_input(self):
        """Generate random input data."""
        # batch_size, max_sequence_length, hidden dimension
        self.input_shape = (3, 13, 16)
        self.queries = np.random.random(size=self.input_shape).astype("float32")
        self.keys = np.random.random(size=self.input_shape).astype("float32")

    def set_program(self):
        """Build the test program."""
        queries = fluid.layers.data(name="queries",
                                    shape=self.input_shape,
                                    dtype="float32",
                                    append_batch_size=False)
        queries.stop_gradient = False
        keys = fluid.layers.data(name="keys",
                                 shape=self.input_shape,
                                 dtype="float32",
                                 append_batch_size=False)
        keys.stop_gradient = False

        # Self-attention style: the same tensor serves as keys and values.
        contexts = fluid.nets.scaled_dot_product_attention(queries=queries,
                                                           keys=keys,
                                                           values=keys,
                                                           num_heads=8,
                                                           dropout_rate=0.)
        # Reduce to a scalar so a backward pass can be appended; this only
        # checks that backward construction succeeds, not gradient values.
        out = fluid.layers.reduce_sum(contexts, dim=None)
        fluid.backward.append_backward(loss=out)

        self.fetch_list = [contexts]

    def run_program(self):
        """Run the test program on every available place."""
        places = [core.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(core.CUDAPlace(0))

        for place in places:
            self.set_inputs(place)
            exe = fluid.Executor(place)

            exe.run(fluid.default_startup_program())
            output = exe.run(fluid.default_main_program(),
                             feed=self.inputs,
                             fetch_list=self.fetch_list,
                             return_numpy=True)
            # NOTE(review): only the output of the last place is retained;
            # earlier results are overwritten.
            self.op_output = output

    def set_inputs(self, place):
        """Set the randomly generated data to the test program."""
        self.inputs = {}
        queries = fluid.Tensor()
        queries.set(self.queries, place)

        keys = fluid.Tensor()
        keys.set(self.keys, place)

        self.inputs["keys"] = keys
        self.inputs["queries"] = queries

    def test_multihead_attention(self):
        self.gen_random_input()

        self.set_program()
        self.run_program()

        # FIXME(caoying): add more meaningful checks on self.op_output.


# Run the unit tests when this file is executed as a script.
if __name__ == '__main__':
    unittest.main()