#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np

import paddle
from paddle import fluid
from paddle.fluid import core


class TestMultiheadAttention(unittest.TestCase):
    """Smoke test for ``fluid.nets.scaled_dot_product_attention``.

    Builds a static program around the attention op with random
    queries/keys, then runs forward + backward on every available place.
    """

    def gen_random_input(self):
        """Generate random input data."""

        def _random_input():
            return np.random.random(size=self.input_shape).astype("float32")

        # batch_size, max_sequence_length, hidden dimension
        self.input_shape = (3, 13, 16)
        self.queries = _random_input()
        self.keys = _random_input()

    def set_program(self):
        """Build the test program."""
        data_vars = {}
        # Create the two feed variables in a fixed order: queries, then keys.
        for var_name in ("queries", "keys"):
            var = paddle.static.data(
                name=var_name,
                shape=self.input_shape,
                dtype="float32",
            )
            # Gradients must flow back into the inputs for append_backward.
            var.stop_gradient = False
            data_vars[var_name] = var

        # Self-attention style call: the keys double as the values.
        attn_out = fluid.nets.scaled_dot_product_attention(
            queries=data_vars["queries"],
            keys=data_vars["keys"],
            values=data_vars["keys"],
            num_heads=8,
            dropout_rate=0.0,
        )
        loss = paddle.sum(attn_out, axis=None)
        fluid.backward.append_backward(loss=loss)

        self.fetch_list = [attn_out]

    def run_program(self):
        """Run the test program."""
        available_places = [core.CPUPlace()]
        if core.is_compiled_with_cuda():
            available_places.append(core.CUDAPlace(0))

        for run_place in available_places:
            self.set_inputs(run_place)
            executor = fluid.Executor(run_place)

            executor.run(fluid.default_startup_program())
            self.op_output = executor.run(
                fluid.default_main_program(),
                feed=self.inputs,
                fetch_list=self.fetch_list,
                return_numpy=True,
            )

    def set_inputs(self, place):
        """Set the randomly generated data to the test program."""
        query_tensor = fluid.Tensor()
        query_tensor.set(self.queries, place)

        key_tensor = fluid.Tensor()
        key_tensor.set(self.keys, place)

        # NOTE: feed names must match the paddle.static.data names above.
        self.inputs = {"keys": key_tensor, "queries": query_tensor}

    def test_multihead_attention(self):
        self.gen_random_input()

        self.set_program()
        self.run_program()

        # FIXME(caoying): add more meaningful checks on the outputs.

# Allow running this test file directly as a script.
if __name__ == "__main__":
    unittest.main()