# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
# 
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# 
#     http://www.apache.org/licenses/LICENSE-2.0
# 
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import unittest
import numpy as np

import paddle
import paddle.static as static
from paddle.utils.cpp_extension import load, get_build_directory
from paddle.utils.cpp_extension.extension_utils import run_cmd
from utils import paddle_includes, extra_cc_args, extra_nvcc_args
from paddle.fluid.framework import _test_eager_guard

# Because Windows does not use docker, the shared lib may already exist in
# the cache dir; it will not be recompiled unless the shared lib is removed.
file = '{}\\custom_concat\\custom_concat.pyd'.format(get_build_directory())
if os.name == 'nt' and os.path.isfile(file):
    cmd = 'del {}'.format(file)
    run_cmd(cmd, True)

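# Make the headers in this test directory visible to the JIT build below.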
if os.name == 'nt':
    test_include = "..\\python\\paddle\\fluid\\tests\\custom_op"
else:
    test_include = "../python/paddle/fluid/tests/custom_op"
paddle_includes.append(test_include)

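# JIT-compile custom_concat_op.cc and load the resulting custom operators.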
custom_ops = load(
    name='custom_concat_jit',
    sources=['custom_concat_op.cc'],
    extra_include_paths=paddle_includes,  # add for Coverage CI
    extra_cxx_cflags=extra_cc_args,  # test for cc flags
    extra_cuda_cflags=extra_nvcc_args,  # test for nvcc flags
    verbose=True)


def concat_dynamic(func, dtype, np_inputs, axis_v, with_attr=False):
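    """Run ``func`` to concat ``np_inputs`` in dynamic graph mode.

    Returns the forward output and the gradients of the inputs. When
    ``with_attr`` is True, ``axis`` is passed as a plain int attribute;
    otherwise it is passed as a 1-D int64 tensor.
    """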
    paddle.set_device("cpu")
    inputs = [
        paddle.to_tensor(x, dtype=dtype, stop_gradient=False)
        for x in np_inputs
    ]
    if with_attr:
        axis = axis_v
    else:
        axis = paddle.full(shape=[1], dtype='int64', fill_value=axis_v)
    out = func(inputs, axis)
    out.stop_gradient = False
    out.backward()
    grad_inputs = [x.grad.numpy() for x in inputs]
    return out.numpy(), grad_inputs


def concat_static(func, dtype, np_inputs, axis_v, with_attr=False):
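    """Run ``func`` to concat two inputs in static graph mode.

    Builds a fresh program, appends a backward pass on the summed output,
    and returns the forward output and the gradients of both inputs.
    """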
    paddle.enable_static()
    paddle.set_device("cpu")
    with static.scope_guard(static.Scope()):
        with static.program_guard(static.Program()):
            x1 = static.data(name="x1", shape=[2, 3], dtype=dtype)
            x2 = static.data(name="x2", shape=[2, 3], dtype=dtype)
            if with_attr:
                axis = axis_v
            else:
                axis = paddle.full(shape=[1], dtype='int64', fill_value=axis_v)
            x1.stop_gradient = False
            x2.stop_gradient = False
            out = func([x1, x2], axis)
            # mean only supports float, so use sum here
            sum_out = paddle.sum(out)
            static.append_backward(sum_out)

            exe = static.Executor()
            exe.run(static.default_startup_program())

            if with_attr:
                feed_dict = {
                    "x1": np_inputs[0].astype(dtype),
                    "x2": np_inputs[1].astype(dtype)
                }
            else:
                feed_dict = {
                    "x1": np_inputs[0].astype(dtype),
                    "x2": np_inputs[1].astype(dtype),
                    "axis": axis
                }
            out_v, x1_grad_v, x2_grad_v = exe.run(
                static.default_main_program(),
                feed=feed_dict,
                fetch_list=[out.name, x1.name + "@GRAD", x2.name + "@GRAD"])
    paddle.disable_static()
    return out_v, x1_grad_v, x2_grad_v


class TestCustomConcatDynamicAxisJit(unittest.TestCase):
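    """Compare the JIT-compiled custom concat op against paddle.concat."""
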
    def setUp(self):
        self.dtypes = ['float32', 'float64', 'int32', 'int64']
        self.np_inputs = [
            np.array([[1, 2, 3], [4, 5, 6]]),
            np.array([[11, 12, 13], [14, 15, 16]])
        ]
        self.axises = [0, 1]

    def check_output(self, out, pd_out, name):
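        """Assert the custom op result exactly matches the paddle API result."""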
        self.assertTrue(
            np.array_equal(out, pd_out),
            "custom op {}: {},\n paddle api {}: {}".format(name, out, name,
                                                           pd_out))

    def func_dynamic(self):
        for dtype in self.dtypes:
            for axis in self.axises:
                out, grad_inputs = concat_dynamic(custom_ops.custom_concat,
                                                  dtype, self.np_inputs, axis)
                pd_out, pd_grad_inputs = concat_dynamic(paddle.concat, dtype,
                                                        self.np_inputs, axis)

                self.check_output(out, pd_out, "out")
                for x_grad, pd_x_grad in zip(grad_inputs, pd_grad_inputs):
                    self.check_output(x_grad, pd_x_grad, "x_grad")

    def test_dynamic(self):
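        # Run the check twice: once under the eager mode guard, then again
        # in legacy dygraph mode.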
        with _test_eager_guard():
            self.func_dynamic()
        self.func_dynamic()

    def test_static(self):
        for dtype in self.dtypes:
            for axis in self.axises:
                out, x1_grad, x2_grad = concat_static(
                    custom_ops.custom_concat, dtype, self.np_inputs, axis)
                pd_out, pd_x1_grad, pd_x2_grad = concat_static(
                    paddle.concat, dtype, self.np_inputs, axis)

                self.check_output(out, pd_out, "out")
                self.check_output(x1_grad, pd_x1_grad, "x1_grad")
                self.check_output(x2_grad, pd_x2_grad, "x2_grad")

    def func_dynamic_with_attr(self):
        for dtype in self.dtypes:
            for axis in self.axises:
                out, grad_inputs = concat_dynamic(
                    custom_ops.custom_concat_with_attr, dtype, self.np_inputs,
                    axis, True)
                pd_out, pd_grad_inputs = concat_dynamic(
                    paddle.concat, dtype, self.np_inputs, axis, True)

                self.check_output(out, pd_out, "out")
                for x_grad, pd_x_grad in zip(grad_inputs, pd_grad_inputs):
                    self.check_output(x_grad, pd_x_grad, "x_grad")

    def test_dynamic_with_attr(self):
        with _test_eager_guard():
            self.func_dynamic_with_attr()
        self.func_dynamic_with_attr()

    def test_static_with_attr(self):
        for dtype in self.dtypes:
            for axis in self.axises:
                out, x1_grad, x2_grad = concat_static(
                    custom_ops.custom_concat_with_attr, dtype, self.np_inputs,
                    axis, True)
                pd_out, pd_x1_grad, pd_x2_grad = concat_static(
                    paddle.concat, dtype, self.np_inputs, axis, True)

                self.check_output(out, pd_out, "out")
                self.check_output(x1_grad, pd_x1_grad, "x1_grad")
                self.check_output(x2_grad, pd_x2_grad, "x2_grad")


if __name__ == "__main__":
    unittest.main()