op_test_xpu.py
#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import numpy as np
from eager_op_test import OpTest
from testsuite import append_loss_ops, create_op, set_input
from white_list import no_grad_set_white_list, op_threshold_white_list
from xpu.get_test_cover_info import (
    get_xpu_op_support_types,
    is_empty_grad_op_type,
    type_dict_str_to_numpy,
)

import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.backward import append_backward
from paddle.fluid.framework import Program, convert_np_dtype_to_dtype_


class XPUOpTest(OpTest):
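    """OpTest specialization that runs operator checks on XPU devices."""
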
    @classmethod
    def setUpClass(cls):
        """Fix random seeds to remove randomness from tests"""
        cls.use_xpu = True
        cls.use_mkldnn = False
        cls.epsilon_xpu2xpu = 0.00000001
        super().setUpClass()

    @classmethod
    def tearDownClass(cls):
        """Restore random seeds"""

        def is_empty_grad_op(op_type):
            grad_op = op_type + '_grad'
            xpu_version = core.get_xpu_device_version(0)
            xpu_op_list = core.get_xpu_device_op_list(xpu_version)
            if grad_op in xpu_op_list.keys():
                return False
            return True

        if cls.dtype == np.float16:
            place = paddle.XPUPlace(0)
            if not core.is_float16_supported(place):
                return

        if cls.dtype == np.float64:
            return

        super().tearDownClass()

    def _get_places(self):
        places = [paddle.XPUPlace(0)]
        return places

    def check_output(
        self,
        atol=0.001,
        no_check_set=None,
        equal_nan=False,
        check_dygraph=False,
        inplace_atol=None,
    ):
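        # XPU output checks always run on device 0; delegate to
        # check_output_with_place().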
        place = paddle.XPUPlace(0)
        self.check_output_with_place(
            place,
            atol,
            no_check_set,
            equal_nan,
            check_dygraph,
            inplace_atol,
        )

    def check_output_with_place(
        self,
        place,
        atol=0.001,
        no_check_set=None,
        equal_nan=False,
        check_dygraph=False,
        inplace_atol=None,
    ):
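        # float64 is not checked on XPU, and float16 only when the device
        # supports it; for float16 the tolerance is relaxed before delegating
        # to the base implementation.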
        self.infer_dtype_from_inputs_outputs(self.inputs, self.outputs)
        if self.dtype == np.float64:
            return

        if self.dtype == np.float16:
            if not core.is_float16_supported(place):
                return

        if self.dtype == np.float16:
            atol = 0.1
        return super().check_output_with_place(
            place, atol, no_check_set, equal_nan, check_dygraph, inplace_atol
        )

    def check_grad(
        self,
        inputs_to_check,
        output_names,
        no_grad_set=None,
        numeric_grad_delta=0.005,
        in_place=False,
        max_relative_error=0.005,
        user_defined_grads=None,
        user_defined_grad_outputs=None,
        check_dygraph=False,
        numeric_place=None,
    ):
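        # XPU gradient checks always run on device 0; delegate to
        # check_grad_with_place().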
        place = paddle.XPUPlace(0)
        self.check_grad_with_place(
            place,
            inputs_to_check,
            output_names,
            no_grad_set,
            numeric_grad_delta,
            in_place,
            max_relative_error,
            user_defined_grads,
            user_defined_grad_outputs,
            check_dygraph,
            numeric_place,
        )

    def check_grad_with_place(
        self,
        place,
        inputs_to_check,
        output_names,
        no_grad_set=None,
        numeric_grad_delta=0.005,
        in_place=False,
        max_relative_error=0.005,
        user_defined_grads=None,
        user_defined_grad_outputs=None,
        check_dygraph=False,
        numeric_place=None,
    ):
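        # For ops flagged with op_type_need_check_grad, skip the numeric check
        # when no XPU grad kernel is registered for this type.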
        if hasattr(self, 'op_type_need_check_grad'):
            xpu_version = core.get_xpu_device_version(0)
            if is_empty_grad_op_type(
                xpu_version, self.op_type, self.in_type_str
            ):
                self._check_grad_helper()
                return

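        # Skip dtypes without an XPU 'cast' kernel, since the gradient
        # computation relies on cast ops.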
        cast_grad_op_types = get_xpu_op_support_types('cast')
        cast_grad_op_types_np = []
        for ctype in cast_grad_op_types:
            cast_grad_op_types_np.append(type_dict_str_to_numpy[ctype])

        if self.dtype not in cast_grad_op_types_np:
            return

        if self.dtype == np.float64:
            return

        if self.dtype == np.float16:
            if not core.is_float16_supported(place):
                return

        if self.dtype == np.float16:
            max_relative_error = 1.0
            return super().check_grad_with_place(
                place,
                inputs_to_check,
                output_names,
                no_grad_set,
                numeric_grad_delta,
                in_place,
                max_relative_error,
                user_defined_grads,
                user_defined_grad_outputs,
                check_dygraph,
            )

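        # Run the gradient twice on XPU (a1, a2) and once on CPU (a3), then
        # check that the two XPU runs agree with each other and that the XPU
        # result agrees with the CPU result within max_relative_error.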
        a1 = self.get_grad_with_place(
            place,
            inputs_to_check,
            output_names,
            no_grad_set=no_grad_set,
            user_defined_grad_outputs=user_defined_grad_outputs,
        )
        a2 = self.get_grad_with_place(
            place,
            inputs_to_check,
            output_names,
            no_grad_set=no_grad_set,
            user_defined_grad_outputs=user_defined_grad_outputs,
        )
        a3 = self.get_grad_with_place(
            paddle.CPUPlace(),
            inputs_to_check,
            output_names,
            no_grad_set=no_grad_set,
            user_defined_grad_outputs=user_defined_grad_outputs,
        )
        self._assert_is_close(
            a1,
            a2,
            inputs_to_check,
            self.epsilon_xpu2xpu,
            "Gradient Check On two xpu",
        )
        self._assert_is_close(
            a1,
            a3,
            inputs_to_check,
            max_relative_error,
            "Gradient Check On cpu & xpu",
        )

    def get_grad_with_place(
        self,
        place,
        inputs_to_check,
        output_names,
        no_grad_set=None,
        numeric_grad_delta=0.005,
        in_place=False,
        max_relative_error=0.005,
        user_defined_grad_outputs=None,
        check_dygraph=False,
    ):
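        # Build and run the op to obtain the gradients of output_names with
        # respect to inputs_to_check on the given place.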
        self.scope = core.Scope()
        op_inputs = self.inputs if hasattr(self, "inputs") else {}
        op_outputs = self.outputs if hasattr(self, "outputs") else {}
        op_attrs = self.attrs if hasattr(self, "attrs") else {}

        self._check_grad_helper()
        if (
            self.dtype == np.float64
            and self.op_type
            not in op_threshold_white_list.NEED_FIX_FP64_CHECK_GRAD_THRESHOLD_OP_LIST
        ):
            numeric_grad_delta = 1e-5
            max_relative_error = 1e-7

        cache_list = None
        if hasattr(self, "cache_name_list"):
            cache_list = self.cache_name_list

        # oneDNN numeric gradient should use CPU kernel
        use_onednn = False
        if "use_mkldnn" in op_attrs and op_attrs["use_mkldnn"]:
            op_attrs["use_mkldnn"] = False
            use_onednn = True

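        # Record which dtypes the XPU 'mean' kernel supports; dtypes it does
        # not support take the manual cast-to-fp32 path below.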
        mean_grad_op_types = get_xpu_op_support_types('mean')
        mean_grad_op_types_np = []
        for mtype in mean_grad_op_types:
            mean_grad_op_types_np.append(type_dict_str_to_numpy[mtype])

        self.op = create_op(
            self.scope,
            self.op_type,
            op_inputs,
            op_outputs,
            op_attrs,
            cache_list=cache_list,
        )

        if use_onednn:
            op_attrs["use_mkldnn"] = True

        if no_grad_set is None:
            no_grad_set = set()
        else:
            if (
                (self.op_type not in no_grad_set_white_list.NEED_TO_FIX_OP_LIST)
                and (
                    self.op_type not in no_grad_set_white_list.NOT_CHECK_OP_LIST
                )
                and (not self.is_bfloat16_op())
            ):
                raise AssertionError(
                    "no_grad_set must be None, op_type is "
                    + self.op_type
                    + " Op."
                )

        for input_to_check in inputs_to_check:
            set_input(self.scope, self.op, self.inputs, place)

        if not isinstance(output_names, list):
            output_names = [output_names]

        if self.dtype not in mean_grad_op_types_np:
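            # No XPU 'mean' kernel for this dtype: build a separate program
            # that casts the outputs to float32, appends the loss ops, casts
            # the loss back to self.dtype, and differentiates that loss.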

            prog = Program()
            block = prog.global_block()
            scope = core.Scope()
            self._append_ops(block)

            inputs = self._get_inputs(block)
            outputs = self._get_outputs(block)
            feed_dict = self.feed_var(inputs, place)
            cast_inputs = list(map(block.var, output_names))
            cast_outputs = block.create_var(
                dtype="float32", shape=cast_inputs[0].shape
            )
            cast_op = block.append_op(
                type="cast",
                inputs={"X": cast_inputs},
                outputs={"Out": cast_outputs},
                attrs={
                    "in_dtype": convert_np_dtype_to_dtype_(self.dtype),
                    "out_dtype": core.VarDesc.VarType.FP32,
                },
            )
            cast_op.desc.infer_var_type(block.desc)
            cast_op.desc.infer_shape(block.desc)

            output_names = [cast_outputs.name]

            loss = append_loss_ops(block, output_names)
            loss_names = [loss.name]
            recast_inputs = list(map(block.var, loss_names))
            recast_loss = block.create_var(
                dtype=self.dtype, shape=recast_inputs[0].shape
            )

            recast_op = block.append_op(
                type="cast",
                inputs={"X": recast_inputs},
                outputs={"Out": recast_loss},
                attrs={
                    "in_dtype": core.VarDesc.VarType.FP32,
                    "out_dtype": convert_np_dtype_to_dtype_(self.dtype),
                },
            )
            recast_op.desc.infer_var_type(block.desc)
            recast_op.desc.infer_shape(block.desc)

            param_grad_list = append_backward(
                loss=recast_loss,
                parameter_list=[input_to_check],
                no_grad_set=no_grad_set,
            )
            fetch_list = [g for p, g in param_grad_list]

            executor = fluid.Executor(place)
            return list(
                map(
                    np.array,
                    executor.run(
                        prog,
                        feed_dict,
                        fetch_list,
                        scope=scope,
                        return_numpy=False,
                    ),
                )
            )

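        # For supported dtypes, compute the gradient with the standard
        # _get_gradient() path.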
        analytic_grads = self._get_gradient(
            inputs_to_check,
            place,
            output_names,
            no_grad_set,
            user_defined_grad_outputs=user_defined_grad_outputs,
        )
        return analytic_grads