#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import os
import unittest
import warnings
import numpy as np
import random
import six
import struct
import time
import itertools
import collections
from collections import defaultdict

import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.backward import append_backward
from paddle.fluid.op import Operator
from paddle.fluid.executor import Executor
from paddle.fluid.framework import Program, OpProtoHolder, Variable
from testsuite import create_op, set_input, append_input_output, append_loss_ops
from paddle.fluid import unique_name
from white_list import op_accuracy_white_list, check_shape_white_list, compile_vs_runtime_white_list, no_check_set_white_list
from white_list import op_threshold_white_list, no_grad_set_white_list
from op_test import OpTest, _set_use_system_allocator, get_numeric_gradient
from xpu.get_test_cover_info import is_empty_grad_op_type


class XPUOpTest(OpTest):
    @classmethod
    def setUpClass(cls):
        '''Fix random seeds to remove randomness from tests'''
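        # Mark the suite as an XPU test so the shared OpTest machinery
        # selects XPU kernels and skips MKL-DNN-specific paths (these flags
        # are consumed by the OpTest base class).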
        cls.use_xpu = True
        cls.use_mkldnn = False
        super().setUpClass()

    @classmethod
    def tearDownClass(cls):
        """Restore random seeds"""

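        # Helper: an operator has no XPU grad kernel when "<op_type>_grad"
        # is absent from the op list of the current device's XPU version.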
        def is_empty_grad_op(op_type):
            grad_op = op_type + '_grad'
            xpu_version = core.get_xpu_device_version(0)
            xpu_op_list = core.get_xpu_device_op_list(xpu_version)
            if grad_op in xpu_op_list.keys():
                return False
            return True

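        # fp16 cases return early from the checks on devices without fp16
        # support, so skip the base-class teardown bookkeeping as well.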
        if cls.dtype == np.float16:
            place = paddle.XPUPlace(0)
            if not core.is_float16_supported(place):
                return
        super().tearDownClass()

    def _get_places(self):
        places = [paddle.XPUPlace(0)]
        return places

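    # All XPU checks run on the first XPU device: check_output is a thin
    # wrapper that forwards to check_output_with_place on XPUPlace(0).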
    def check_output(self,
                     atol=0.001,
                     no_check_set=None,
                     equal_nan=False,
                     check_dygraph=True,
                     inplace_atol=None,
                     check_eager=False):
        place = paddle.XPUPlace(0)
        self.check_output_with_place(place, atol, no_check_set, equal_nan,
                                     check_dygraph, inplace_atol, check_eager)

    def check_output_with_place(self,
                                place,
                                atol=0.001,
                                no_check_set=None,
                                equal_nan=False,
                                check_dygraph=True,
                                inplace_atol=None,
                                check_eager=False):
        self.infer_dtype_from_inputs_outputs(self.inputs, self.outputs)
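        # Skip fp64 cases outright (XPU op kernels cover fp32/fp16), and
        # skip fp16 cases on devices without fp16 support.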
        if self.dtype == np.float64:
            return

        if self.dtype == np.float16:
            if not core.is_float16_supported(place):
                return

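        # fp16 outputs are compared with a looser absolute tolerance.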
        if self.dtype == np.float16:
            atol = 0.1
        return super().check_output_with_place(
            place, atol, no_check_set, equal_nan, check_dygraph, inplace_atol)

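    # Like check_output, check_grad always targets XPUPlace(0) and forwards
    # to check_grad_with_place.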
    def check_grad(self,
                   inputs_to_check,
                   output_names,
                   no_grad_set=None,
                   numeric_grad_delta=0.005,
                   in_place=False,
                   max_relative_error=0.005,
                   user_defined_grads=None,
                   user_defined_grad_outputs=None,
                   check_dygraph=True,
                   numeric_place=None,
                   check_eager=False):
        place = paddle.XPUPlace(0)
        self.check_grad_with_place(place, inputs_to_check, output_names,
                                   no_grad_set, numeric_grad_delta, in_place,
                                   max_relative_error, user_defined_grads,
                                   user_defined_grad_outputs, check_dygraph,
                                   numeric_place, check_eager)

    def check_grad_with_place(self,
                              place,
                              inputs_to_check,
                              output_names,
                              no_grad_set=None,
                              numeric_grad_delta=0.005,
                              in_place=False,
                              max_relative_error=0.005,
                              user_defined_grads=None,
                              user_defined_grad_outputs=None,
                              check_dygraph=True,
                              numeric_place=None,
                              check_eager=False):
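        # Ops whose grad kernel is not registered for this XPU version and
        # input type only get the bookkeeping in _check_grad_helper.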
        if hasattr(self, 'op_type_need_check_grad'):
            xpu_version = core.get_xpu_device_version(0)
            if is_empty_grad_op_type(xpu_version, self.op_type,
                                     self.in_type_str):
                self._check_grad_helper()
                return

        if self.dtype == np.float64:
            return

        if self.dtype == np.float16:
            if not core.is_float16_supported(place):
                return

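        # fp16 gradients are only held to a very loose relative error bound
        # by the base-class numeric check.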
        if self.dtype == np.float16:
            max_relative_error = 1.0
            return super().check_grad_with_place(
                place, inputs_to_check, output_names, no_grad_set,
                numeric_grad_delta, in_place, max_relative_error,
                user_defined_grads, user_defined_grad_outputs, check_dygraph)

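        # fp32 gradients are validated in two steps: two runs on the same
        # XPU (a1, a2) must agree almost exactly (a determinism check), and
        # the XPU result must match the CPU result (a3) within
        # max_relative_error.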
        a1 = self.get_grad_with_place(
            place,
            inputs_to_check,
            output_names,
            no_grad_set=no_grad_set,
            user_defined_grad_outputs=user_defined_grad_outputs)
        a2 = self.get_grad_with_place(
            place,
            inputs_to_check,
            output_names,
            no_grad_set=no_grad_set,
            user_defined_grad_outputs=user_defined_grad_outputs)
        a3 = self.get_grad_with_place(
            paddle.CPUPlace(),
            inputs_to_check,
            output_names,
            no_grad_set=no_grad_set,
            user_defined_grad_outputs=user_defined_grad_outputs)
        self._assert_is_close(a1, a2, inputs_to_check, 1e-8,
                              "Gradient Check On two xpu")
        self._assert_is_close(a1, a3, inputs_to_check, max_relative_error,
                              "Gradient Check On cpu & xpu")

    def get_grad_with_place(self,
                            place,
                            inputs_to_check,
                            output_names,
                            no_grad_set=None,
                            numeric_grad_delta=0.005,
                            in_place=False,
                            max_relative_error=0.005,
                            user_defined_grad_outputs=None,
                            check_dygraph=True):
        self.scope = core.Scope()
        op_inputs = self.inputs if hasattr(self, "inputs") else dict()
        op_outputs = self.outputs if hasattr(self, "outputs") else dict()
        op_attrs = self.attrs if hasattr(self, "attrs") else dict()

        self._check_grad_helper()
        if self.dtype == np.float64 and \
            self.op_type not in op_threshold_white_list.NEED_FIX_FP64_CHECK_GRAD_THRESHOLD_OP_LIST:
            numeric_grad_delta = 1e-5
            max_relative_error = 1e-7

        cache_list = None
        if hasattr(self, "cache_name_list"):
            cache_list = self.cache_name_list

        # oneDNN numeric gradient should use CPU kernel
        use_onednn = False
        if "use_mkldnn" in op_attrs and op_attrs["use_mkldnn"] == True:
            op_attrs["use_mkldnn"] = False
            use_onednn = True

        self.op = create_op(
            self.scope,
            self.op_type,
            op_inputs,
            op_outputs,
            op_attrs,
            cache_list=cache_list)

        if use_onednn:
            op_attrs["use_mkldnn"] = True

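        # A non-None no_grad_set is only accepted for whitelisted ops and
        # bfloat16 ops; every other op must pass None.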
        if no_grad_set is None:
            no_grad_set = set()
        else:
            if (self.op_type not in no_grad_set_white_list.NEED_TO_FIX_OP_LIST
                ) and (
                    self.op_type not in no_grad_set_white_list.NOT_CHECK_OP_LIST
                ) and (not self.is_bfloat16_op()):
                raise AssertionError("no_grad_set must be None, op_type is " +
                                     self.op_type + " Op.")

        for input_to_check in inputs_to_check:
            set_input(self.scope, self.op, self.inputs, place)

        if not isinstance(output_names, list):
            output_names = [output_names]

        analytic_grads = self._get_gradient(
            inputs_to_check,
            place,
            output_names,
            no_grad_set,
            user_defined_grad_outputs=user_defined_grad_outputs)
        return analytic_grads
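

# Usage sketch (illustrative only, not part of the harness): a typical test
# subclasses XPUOpTest, fills in op_type/inputs/outputs in setUp, and then
# calls the check_* helpers. The 'scale' op and its attribute below are an
# assumption chosen for brevity.
#
#     class TestScaleXPU(XPUOpTest):
#         def setUp(self):
#             self.op_type = 'scale'
#             self.dtype = np.float32
#             x = np.random.random((10, 10)).astype(self.dtype)
#             self.inputs = {'X': x}
#             self.attrs = {'scale': 2.0}
#             self.outputs = {'Out': x * self.attrs['scale']}
#
#         def test_check_output(self):
#             self.check_output()
#
#         def test_check_grad(self):
#             self.check_grad(['X'], 'Out')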