#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import numpy as np
import random
import time
import itertools
import collections

import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.backward import append_backward
from paddle.fluid.op import Operator
from paddle.fluid.executor import Executor
from paddle.fluid.framework import Program, OpProtoHolder, Variable
from testsuite import create_op, set_input, append_input_output, append_loss_ops
from functools import reduce
from six.moves import zip


def randomize_probability(batch_size, class_num, dtype='float32'):
    prob = np.random.uniform(
        0.1, 1.0, size=(batch_size, class_num)).astype(dtype)
    prob_sum = prob.sum(axis=1)
    for i in range(len(prob)):
        prob[i] /= prob_sum[i]
    return prob
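
# For reference (shapes are illustrative): randomize_probability(4, 10) returns
# a (4, 10) float32 array whose rows each sum to 1, which is convenient for ops
# that expect a per-sample probability distribution (e.g. cross-entropy style
# tests).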


def get_numeric_gradient(place,
                         scope,
                         op,
                         inputs,
                         input_to_check,
                         output_names,
                         delta=0.005,
                         in_place=False):
    # FIXME: change this method to use compile-time concepts
    set_input(scope, op, inputs, place)

    def product(dim):
        return reduce(lambda a, b: a * b, dim, 1)

    def get_output():
        sum = []
        for output_name in output_names:
            op.run(scope, place)
            sum.append(
                np.array(scope.find_var(output_name).get_tensor()).mean())
        return np.array(sum).mean()

    tensor_to_check = scope.find_var(input_to_check).get_tensor()
    tensor_size = product(tensor_to_check.shape())
    tensor_to_check_dtype = tensor_to_check._dtype()
    if tensor_to_check_dtype == core.VarDesc.VarType.FP32:
        tensor_to_check_dtype = np.float32
    elif tensor_to_check_dtype == core.VarDesc.VarType.FP64:
        tensor_to_check_dtype = np.float64
    else:
        raise ValueError("Unsupported data type " + str(
            tensor_to_check_dtype))

    gradient_flat = np.zeros(shape=(tensor_size, ), dtype=tensor_to_check_dtype)

    def __get_elem__(tensor, i):
        if tensor_to_check_dtype == np.float32:
            return tensor._get_float_element(i)
        else:
            return tensor._get_double_element(i)

    def __set_elem__(tensor, i, e):
        if tensor_to_check_dtype == np.float32:
            tensor._set_float_element(i, e)
        else:
            tensor._set_double_element(i, e)

    # we only compute the gradient of one element at a time.
    # we use a for loop to compute the gradient of every element.
    for i in range(tensor_size):
        if in_place:
            set_input(scope, op, inputs, place)

        # get one input element by its index i.
        origin = __get_elem__(tensor_to_check, i)
        # add delta to it, run the op and then get the mean of the result tensor.
        x_pos = origin + delta
        __set_elem__(tensor_to_check, i, x_pos)
        y_pos = get_output()

        if in_place:
            set_input(scope, op, inputs, place)

        x_neg = origin - delta
        __set_elem__(tensor_to_check, i, x_neg)
        y_neg = get_output()

        __set_elem__(tensor_to_check, i, origin)
        gradient_flat[i] = (y_pos - y_neg) / delta / 2

    return gradient_flat.reshape(tensor_to_check.shape())
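
# get_numeric_gradient above estimates every partial derivative with a central
# difference.  A self-contained sketch of the same rule on a plain numpy
# function (illustrative only; `numeric_grad` is not used by the tests):
#
#     def numeric_grad(f, x, i, delta=0.005):
#         x_pos, x_neg = x.copy(), x.copy()
#         x_pos[i] += delta
#         x_neg[i] -= delta
#         return (f(x_pos) - f(x_neg)) / (2.0 * delta)
#
# For f(x) = (x ** 2).sum() and x = np.array([3.0]), numeric_grad(f, x, 0)
# gives 6.0, matching the analytic gradient 2 * x.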


class OpTest(unittest.TestCase):
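    """Base class for single-operator unit tests.

    A subclass normally sets self.op_type, self.inputs and self.outputs in
    setUp() and then calls check_output() / check_grad().  A minimal sketch
    (the operator name, shapes and tolerance below are illustrative
    assumptions, not prescribed by this file):

        class TestMulOp(OpTest):
            def setUp(self):
                self.op_type = "mul"
                x = np.random.random((2, 3)).astype("float32")
                y = np.random.random((3, 4)).astype("float32")
                self.inputs = {'X': x, 'Y': y}
                self.outputs = {'Out': np.dot(x, y)}

            def test_check_output(self):
                self.check_output()

            def test_check_grad(self):
                self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.01)
    """
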
    @classmethod
    def setUpClass(cls):
        '''Fix random seeds to remove randomness from tests'''
        cls._np_rand_state = np.random.get_state()
        cls._py_rand_state = random.getstate()
        cls.call_once = False
        cls.dtype = "float32"
        cls.outputs = {}

        np.random.seed(123)
        random.seed(124)

    @classmethod
    def tearDownClass(cls):
        """Restore random seeds"""
        np.random.set_state(cls._np_rand_state)
        random.setstate(cls._py_rand_state)

    def try_call_once(self, data_type):
        if not self.call_once:
            self.call_once = True
            self.dtype = data_type

    def infer_dtype_from_inputs_outputs(self, inputs, outputs):
        def infer_dtype(numpy_dict):
            assert isinstance(
                numpy_dict,
                dict), "self.inputs, self.outputs must be numpy_dict"
            for var_name, var_value in numpy_dict.items():
                if isinstance(var_value, (np.ndarray, np.generic)):
                    self.try_call_once(var_value.dtype)
                elif isinstance(var_value, (list, tuple)):
                    # the case of self.inputs = {"X": [("x0", x0), ("x1", x1), ("x2", x2)]}
                    if len(var_value) > 1 and isinstance(var_value[1], (
                            np.ndarray, np.generic)):
                        instance = var_value[1]
                        self.try_call_once(instance[1].dtype)
                else:
                    self.try_call_once("float32")

        infer_dtype(inputs)
        infer_dtype(outputs)

    def feed_var(self, input_vars, place):
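        """Convert self.inputs into a feed dict of LoDTensors.

        Each input value may be a plain numpy array, an (ndarray, lod) tuple,
        or, for duplicable inputs, a list of (name, value) pairs; any lod is
        attached via set_recursive_sequence_lengths.
        """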
        feed_map = {}
        for var_name in input_vars:
            if isinstance(input_vars[var_name], list):
                for name, np_value in self.inputs[var_name]:
                    tensor = core.LoDTensor()
                    if isinstance(np_value, tuple):
                        tensor.set(np_value[0], place)
                        tensor.set_recursive_sequence_lengths(np_value[1])
                    else:
                        tensor.set(np_value, place)
                    feed_map[name] = tensor
            else:
                tensor = core.LoDTensor()
                if isinstance(self.inputs[var_name], tuple):
                    tensor.set(self.inputs[var_name][0], place)
                    tensor.set_recursive_sequence_lengths(
                        self.inputs[var_name][1])
                else:
                    tensor.set(self.inputs[var_name], place)
                feed_map[var_name] = tensor

        return feed_map

    def _append_ops(self, block):
        op_proto = OpProtoHolder.instance().get_op_proto(self.op_type)
        # infer datatype from inputs and outputs for this test case
        self.infer_dtype_from_inputs_outputs(self.inputs, self.outputs)
        inputs = append_input_output(block, op_proto, self.inputs, True,
                                     self.dtype)
        outputs = append_input_output(block, op_proto, self.outputs, False,
                                      self.dtype)
        op = block.append_op(
            type=self.op_type,
            inputs=inputs,
            outputs=outputs,
            attrs=self.attrs if hasattr(self, "attrs") else dict())
        # infer variable type and infer shape in compile-time
        op.desc.infer_var_type(block.desc)
        op.desc.infer_shape(block.desc)

    def _get_io_vars(self, block, numpy_inputs):
        inputs = {}
        for name, value in numpy_inputs.items():
            if isinstance(value, list):
                var_list = [
                    block.var(sub_name) for sub_name, sub_value in value
                ]
                inputs[name] = var_list
            else:
                inputs[name] = block.var(name)
        return inputs

    def _get_inputs(self, block):
        return self._get_io_vars(block, self.inputs)

    def _get_outputs(self, block):
        return self._get_io_vars(block, self.outputs)

    def calc_output(self, place):
        outs, _ = self._calc_output(place)
        return outs

    def _calc_output(self, place, parallel=False):

        program = Program()
        block = program.global_block()
        self._append_ops(block)

        inputs = self._get_inputs(block)
        outputs = self._get_outputs(block)
        feed_map = self.feed_var(inputs, place)

        if parallel:
            use_cuda = False
            if isinstance(place, fluid.CUDAPlace):
                use_cuda = True
            # no loss is built on this forward-only path, so loss_name is left
            # at its default when constructing the ParallelExecutor.
            executor = fluid.ParallelExecutor(
                use_cuda=use_cuda, main_program=program)
        else:
            executor = Executor(place)

        fetch_list = getattr(self, "fetch_list", [])
        # if the fetch_list is customized by the user, we use it directly;
        # if not, fill the fetch_list with the outputs configured in the test.
        if len(fetch_list) == 0:
            for var_name, var in outputs.items():
                if isinstance(var, list):
                    for v in var:
                        fetch_list.append(v)
                else:
                    fetch_list.append(var)
        # if the fetch_list is still empty, fill it with the operator outputs.
        if len(fetch_list) == 0:
            for out_name, out_dup in Operator.get_op_outputs(self.op_type):
                fetch_list.append(str(out_name))
        # fetch_list = map(block.var, fetch_list)
        if not isinstance(fetch_list[0], fluid.framework.Variable):
            fetch_list = list(map(block.var, fetch_list))
        outs = executor.run(program,
                            feed=feed_map,
                            fetch_list=fetch_list,
                            return_numpy=False)
        return outs, fetch_list

264 265
    def check_output_with_place(self, place, atol):
        outs, fetch_list = self._calc_output(place)
        for out_name, out_dup in Operator.get_op_outputs(self.op_type):
            if out_name not in self.outputs:
                continue

            def find_actual(target_name, fetch_list):
                found = [
                    i for i, var in enumerate(fetch_list)
                    if var.name == target_name
                ]
                self.assertTrue(
                    len(found) == 1, "Found {} {}".format(
                        len(found), target_name))
                return found[0]

            if out_dup:
                sub_out = self.outputs[out_name]
                if not isinstance(sub_out, list):
                    raise AssertionError("sub_out type %s is not list" %
                                         type(sub_out))
                for item in sub_out:
                    sub_out_name, expect = item[0], item[1]
                    idx = find_actual(sub_out_name, fetch_list)
                    actual = outs[idx]
                    actual_t = np.array(actual)
                    expect_t = expect[0] \
                        if isinstance(expect, tuple) else expect
                    self.assertTrue(
                        np.allclose(
                            actual_t, expect_t, atol=atol),
                        "Output (" + sub_out_name + ") has diff at " +
                        str(place))
                    if isinstance(expect, tuple):
                        self.assertListEqual(
                            actual.recursive_sequence_lengths(), expect[1],
                            "Output (" + sub_out_name +
                            ") has different lod at " + str(place))
            else:
                idx = find_actual(out_name, fetch_list)
                actual = outs[idx]
                actual_t = np.array(actual)
                expect = self.outputs[out_name]
                expect_t = expect[0] if isinstance(expect, tuple) else expect
                self.assertTrue(
                    np.allclose(
                        actual_t, expect_t, atol=atol),
                    "Output (" + out_name + ") has diff at " + str(place) +
                    "\n" + str(actual_t) + "\n" + str(expect_t))
                if isinstance(expect, tuple):
                    self.assertListEqual(actual.recursive_sequence_lengths(),
                                         expect[1], "Output (" + out_name +
                                         ") has different lod at " + str(place))

    def _get_places(self):
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda() and core.op_support_gpu(self.op_type):
            places.append(core.CUDAPlace(0))
        return places

    def check_output(self, atol=1e-5):
        places = self._get_places()
        for place in places:
            self.check_output_with_place(place, atol)

    def check_output_customized(self, checker):
        places = self._get_places()
        for place in places:
            outs = self.calc_output(place)
            outs = [np.array(out) for out in outs]
            checker(outs)

    def __assert_is_close(self, numeric_grads, analytic_grads, names,
                          max_relative_error, msg_prefix):

        for a, b, name in zip(numeric_grads, analytic_grads, names):
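            # element-wise relative error |a - b| / |a|; entries where |a| is
            # below 1e-3 are divided by 1 instead, so near-zero numeric
            # gradients are effectively checked with absolute error.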
            abs_a = np.abs(a)
            abs_a[abs_a < 1e-3] = 1

            diff_mat = np.abs(a - b) / abs_a
            max_diff = np.max(diff_mat)

            def err_msg():
                offset = np.argmax(diff_mat > max_relative_error)
                return ("%s Variable %s max gradient diff %f over limit %f, "
                        "the first error element is %d, %f, %f") % (
                            msg_prefix, name, max_diff, max_relative_error,
                            offset, a.flatten()[offset], b.flatten()[offset])

            self.assertLessEqual(max_diff, max_relative_error, err_msg())

    def check_grad(self,
                   inputs_to_check,
                   output_names,
                   no_grad_set=None,
                   numeric_grad_delta=0.005,
                   in_place=False,
                   max_relative_error=0.005,
                   user_defined_grads=None):
        places = self._get_places()
        for place in places:
            self.check_grad_with_place(place, inputs_to_check, output_names,
                                       no_grad_set, numeric_grad_delta,
                                       in_place, max_relative_error,
                                       user_defined_grads)

    def check_grad_with_place(self,
                              place,
                              inputs_to_check,
                              output_names,
                              no_grad_set=None,
                              numeric_grad_delta=0.005,
                              in_place=False,
                              max_relative_error=0.005,
                              user_defined_grads=None):
        self.scope = core.Scope()
        op_inputs = self.inputs if hasattr(self, "inputs") else dict()
        op_outputs = self.outputs if hasattr(self, "outputs") else dict()
        op_attrs = self.attrs if hasattr(self, "attrs") else dict()
        self.op = create_op(self.scope, self.op_type, op_inputs, op_outputs,
                            op_attrs)

        if no_grad_set is None:
            no_grad_set = set()

        if not isinstance(output_names, list):
            output_names = [output_names]

        numeric_grads = user_defined_grads or [
            get_numeric_gradient(
                place,
                self.scope,
                self.op,
                self.inputs,
                input_to_check,
                output_names,
                delta=numeric_grad_delta,
                in_place=in_place) for input_to_check in inputs_to_check
        ]
        analytic_grads = self._get_gradient(inputs_to_check, place,
                                            output_names, no_grad_set)

        self.__assert_is_close(numeric_grads, analytic_grads, inputs_to_check,
                               max_relative_error,
                               "Gradient Check On %s" % str(place))

    @staticmethod
    def _numpy_to_lod_tensor(np_value, lod, place):
        tensor = core.LoDTensor()
        tensor.set(np_value, place)
        if lod is not None:
            tensor.set_recursive_sequence_lengths(lod)
        return tensor

    @staticmethod
    def np_dtype_to_fluid_dtype(input):
        """Change the dtype of a float16 numpy array.

        numpy float16 is bound to paddle::platform::float16
        in tensor_py.h with the help of the uint16 data type, since
        the internal memory representation of float16 is
        uint16_t in paddle and np.uint16 in numpy, which are
        themselves bound together by pybind.

        Args:
            input: input numpy array

        Returns:
            input: The dtype of input will be changed to np.uint16 if
                it is originally np.float16, such that the internal memory
                of input will be reinterpreted as of dtype np.uint16.
        """
        if input.dtype == np.float16:
            input.dtype = np.uint16
        return input
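
    # For instance (illustrative): np.array([1.0], dtype=np.float16) has the
    # IEEE-754 half-precision bit pattern 0x3C00, so after
    # np_dtype_to_fluid_dtype the same buffer reads back as the np.uint16
    # value 15360, without copying any data.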

    def _get_gradient(self,
                      input_to_check,
                      place,
                      output_names,
                      no_grad_set,
                      parallel=False):
        prog = Program()
        block = prog.global_block()
        self._append_ops(block)
        loss = append_loss_ops(block, output_names)
        param_grad_list = append_backward(
            loss=loss, parameter_list=input_to_check, no_grad_set=no_grad_set)

        inputs = self._get_inputs(block)
        feed_dict = self.feed_var(inputs, place)

        fetch_list = [g for p, g in param_grad_list]
        if parallel:
            use_cuda = False
            if isinstance(place, fluid.CUDAPlace):
                use_cuda = True
            executor = fluid.ParallelExecutor(
                use_cuda=use_cuda, loss_name=loss.name, main_program=prog)
        else:
            executor = Executor(place)
        return list(
            map(np.array,
                executor.run(prog, feed_dict, fetch_list, return_numpy=False)))