#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import numpy as np
import random
import time
import itertools
import collections

import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.backward import append_backward
from paddle.fluid.op import Operator
from paddle.fluid.executor import Executor
from paddle.fluid.framework import Program, OpProtoHolder, Variable
from testsuite import create_op, set_input, append_input_output, append_loss_ops
from functools import reduce
from six.moves import zip


def randomize_probability(batch_size, class_num, dtype='float32'):
    prob = np.random.uniform(
        0.1, 1.0, size=(batch_size, class_num)).astype(dtype)
    prob_sum = prob.sum(axis=1)
    for i in range(len(prob)):
        prob[i] /= prob_sum[i]
    return prob


def get_numeric_gradient(place,
                         scope,
                         op,
                         inputs,
                         input_to_check,
                         output_names,
                         delta=0.005,
                         in_place=False):
    # FIXME: change this method to use compile-time concepts
    set_input(scope, op, inputs, place)

    def product(dim):
        return reduce(lambda a, b: a * b, dim, 1)

    def get_output():
        sum = []
        for output_name in output_names:
            op.run(scope, place)
            sum.append(
                np.array(scope.find_var(output_name).get_tensor()).mean())
        return np.array(sum).mean()

    tensor_to_check = scope.find_var(input_to_check).get_tensor()
    tensor_size = product(tensor_to_check.shape())
    tensor_to_check_dtype = tensor_to_check._dtype()
    if tensor_to_check_dtype == core.VarDesc.VarType.FP32:
        tensor_to_check_dtype = np.float32
    elif tensor_to_check_dtype == core.VarDesc.VarType.FP64:
        tensor_to_check_dtype = np.float64
    elif tensor_to_check_dtype == core.VarDesc.VarType.FP16:
        tensor_to_check_dtype = np.float16
        # cast delta to np.float16; it is automatically converted back to
        # float32/float64 in the arithmetic below
        delta = np.array(delta).astype(np.float16)
    else:
        raise ValueError("Not supported data type " + str(
            tensor_to_check_dtype))

    gradient_flat = np.zeros(shape=(tensor_size, ), dtype=tensor_to_check_dtype)

    def __get_elem__(tensor, i):
        if tensor_to_check_dtype == np.float16:
            numpy_tensor = np.array(tensor).astype(np.float16)
            numpy_tensor = numpy_tensor.flatten()
            return numpy_tensor[i]
        elif tensor_to_check_dtype == np.float32:
            return tensor._get_float_element(i)
        else:
            return tensor._get_double_element(i)

    def __set_elem__(tensor, i, e):
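        # float16 tensors are held as uint16 bit patterns on the C++ side
        # (see np_dtype_to_fluid_dtype below), so writes go through a
        # flattened numpy copy that is viewed as uint16 before set().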
        if tensor_to_check_dtype == np.float16:
            numpy_tensor = np.array(tensor).astype(np.float16)
            shape = numpy_tensor.shape
            numpy_tensor = numpy_tensor.flatten()
            numpy_tensor[i] = e
            numpy_tensor = numpy_tensor.reshape(shape).view(np.uint16)
            tensor.set(numpy_tensor, place)
        elif tensor_to_check_dtype == np.float32:
            tensor._set_float_element(i, e)
        else:
            tensor._set_double_element(i, e)

    # We compute the numeric gradient of one element at a time,
    # looping over every element of the tensor being checked.
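    # For each element x_i the central difference is used:
    #     df/dx_i ~= (f(x_i + delta) - f(x_i - delta)) / (2 * delta)
    # where f is the scalar produced by get_output() from the output tensors.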
    for i in range(tensor_size):
        if in_place:
            set_input(scope, op, inputs, place)

        # get one input element by its index i.
        origin = __get_elem__(tensor_to_check, i)
        # add delta to it, run op and then get the sum of the result tensor.
        x_pos = origin + delta
        __set_elem__(tensor_to_check, i, x_pos)
        y_pos = get_output()

        if in_place:
            set_input(scope, op, inputs, place)

        x_neg = origin - delta
        __set_elem__(tensor_to_check, i, x_neg)
        y_neg = get_output()

        __set_elem__(tensor_to_check, i, origin)
        gradient_flat[i] = (y_pos - y_neg) / delta / 2

    return gradient_flat.reshape(tensor_to_check.shape())


class OpTest(unittest.TestCase):
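    """Base class that operator unit tests subclass.

    A minimal usage sketch (illustrative only; the operator name, shapes and
    expected outputs below are hypothetical, not taken from this file):

        class TestMyDoubleOp(OpTest):
            def setUp(self):
                self.op_type = "my_double_op"
                x = np.random.random((3, 4)).astype("float32")
                self.inputs = {'X': x}
                self.outputs = {'Out': 2.0 * x}

            def test_check_output(self):
                self.check_output()

            def test_check_grad(self):
                self.check_grad(['X'], 'Out')
    """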
    @classmethod
    def setUpClass(cls):
        '''Fix random seeds to remove randomness from tests'''
        cls._np_rand_state = np.random.get_state()
        cls._py_rand_state = random.getstate()
        cls.call_once = False
        cls.dtype = "float32"
        cls.outputs = {}

        np.random.seed(123)
        random.seed(124)

    @classmethod
    def tearDownClass(cls):
        """Restore random seeds"""
        np.random.set_state(cls._np_rand_state)
        random.setstate(cls._py_rand_state)

    def try_call_once(self, data_type):
        if not self.call_once:
            self.call_once = True
            self.dtype = data_type
            # See the comment of np_dtype_to_fluid_dtype:
            # if the input dtype is uint16, we assume float16 is used
            # for the LoDTensor dtype.
            if self.dtype == np.uint16:
                self.dtype = np.float16

    def infer_dtype_from_inputs_outputs(self, inputs, outputs):
        def infer_dtype(numpy_dict):
            assert isinstance(
                numpy_dict,
                dict), "self.inputs, self.outputs must be numpy_dict"
            for var_name, var_value in numpy_dict.items():
                if isinstance(var_value, (np.ndarray, np.generic)):
                    self.try_call_once(var_value.dtype)
                elif isinstance(var_value, (list, tuple)):
                    # the case of self.inputs = {"X": [("x0", x0), ("x1", x1), ("x2", x2)]}
                    if len(var_value) > 1 and isinstance(var_value[1], (
                            np.ndarray, np.generic)):
                        instance = var_value[1]
                        self.try_call_once(instance[1].dtype)
                else:
                    self.try_call_once("float32")

        infer_dtype(inputs)
        infer_dtype(outputs)

    def feed_var(self, input_vars, place):
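        # Each entry of self.inputs may be a plain ndarray, an (ndarray, lod)
        # tuple, or a list of (sub_name, value) pairs; every case is turned
        # into a LoDTensor keyed by the name the executor expects to feed.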
        feed_map = {}
        for var_name in input_vars:
            if isinstance(input_vars[var_name], list):
                for name, np_value in self.inputs[var_name]:
                    tensor = core.LoDTensor()
                    if isinstance(np_value, tuple):
                        tensor.set(
                            OpTest.np_value_to_fluid_value(np_value[0]), place)
                        tensor.set_recursive_sequence_lengths(np_value[1])
                    else:
                        tensor.set(
                            OpTest.np_value_to_fluid_value(np_value), place)
                    feed_map[name] = tensor
            else:
                tensor = core.LoDTensor()
                if isinstance(self.inputs[var_name], tuple):
                    tensor.set(
                        OpTest.np_value_to_fluid_value(self.inputs[var_name][
                            0]), place)
                    tensor.set_recursive_sequence_lengths(self.inputs[var_name][
                        1])
                else:
                    tensor.set(
                        OpTest.np_value_to_fluid_value(self.inputs[var_name]),
                        place)
                feed_map[var_name] = tensor

        return feed_map

    def _append_ops(self, block):
        op_proto = OpProtoHolder.instance().get_op_proto(self.op_type)
        # infer dtype from the inputs and outputs of this test case
        self.infer_dtype_from_inputs_outputs(self.inputs, self.outputs)
        inputs = append_input_output(block, op_proto, self.inputs, True,
                                     self.dtype)
        outputs = append_input_output(block, op_proto, self.outputs, False,
                                      self.dtype)
        op = block.append_op(
            type=self.op_type,
            inputs=inputs,
            outputs=outputs,
            attrs=self.attrs if hasattr(self, "attrs") else dict())
        # infer variable type and infer shape in compile-time
        op.desc.infer_var_type(block.desc)
        op.desc.infer_shape(block.desc)

    def _get_io_vars(self, block, numpy_inputs):
        inputs = {}
        for name, value in numpy_inputs.items():
            if isinstance(value, list):
                var_list = [
                    block.var(sub_name) for sub_name, sub_value in value
                ]
                inputs[name] = var_list
            else:
                inputs[name] = block.var(name)
        return inputs

    def _get_inputs(self, block):
        return self._get_io_vars(block, self.inputs)

    def _get_outputs(self, block):
        return self._get_io_vars(block, self.outputs)

    def calc_output(self, place):
        outs, _ = self._calc_output(place)
        return outs

    def _calc_output(self, place, parallel=False):

        program = Program()
        block = program.global_block()
        self._append_ops(block)

        inputs = self._get_inputs(block)
        outputs = self._get_outputs(block)
        feed_map = self.feed_var(inputs, place)

        if parallel:
            use_cuda = False
            if isinstance(place, fluid.CUDAPlace):
                use_cuda = True
            executor = fluid.ParallelExecutor(
                use_cuda=use_cuda, loss_name=loss.name, main_program=program)
        else:
            executor = Executor(place)

        fetch_list = getattr(self, "fetch_list", [])
        # If the fetch_list is customized by the user, use it directly;
        # if not, fill it with the outputs configured in the test.
        if len(fetch_list) == 0:
            for var_name, var in outputs.items():
                if isinstance(var, list):
                    for v in var:
                        fetch_list.append(v)
                else:
                    fetch_list.append(var)
        # if the fetch_list is still empty, fill it with the operator's outputs
        if len(fetch_list) == 0:
            for out_name, out_dup in Operator.get_op_outputs(self.op_type):
                fetch_list.append(str(out_name))
        # fetch_list = map(block.var, fetch_list)
        if not isinstance(fetch_list[0], fluid.framework.Variable):
            fetch_list = list(map(block.var, fetch_list))
        outs = executor.run(program,
                            feed=feed_map,
                            fetch_list=fetch_list,
                            return_numpy=False)
        return outs, fetch_list

    def check_output_with_place(self, place, atol):
        outs, fetch_list = self._calc_output(place)
        for out_name, out_dup in Operator.get_op_outputs(self.op_type):
            if out_name not in self.outputs:
                continue

            def find_actual(target_name, fetch_list):
                found = [
                    i for i, var in enumerate(fetch_list)
                    if var.name == target_name
                ]
                self.assertTrue(
                    len(found) == 1, "Found {} {}".format(
                        len(found), target_name))
                return found[0]

            if out_dup:
                sub_out = self.outputs[out_name]
                if not isinstance(sub_out, list):
                    raise AssertionError("sub_out type %s is not list" %
                                         type(sub_out))
                for item in sub_out:
                    sub_out_name, expect = item[0], item[1]
                    idx = find_actual(sub_out_name, fetch_list)
                    actual = outs[idx]
                    actual_t = np.array(actual)
                    expect_t = expect[0] \
                        if isinstance(expect, tuple) else expect
                    self.assertTrue(
                        np.allclose(
                            actual_t, expect_t, atol=atol),
                        "Output (" + sub_out_name + ") has diff at " +
                        str(place))
                    if isinstance(expect, tuple):
                        self.assertListEqual(
                            actual.recursive_sequence_lengths(), expect[1],
                            "Output (" + sub_out_name +
                            ") has different lod at " + str(place))
            else:
                idx = find_actual(out_name, fetch_list)
                actual = outs[idx]
                actual_t = np.array(actual)
                expect = self.outputs[out_name]
                expect_t = expect[0] if isinstance(expect, tuple) else expect
                self.assertTrue(
                    np.allclose(
                        actual_t, expect_t, atol=atol),
                    "Output (" + out_name + ") has diff at " + str(place) +
                    "\nExpect " + str(expect_t) + "\n" + "But Got" +
                    str(actual_t))
340
                if isinstance(expect, tuple):
                    self.assertListEqual(actual.recursive_sequence_lengths(),
                                         expect[1], "Output (" + out_name +
                                         ") has different lod at " + str(place))

    def _get_places(self):
        if self.dtype == np.float16:
            if core.is_compiled_with_cuda() and core.op_support_gpu(
                    self.op_type):
                place = core.CUDAPlace(0)
                if core.is_float16_supported(place):
                    return [place]
            return []
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda() and core.op_support_gpu(self.op_type):
            places.append(core.CUDAPlace(0))
        return places

    def check_output(self, atol=1e-5):
        places = self._get_places()
        for place in places:
            self.check_output_with_place(place, atol)

    def check_output_customized(self, checker):
        places = self._get_places()
        for place in places:
            outs = self.calc_output(place)
            outs = [np.array(out) for out in outs]
            checker(outs)

    def __assert_is_close(self, numeric_grads, analytic_grads, names,
                          max_relative_error, msg_prefix):

        for a, b, name in zip(numeric_grads, analytic_grads, names):
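            # Relative error |a - b| / |a|, where |a| is clamped to 1 when it
            # is below 1e-3 so near-zero gradients do not inflate the ratio.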
            abs_a = np.abs(a)
            abs_a[abs_a < 1e-3] = 1

            diff_mat = np.abs(a - b) / abs_a
            max_diff = np.max(diff_mat)

            def err_msg():
                offset = np.argmax(diff_mat > max_relative_error)
                return ("%s Variable %s max gradient diff %f over limit %f, "
                        "the first error element is %d, expected %f, but got %f"
                        ) % (msg_prefix, name, max_diff, max_relative_error,
                             offset, a.flatten()[offset], b.flatten()[offset])

            self.assertLessEqual(max_diff, max_relative_error, err_msg())

    def check_grad(self,
                   inputs_to_check,
                   output_names,
                   no_grad_set=None,
                   numeric_grad_delta=0.005,
                   in_place=False,
                   max_relative_error=0.005,
                   user_defined_grads=None):
        places = self._get_places()
        for place in places:
            self.check_grad_with_place(place, inputs_to_check, output_names,
                                       no_grad_set, numeric_grad_delta,
                                       in_place, max_relative_error,
                                       user_defined_grads)

    def check_grad_with_place(self,
                              place,
                              inputs_to_check,
                              output_names,
                              no_grad_set=None,
                              numeric_grad_delta=0.005,
                              in_place=False,
                              max_relative_error=0.005,
                              user_defined_grads=None):
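        # Build the op in a fresh scope, estimate gradients numerically with
        # get_numeric_gradient (or take user_defined_grads), fetch analytic
        # gradients from the backward pass via _get_gradient, and compare.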
        self.scope = core.Scope()
        op_inputs = self.inputs if hasattr(self, "inputs") else dict()
        op_outputs = self.outputs if hasattr(self, "outputs") else dict()
        op_attrs = self.attrs if hasattr(self, "attrs") else dict()
        self.op = create_op(self.scope, self.op_type, op_inputs, op_outputs,
                            op_attrs)

        if no_grad_set is None:
            no_grad_set = set()

        if not type(output_names) is list:
            output_names = [output_names]

        numeric_grads = user_defined_grads or [
            get_numeric_gradient(
                place,
                self.scope,
                self.op,
                self.inputs,
                input_to_check,
                output_names,
                delta=numeric_grad_delta,
                in_place=in_place) for input_to_check in inputs_to_check
        ]
        analytic_grads = self._get_gradient(inputs_to_check, place,
                                            output_names, no_grad_set)

        self.__assert_is_close(numeric_grads, analytic_grads, inputs_to_check,
                               max_relative_error,
                               "Gradient Check On %s" % str(place))

    @staticmethod
    def _numpy_to_lod_tensor(np_value, lod, place):
        tensor = core.LoDTensor()
        tensor.set(np_value, place)
        if lod is not None:
            tensor.set_recursive_sequence_lengths(lod)
        return tensor

    @staticmethod
    def np_dtype_to_fluid_dtype(input):
        """Change the dtype of a float16 numpy array.

        numpy float16 is bound to paddle::platform::float16
        in tensor_py.h with the help of the uint16 data type, since
        the internal memory representation of float16 is
        uint16_t in paddle and np.uint16 in numpy, which are
        themselves bound together by pybind.

        Args:
            input: input numpy array

        Returns:
            input: The dtype of input will be changed to np.uint16 if
                it is originally np.float16, so that the internal memory
                of input will be reinterpreted as dtype np.uint16.
        """
        if input.dtype == np.float16:
            input.dtype = np.uint16
        return input

    @staticmethod
    def fluid_dtype_to_np_dtype(self, dtype):
        """
        See above, convert the dtype to normal type.
        """
        if dtype == np.uint16:
            dtype = np.float16
        return dtype

    @staticmethod
    def np_value_to_fluid_value(input):
        if input.dtype == np.float16:
            input = input.view(np.uint16)
        return input
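    # Illustrative note (not used by the tests): a np.float16 array and its
    # np.uint16 view share the same bits; e.g. np.ones(1, np.float16) viewed
    # as np.uint16 gives [15360] (0x3c00). This is what the float16 helpers
    # above rely on when handing data to fluid.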

    def _get_gradient(self,
                      input_to_check,
                      place,
                      output_names,
                      no_grad_set,
                      parallel=False):
        prog = Program()
        block = prog.global_block()
        self._append_ops(block)
        loss = append_loss_ops(block, output_names)
        param_grad_list = append_backward(
            loss=loss, parameter_list=input_to_check, no_grad_set=no_grad_set)

        inputs = self._get_inputs(block)
        feed_dict = self.feed_var(inputs, place)

        fetch_list = [g for p, g in param_grad_list]
        if parallel:
            use_cuda = False
            if isinstance(place, fluid.CUDAPlace):
                use_cuda = True
            executor = fluid.ParallelExecutor(
                use_cuda=use_cuda, loss_name=loss.name, main_program=prog)
        else:
            executor = Executor(place)
        return list(
            map(np.array,
                executor.run(prog, feed_dict, fetch_list, return_numpy=False)))