#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import numpy as np
import random
import time
import itertools
import collections

import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.backward import append_backward
from paddle.fluid.op import Operator
from paddle.fluid.executor import Executor
from paddle.fluid.framework import Program, OpProtoHolder, Variable
from testsuite import create_op, set_input, append_input_output, append_loss_ops


def randomize_probability(batch_size, class_num, dtype='float32'):
    prob = np.random.uniform(
        0.1, 1.0, size=(batch_size, class_num)).astype(dtype)
    prob_sum = prob.sum(axis=1)
    for i in xrange(len(prob)):
        prob[i] /= prob_sum[i]
    return prob
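
# A minimal usage sketch (shapes illustrative): each row of the returned
# array is a valid probability distribution, e.g. as the input of a
# cross-entropy op test.
#
#   prob = randomize_probability(4, 10, dtype='float64')
#   assert np.allclose(prob.sum(axis=1), 1.0)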


def get_numeric_gradient(place,
                         scope,
                         op,
                         inputs,
                         input_to_check,
                         output_names,
                         delta=0.005,
                         in_place=False):
    # FIXME: change this method to use compile-time concepts
    set_input(scope, op, inputs, place)

    def product(dim):
        return reduce(lambda a, b: a * b, dim, 1)

    def get_output():
        sum = []
        for output_name in output_names:
            op.run(scope, place)
            sum.append(
                np.array(scope.find_var(output_name).get_tensor()).mean())
        return np.array(sum).mean()

    tensor_to_check = scope.find_var(input_to_check).get_tensor()
    tensor_size = product(tensor_to_check.shape())
    tensor_to_check_dtype = tensor_to_check._dtype()
    if tensor_to_check_dtype == core.VarDesc.VarType.FP32:
        tensor_to_check_dtype = np.float32
    elif tensor_to_check_dtype == core.VarDesc.VarType.FP64:
        tensor_to_check_dtype = np.float64
    elif tensor_to_check_dtype == core.VarDesc.VarType.FP16:
        tensor_to_check_dtype = np.float16
        # cast delta to np.float16; it is automatically promoted to
        # float32/float64 in the arithmetic below
        delta = np.array(delta).astype(np.float16)
    else:
        raise ValueError("Unsupported data type " + str(
            tensor_to_check_dtype))

    gradient_flat = np.zeros(shape=(tensor_size, ), dtype=tensor_to_check_dtype)

    def __get_elem__(tensor, i):
        if tensor_to_check_dtype == np.float16:
            numpy_tensor = np.array(tensor).astype(np.float16)
            numpy_tensor = numpy_tensor.flatten()
            return numpy_tensor[i]
        elif tensor_to_check_dtype == np.float32:
            return tensor._get_float_element(i)
        else:
            return tensor._get_double_element(i)

    def __set_elem__(tensor, i, e):
        if tensor_to_check_dtype == np.float16:
            numpy_tensor = np.array(tensor).astype(np.float16)
            shape = numpy_tensor.shape
            numpy_tensor = numpy_tensor.flatten()
            numpy_tensor[i] = e
            numpy_tensor = numpy_tensor.reshape(shape).view(np.uint16)
            tensor.set(numpy_tensor, place)
        elif tensor_to_check_dtype == np.float32:
            tensor._set_float_element(i, e)
        else:
            tensor._set_double_element(i, e)

    # We compute the gradient of one element at a time, looping over
    # every element of the tensor being checked.
    for i in xrange(tensor_size):
        if in_place:
            set_input(scope, op, inputs, place)

        # get one input element by its index i.
        origin = __get_elem__(tensor_to_check, i)
        # add delta to it, run the op, and then get the mean of the outputs.
        x_pos = origin + delta
        __set_elem__(tensor_to_check, i, x_pos)
        y_pos = get_output()

        if in_place:
            set_input(scope, op, inputs, place)

        x_neg = origin - delta
        __set_elem__(tensor_to_check, i, x_neg)
        y_neg = get_output()

        __set_elem__(tensor_to_check, i, origin)
        gradient_flat[i] = (y_pos - y_neg) / delta / 2

    return gradient_flat.reshape(tensor_to_check.shape())
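
# The value stored in gradient_flat above is the central finite difference
# (f(x + delta) - f(x - delta)) / (2 * delta), taken one element at a time.
# A minimal sketch of a direct call (the op name and dicts are illustrative,
# built the same way check_grad_with_place builds them below):
#
#   scope = core.Scope()
#   op = create_op(scope, "mul", inputs, outputs, dict())
#   grad = get_numeric_gradient(core.CPUPlace(), scope, op, inputs,
#                               "X", ["Out"], delta=0.005)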


class OpTest(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        '''Fix random seeds to remove randomness from tests'''
        cls._np_rand_state = np.random.get_state()
        cls._py_rand_state = random.getstate()
        cls.call_once = False
        cls.dtype = "float32"
        cls.outputs = {}

        np.random.seed(123)
        random.seed(124)

    @classmethod
    def tearDownClass(cls):
        """Restore random seeds"""
        np.random.set_state(cls._np_rand_state)
        random.setstate(cls._py_rand_state)

    def try_call_once(self, data_type):
        if not self.call_once:
            self.call_once = True
            self.dtype = data_type
            # See the comment of np_dtype_to_fluid_dtype:
            # if the input dtype is uint16, we assume float16 is in use
            # for the lodtensor dtype.
            if self.dtype == np.uint16:
                self.dtype = np.float16

    def infer_dtype_from_inputs_outputs(self, inputs, outputs):
        def infer_dtype(numpy_dict):
            assert isinstance(
                numpy_dict,
                dict), "self.inputs, self.outputs must be numpy_dict"
            for var_name, var_value in numpy_dict.iteritems():
                if isinstance(var_value, (np.ndarray, np.generic)):
                    self.try_call_once(var_value.dtype)
                elif isinstance(var_value, (list, tuple)):
                    # the case of self.inputs = {"X": [("x0", x0), ("x1", x1), ("x2", x2)]}
                    if len(var_value) > 1 and isinstance(var_value[1], (
                            np.ndarray, np.generic)):
                        instance = var_value[1]
                        self.try_call_once(instance[1].dtype)
                else:
                    self.try_call_once("float32")

        infer_dtype(inputs)
        infer_dtype(outputs)

    def feed_var(self, input_vars, place):
        feed_map = {}
        for var_name in input_vars:
            if isinstance(input_vars[var_name], list):
                for name, np_value in self.inputs[var_name]:
                    tensor = core.LoDTensor()
                    if isinstance(np_value, tuple):
                        tensor.set(
                            OpTest.np_value_to_fluid_value(np_value[0]), place)
                        tensor.set_recursive_sequence_lengths(np_value[1])
                    else:
                        tensor.set(
                            OpTest.np_value_to_fluid_value(np_value), place)
                    feed_map[name] = tensor
            else:
                tensor = core.LoDTensor()
                if isinstance(self.inputs[var_name], tuple):
                    tensor.set(
                        OpTest.np_value_to_fluid_value(self.inputs[var_name][
                            0]), place)
                    tensor.set_recursive_sequence_lengths(self.inputs[var_name][
                        1])
                else:
                    tensor.set(
                        OpTest.np_value_to_fluid_value(self.inputs[var_name]),
                        place)
                feed_map[var_name] = tensor

        return feed_map
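
    # A note on input layout (a sketch matching the branches above):
    # self.inputs maps each input name to a bare ndarray, to an
    # (ndarray, recursive_sequence_lengths) tuple for LoD inputs, or to a
    # list of (sub_name, value) pairs for duplicable inputs, e.g.:
    #
    #   self.inputs = {
    #       'X': (x_np, [[2, 3]]),                      # LoDTensor with lod
    #       'Ids': [('ids0', ids0_np), ('ids1', ids1_np)],  # duplicable
    #   }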

    def _append_ops(self, block):
        op_proto = OpProtoHolder.instance().get_op_proto(self.op_type)
        # infer datatype from inputs and outputs for this test case
        self.infer_dtype_from_inputs_outputs(self.inputs, self.outputs)
        inputs = append_input_output(block, op_proto, self.inputs, True,
                                     self.dtype)
        outputs = append_input_output(block, op_proto, self.outputs, False,
                                      self.dtype)
        op = block.append_op(
            type=self.op_type,
            inputs=inputs,
            outputs=outputs,
            attrs=self.attrs if hasattr(self, "attrs") else dict())
        # infer variable type and infer shape in compile-time
        op.desc.infer_var_type(block.desc)
        op.desc.infer_shape(block.desc)

    def _get_io_vars(self, block, numpy_inputs):
        inputs = {}
        for name, value in numpy_inputs.iteritems():
            if isinstance(value, list):
                var_list = [
                    block.var(sub_name) for sub_name, sub_value in value
                ]
                inputs[name] = var_list
            else:
                inputs[name] = block.var(name)
        return inputs

    def _get_inputs(self, block):
        return self._get_io_vars(block, self.inputs)

    def _get_outputs(self, block):
        return self._get_io_vars(block, self.outputs)

    def calc_output(self, place):
        outs, _ = self._calc_output(place)
        return outs

    def _calc_output(self, place, parallel=False):

        program = Program()
        block = program.global_block()
        self._append_ops(block)

        inputs = self._get_inputs(block)
        outputs = self._get_outputs(block)
        feed_map = self.feed_var(inputs, place)

        if parallel:
            use_cuda = False
            if isinstance(place, fluid.CUDAPlace):
                use_cuda = True
            # no loss variable exists in this forward-only pass, so
            # loss_name is not passed to the ParallelExecutor
            executor = fluid.ParallelExecutor(
                use_cuda=use_cuda, main_program=program)
        else:
            executor = Executor(place)

        fetch_list = getattr(self, "fetch_list", [])
        # if the fetch_list is customized by user, we use it directly.
        # if not, fill the fetch_list by the user configured outputs in test.
        if len(fetch_list) == 0:
            for var_name, var in outputs.iteritems():
                if isinstance(var, list):
                    for v in var:
                        fetch_list.append(v)
                else:
                    fetch_list.append(var)
        # if fetch_list is still empty, fill it with the operator's outputs.
        if len(fetch_list) == 0:
            for out_name, out_dup in Operator.get_op_outputs(self.op_type):
                fetch_list.append(str(out_name))
        # fetch_list = map(block.var, fetch_list)
        if not isinstance(fetch_list[0], fluid.framework.Variable):
            fetch_list = map(block.var, fetch_list)
        outs = executor.run(program,
                            feed=feed_map,
                            fetch_list=fetch_list,
                            return_numpy=False)
        return outs, fetch_list

    def check_output_with_place(self, place, atol):
        outs, fetch_list = self._calc_output(place)
        for out_name, out_dup in Operator.get_op_outputs(self.op_type):
            if out_name not in self.outputs:
                continue

            def find_actual(target_name, fetch_list):
                found = [
                    i for i, var in enumerate(fetch_list)
                    if var.name == target_name
                ]
                self.assertTrue(
                    len(found) == 1, "Found {} {}".format(
                        len(found), target_name))
                return found[0]

            if out_dup:
                sub_out = self.outputs[out_name]
                if not isinstance(sub_out, list):
                    raise AssertionError("sub_out type %s is not list" %
                                         type(sub_out))
                for item in sub_out:
                    sub_out_name, expect = item[0], item[1]
                    idx = find_actual(sub_out_name, fetch_list)
                    actual = outs[idx]
                    actual_t = np.array(actual)
                    expect_t = expect[0] \
                        if isinstance(expect, tuple) else expect
                    self.assertTrue(
                        np.allclose(
                            actual_t, expect_t, atol=atol),
                        "Output (" + sub_out_name + ") has diff at " +
                        str(place))
                    if isinstance(expect, tuple):
                        self.assertListEqual(
                            actual.recursive_sequence_lengths(), expect[1],
                            "Output (" + sub_out_name +
                            ") has different lod at " + str(place))
            else:
                idx = find_actual(out_name, fetch_list)
                actual = outs[idx]
                actual_t = np.array(actual)
                expect = self.outputs[out_name]
                expect_t = expect[0] if isinstance(expect, tuple) else expect
                self.assertTrue(
                    np.allclose(
                        actual_t, expect_t, atol=atol),
                    "Output (" + out_name + ") has diff at " + str(place) +
                    "\nExpect " + str(expect_t) + "\n" + "But Got " +
                    str(actual_t))
                if isinstance(expect, tuple):
                    self.assertListEqual(actual.recursive_sequence_lengths(),
                                         expect[1], "Output (" + out_name +
                                         ") has different lod at " + str(place))

    def _get_places(self):
        if self.dtype == np.float16:
            if core.is_compiled_with_cuda() and core.op_support_gpu(
                    self.op_type):
                place = core.CUDAPlace(0)
                if core.is_float16_supported(place):
                    return [place]
                else:
                    return []
            else:
                return []
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda() and core.op_support_gpu(self.op_type):
            places.append(core.CUDAPlace(0))
        return places

    def check_output(self, atol=1e-5):
        places = self._get_places()
        for place in places:
            self.check_output_with_place(place, atol)

    def check_output_customized(self, checker):
        places = self._get_places()
        for place in places:
            outs = self.calc_output(place)
            outs = [np.array(out) for out in outs]
            checker(outs)
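
    # A sketch of a customized check (the checker body is illustrative):
    #
    #   def checker(outs):
    #       self.assertTrue(np.allclose(outs[0], expected, atol=1e-3))
    #
    #   self.check_output_customized(checker)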

    def __assert_is_close(self, numeric_grads, analytic_grads, names,
                          max_relative_error, msg_prefix):

        for a, b, name in itertools.izip(numeric_grads, analytic_grads, names):
            abs_a = np.abs(a)
            abs_a[abs_a < 1e-3] = 1

            diff_mat = np.abs(a - b) / abs_a
            max_diff = np.max(diff_mat)

            def err_msg():
                offset = np.argmax(diff_mat > max_relative_error)
                return ("%s Variable %s max gradient diff %f over limit %f, "
                        "the first error element is %d, expected %f, but got %f"
                        ) % (msg_prefix, name, max_diff, max_relative_error,
                             offset, a.flatten()[offset], b.flatten()[offset])

            self.assertLessEqual(max_diff, max_relative_error, err_msg())
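
    # The quantity compared above is |numeric - analytic| / |numeric|,
    # with |numeric| clamped to 1 wherever it falls below 1e-3, so that
    # near-zero gradients are compared absolutely rather than relatively.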

    def check_grad(self,
                   inputs_to_check,
                   output_names,
                   no_grad_set=None,
                   numeric_grad_delta=0.005,
                   in_place=False,
                   max_relative_error=0.005,
                   user_defined_grads=None):
        places = self._get_places()
        for place in places:
            self.check_grad_with_place(place, inputs_to_check, output_names,
                                       no_grad_set, numeric_grad_delta,
                                       in_place, max_relative_error,
                                       user_defined_grads)

    def check_grad_with_place(self,
                              place,
                              inputs_to_check,
                              output_names,
                              no_grad_set=None,
                              numeric_grad_delta=0.005,
                              in_place=False,
                              max_relative_error=0.005,
                              user_defined_grads=None):
        self.scope = core.Scope()
        op_inputs = self.inputs if hasattr(self, "inputs") else dict()
        op_outputs = self.outputs if hasattr(self, "outputs") else dict()
        op_attrs = self.attrs if hasattr(self, "attrs") else dict()
        self.op = create_op(self.scope, self.op_type, op_inputs, op_outputs,
                            op_attrs)

        if no_grad_set is None:
            no_grad_set = set()

        if not isinstance(output_names, list):
            output_names = [output_names]

        numeric_grads = user_defined_grads or [
            get_numeric_gradient(
                place,
                self.scope,
                self.op,
                self.inputs,
                input_to_check,
                output_names,
                delta=numeric_grad_delta,
                in_place=in_place) for input_to_check in inputs_to_check
        ]
        analytic_grads = self._get_gradient(inputs_to_check, place,
                                            output_names, no_grad_set)

        self.__assert_is_close(numeric_grads, analytic_grads, inputs_to_check,
                               max_relative_error,
                               "Gradient Check On %s" % str(place))

    @staticmethod
    def _numpy_to_lod_tensor(np_value, lod, place):
        tensor = core.LoDTensor()
        tensor.set(np_value, place)
        if lod is not None:
            tensor.set_recursive_sequence_lengths(lod)
        return tensor

    @staticmethod
    def np_dtype_to_fluid_dtype(input):
        """Change the dtype of float16 numpy array

        numpy float16 is bound to paddle::platform::float16
        in tensor_py.h via the help of the uint16 data type, since
        the internal memory representation of float16 is
        uint16_t in paddle and np.uint16 in numpy, which are
        themselves bound together by pybind.

        Args:
            input: input numpy array

        Returns:
            input: The dtype of input will be changed to np.uint16 if
                it is originally np.float16, so that the internal memory
                of input is reinterpreted as np.uint16.
        """
        if input.dtype == np.float16:
            input.dtype = np.uint16
        return input
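
    # A sketch of typical use (shape illustrative): reinterpret a float16
    # array so it can be fed as a Paddle float16 tensor:
    #
    #   x = np.random.random((2, 3)).astype(np.float16)
    #   x_feed = OpTest.np_dtype_to_fluid_dtype(x)  # dtype becomes np.uint16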

    @staticmethod
    def fluid_dtype_to_np_dtype(dtype):
        """
        See above: convert the dtype back to the normal numpy type.
        """
        if dtype == np.uint16:
            dtype = np.float16
        return dtype

    @staticmethod
    def np_value_to_fluid_value(input):
        if input.dtype == np.float16:
            input = input.view(np.uint16)
        return input

    def _get_gradient(self,
                      input_to_check,
                      place,
                      output_names,
                      no_grad_set,
                      parallel=False):
        prog = Program()
        block = prog.global_block()
        self._append_ops(block)
        loss = append_loss_ops(block, output_names)
        param_grad_list = append_backward(
            loss=loss, parameter_list=input_to_check, no_grad_set=no_grad_set)

        inputs = self._get_inputs(block)
        feed_dict = self.feed_var(inputs, place)

        fetch_list = [g for p, g in param_grad_list]
        if parallel:
            use_cuda = False
            if isinstance(place, fluid.CUDAPlace):
                use_cuda = True
            executor = fluid.ParallelExecutor(
                use_cuda=use_cuda, loss_name=loss.name, main_program=prog)
        else:
            executor = Executor(place)
        return map(np.array,
                   executor.run(prog, feed_dict, fetch_list,
                                return_numpy=False))
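

# A minimal sketch of how OpTest is used by an operator test. The op name
# "mul", the shapes, and the tolerance below are illustrative, not part of
# this file:
#
#   class TestMulOp(OpTest):
#       def setUp(self):
#           self.op_type = "mul"
#           self.inputs = {
#               'X': np.random.random((2, 3)).astype("float32"),
#               'Y': np.random.random((3, 4)).astype("float32"),
#           }
#           self.outputs = {'Out': np.dot(self.inputs['X'], self.inputs['Y'])}
#
#       def test_check_output(self):
#           self.check_output()
#
#       def test_check_grad(self):
#           self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.01)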