#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import numpy as np
import random
import time
import itertools
import collections

import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.backward import append_backward
from paddle.fluid.op import Operator
from paddle.fluid.executor import Executor
from paddle.fluid.framework import Program, OpProtoHolder, Variable
from testsuite import create_op, set_input, append_input_output, append_loss_ops


def randomize_probability(batch_size, class_num, dtype='float32'):
    prob = np.random.uniform(
        0.1, 1.0, size=(batch_size, class_num)).astype(dtype)
    prob_sum = prob.sum(axis=1)
    for i in xrange(len(prob)):
        prob[i] /= prob_sum[i]
    return prob
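
# Illustrative usage (added note): randomize_probability(4, 10) returns a
# (4, 10) float32 array of positive values whose rows each sum to 1, e.g. a
# label distribution for a cross-entropy style test.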


def get_numeric_gradient(place,
                         scope,
                         op,
                         inputs,
                         input_to_check,
                         output_names,
                         delta=0.005,
                         in_place=False):
    # FIXME: change this method to use compile-time concepts
    set_input(scope, op, inputs, place)

    def product(dim):
        return reduce(lambda a, b: a * b, dim, 1)

    def get_output():
        sum = []
        for output_name in output_names:
            op.run(scope, place)
            sum.append(
                np.array(scope.find_var(output_name).get_tensor()).mean())
        return np.array(sum).mean()

    tensor_to_check = scope.find_var(input_to_check).get_tensor()
    tensor_size = product(tensor_to_check.shape())
    tensor_to_check_dtype = tensor_to_check._dtype()
    if tensor_to_check_dtype == core.VarDesc.VarType.FP32:
        tensor_to_check_dtype = np.float32
    elif tensor_to_check_dtype == core.VarDesc.VarType.FP64:
        tensor_to_check_dtype = np.float64
    else:
        raise ValueError("Not supported data type " + str(
            tensor_to_check_dtype))

    gradient_flat = np.zeros(shape=(tensor_size, ), dtype=tensor_to_check_dtype)

    def __get_elem__(tensor, i):
        if tensor_to_check_dtype == np.float32:
            return tensor._get_float_element(i)
        else:
            return tensor._get_double_element(i)

    def __set_elem__(tensor, i, e):
        if tensor_to_check_dtype == np.float32:
            tensor._set_float_element(i, e)
        else:
            tensor._set_double_element(i, e)

    # We only compute the gradient of one element at a time,
    # so a for loop walks over every element to build the full gradient.
    for i in xrange(tensor_size):
        if in_place:
            set_input(scope, op, inputs, place)

        # get one input element through its index i.
        origin = __get_elem__(tensor_to_check, i)
        # add delta to it, run op and then get the sum of the result tensor.
        x_pos = origin + delta
        __set_elem__(tensor_to_check, i, x_pos)
        y_pos = get_output()

        if in_place:
            set_input(scope, op, inputs, place)

        x_neg = origin - delta
        __set_elem__(tensor_to_check, i, x_neg)
        y_neg = get_output()

        __set_elem__(tensor_to_check, i, origin)
        gradient_flat[i] = (y_pos - y_neg) / delta / 2

    return gradient_flat.reshape(tensor_to_check.shape())
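
# The function above estimates d(mean of outputs)/d(input[i]) with a central
# finite difference: the operator is re-run at x_i + delta and x_i - delta,
# and gradient[i] = (y_pos - y_neg) / (2 * delta).
# A minimal NumPy-only sketch of the same idea (illustrative only, independent
# of any Paddle scope or operator):
#
#     def numeric_grad(f, x, delta=0.005):
#         """f maps the array x to a scalar; returns df/dx elementwise."""
#         grad = np.zeros(x.size, dtype=np.float64)
#         flat = x.ravel()  # view for contiguous x; writes modify x in place
#         for i in range(flat.size):
#             orig = flat[i]
#             flat[i] = orig + delta
#             y_pos = f(x)
#             flat[i] = orig - delta
#             y_neg = f(x)
#             flat[i] = orig
#             grad[i] = (y_pos - y_neg) / (2 * delta)
#         return grad.reshape(x.shape)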


class OpTest(unittest.TestCase):
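    # Illustrative sketch (added note; the operator name and shapes are
    # assumptions, not taken from this file): a concrete test subclasses
    # OpTest, fills in op_type / inputs / outputs in setUp(), and exposes
    # test_* methods that call check_output() and check_grad():
    #
    #     class TestElementwiseAddOp(OpTest):
    #         def setUp(self):
    #             self.op_type = "elementwise_add"
    #             x = np.random.uniform(0.1, 1, [13, 17]).astype("float32")
    #             y = np.random.uniform(0.1, 1, [13, 17]).astype("float32")
    #             self.inputs = {'X': x, 'Y': y}
    #             self.outputs = {'Out': x + y}
    #
    #         def test_check_output(self):
    #             self.check_output()
    #
    #         def test_check_grad(self):
    #             self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.005)
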
    @classmethod
    def setUpClass(cls):
        '''Fix random seeds to remove randomness from tests'''
        cls._np_rand_state = np.random.get_state()
        cls._py_rand_state = random.getstate()
        cls.call_once = False
        cls.dtype = "float32"
        cls.outputs = {}

        np.random.seed(123)
        random.seed(124)

    @classmethod
    def tearDownClass(cls):
        """Restore random seeds"""
        np.random.set_state(cls._np_rand_state)
        random.setstate(cls._py_rand_state)

    def try_call_once(self, data_type):
        if not self.call_once:
            self.call_once = True
            self.dtype = data_type

    def infer_dtype_from_inputs_outputs(self, inputs, outputs):
        def infer_dtype(numpy_dict):
            assert isinstance(
                numpy_dict,
                dict), "self.inputs, self.outputs must be numpy_dict"
            for var_name, var_value in numpy_dict.iteritems():
                if isinstance(var_value, (np.ndarray, np.generic)):
                    self.try_call_once(var_value.dtype)
                elif isinstance(var_value, (list, tuple)):
                    # the case of self.inputs = {"X": [("x0", x0), ("x1", x1), ("x2", x2)]}
                    if len(var_value) > 1 and isinstance(var_value[1], (
                            np.ndarray, np.generic)):
                        instance = var_value[1]
                        self.try_call_once(instance[1].dtype)
                else:
                    self.try_call_once("float32")

        infer_dtype(inputs)
        infer_dtype(outputs)

    def feed_var(self, input_vars, place):
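        # Added note: each entry of self.inputs may be a plain numpy array,
        # a (numpy array, recursive_sequence_lengths) tuple, or, for a
        # duplicable input slot, a list of (sub_name, value) pairs such as
        # {"X": [("x0", x0), ("x1", x1)]}; every form is converted into a
        # LoDTensor feed entry below.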
        feed_map = {}
        for var_name in input_vars:
            if isinstance(input_vars[var_name], list):
                for name, np_value in self.inputs[var_name]:
                    tensor = core.LoDTensor()
                    if isinstance(np_value, tuple):
                        tensor.set(np_value[0], place)
                        tensor.set_recursive_sequence_lengths(np_value[1])
                    else:
                        tensor.set(np_value, place)
                    feed_map[name] = tensor
            else:
                tensor = core.LoDTensor()
                if isinstance(self.inputs[var_name], tuple):
                    tensor.set(self.inputs[var_name][0], place)
                    tensor.set_recursive_sequence_lengths(self.inputs[var_name][
                        1])
                else:
                    tensor.set(self.inputs[var_name], place)
                feed_map[var_name] = tensor

        return feed_map

    def _append_ops(self, block):
        op_proto = OpProtoHolder.instance().get_op_proto(self.op_type)
        # infer datatype from inputs and outputs for this test case
        self.infer_dtype_from_inputs_outputs(self.inputs, self.outputs)
        inputs = append_input_output(block, op_proto, self.inputs, True,
                                     self.dtype)
        outputs = append_input_output(block, op_proto, self.outputs, False,
                                      self.dtype)
        op = block.append_op(
            type=self.op_type,
            inputs=inputs,
            outputs=outputs,
            attrs=self.attrs if hasattr(self, "attrs") else dict())
        # infer variable type and infer shape in compile-time
        op.desc.infer_var_type(block.desc)
        op.desc.infer_shape(block.desc)

    def _get_io_vars(self, block, numpy_inputs):
        inputs = {}
        for name, value in numpy_inputs.iteritems():
            if isinstance(value, list):
                var_list = [
                    block.var(sub_name) for sub_name, sub_value in value
                ]
                inputs[name] = var_list
            else:
                inputs[name] = block.var(name)
        return inputs

    def _get_inputs(self, block):
        return self._get_io_vars(block, self.inputs)

    def _get_outputs(self, block):
        return self._get_io_vars(block, self.outputs)

    def calc_output(self, place):
        outs, _ = self._calc_output(place)
        return outs

    def _calc_output(self, place, parallel=False):

        program = Program()
        block = program.global_block()
        self._append_ops(block)

        inputs = self._get_inputs(block)
        outputs = self._get_outputs(block)
        feed_map = self.feed_var(inputs, place)

        if parallel:
            use_cuda = False
            if isinstance(place, fluid.CUDAPlace):
                use_cuda = True
            # this forward-only path has no loss, so no loss_name is passed
            executor = fluid.ParallelExecutor(
                use_cuda=use_cuda, main_program=program)
        else:
            executor = Executor(place)

        fetch_list = getattr(self, "fetch_list", [])
        # If the fetch_list is customized by the user, use it directly;
        # if not, fill it with the outputs configured in the test.
        if len(fetch_list) == 0:
            for var_name, var in outputs.iteritems():
                if isinstance(var, list):
                    for v in var:
                        fetch_list.append(v)
                else:
                    fetch_list.append(var)
        # If the fetch_list is still empty, fill it with the operator's outputs.
        if len(fetch_list) == 0:
            for out_name, out_dup in Operator.get_op_outputs(self.op_type):
                fetch_list.append(str(out_name))
        # fetch_list = map(block.var, fetch_list)
        if not isinstance(fetch_list[0], fluid.framework.Variable):
            fetch_list = map(block.var, fetch_list)
        outs = executor.run(program,
                            feed=feed_map,
                            fetch_list=fetch_list,
                            return_numpy=False)
        return outs, fetch_list

    def check_output_with_place(self, place, atol):
        outs, fetch_list = self._calc_output(place)
        for out_name, out_dup in Operator.get_op_outputs(self.op_type):
            if out_name not in self.outputs:
                continue

            def find_actual(target_name, fetch_list):
                found = [
                    i for i, var in enumerate(fetch_list)
                    if var.name == target_name
                ]
                self.assertTrue(
                    len(found) == 1, "Found {} {}".format(
                        len(found), target_name))
                return found[0]

            if out_dup:
                sub_out = self.outputs[out_name]
                if not isinstance(sub_out, list):
                    raise AssertionError("sub_out type %s is not list" %
                                         type(sub_out))
                for item in sub_out:
                    sub_out_name, expect = item[0], item[1]
                    idx = find_actual(sub_out_name, fetch_list)
                    actual = outs[idx]
                    actual_t = np.array(actual)
                    expect_t = expect[0] \
                        if isinstance(expect, tuple) else expect
                    self.assertTrue(
                        np.allclose(
                            actual_t, expect_t, atol=atol),
                        "Output (" + sub_out_name + ") has diff at " +
                        str(place))
                    if isinstance(expect, tuple):
                        self.assertListEqual(
                            actual.recursive_sequence_lengths(), expect[1],
                            "Output (" + sub_out_name +
                            ") has different lod at " + str(place))
            else:
                idx = find_actual(out_name, fetch_list)
                actual = outs[idx]
                actual_t = np.array(actual)
                expect = self.outputs[out_name]
                expect_t = expect[0] if isinstance(expect, tuple) else expect
                self.assertTrue(
                    np.allclose(
                        actual_t, expect_t, atol=atol),
                    "Output (" + out_name + ") has diff at " + str(place) +
                    str(actual_t) + "\n" + str(expect_t))
                if isinstance(expect, tuple):
                    self.assertListEqual(actual.recursive_sequence_lengths(),
                                         expect[1], "Output (" + out_name +
                                         ") has different lod at " + str(place))

    def _get_places(self):
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda() and core.op_support_gpu(self.op_type):
            places.append(core.CUDAPlace(0))
        return places

    def check_output(self, atol=1e-5):
        places = self._get_places()
        for place in places:
            self.check_output_with_place(place, atol)

    def check_output_customized(self, checker):
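        # Added note: `checker` is a callable that receives the list of output
        # tensors (converted to numpy arrays below) and performs its own
        # assertions, for operators whose results cannot be verified with a
        # plain np.allclose comparison.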
        places = self._get_places()
        for place in places:
            outs = self.calc_output(place)
            outs = [np.array(out) for out in outs]
            checker(outs)

    def __assert_is_close(self, numeric_grads, analytic_grads, names,
                          max_relative_error, msg_prefix):
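        # Added note: the metric is a relative error |a - b| / |a|, except that
        # numeric-gradient elements with |a| < 1e-3 fall back to an absolute
        # difference (the denominator is clamped to 1), so near-zero reference
        # values do not inflate the ratio.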

        for a, b, name in itertools.izip(numeric_grads, analytic_grads, names):
            abs_a = np.abs(a)
            abs_a[abs_a < 1e-3] = 1

            diff_mat = np.abs(a - b) / abs_a
            max_diff = np.max(diff_mat)

            def err_msg():
                offset = np.argmax(diff_mat > max_relative_error)
                return ("%s Variable %s max gradient diff %f over limit %f, "
                        "the first error element is %d, %f, %f") % (
                            msg_prefix, name, max_diff, max_relative_error,
                            offset, a.flatten()[offset], b.flatten()[offset])

            self.assertLessEqual(max_diff, max_relative_error, err_msg())

    def check_grad(self,
                   inputs_to_check,
                   output_names,
                   no_grad_set=None,
                   numeric_grad_delta=0.005,
                   in_place=False,
                   max_relative_error=0.005,
                   user_defined_grads=None):
        places = self._get_places()
        for place in places:
            self.check_grad_with_place(place, inputs_to_check, output_names,
                                       no_grad_set, numeric_grad_delta,
                                       in_place, max_relative_error,
                                       user_defined_grads)

    def check_grad_with_place(self,
                              place,
                              inputs_to_check,
                              output_names,
                              no_grad_set=None,
                              numeric_grad_delta=0.005,
                              in_place=False,
                              max_relative_error=0.005,
                              user_defined_grads=None):
        self.scope = core.Scope()
        op_inputs = self.inputs if hasattr(self, "inputs") else dict()
        op_outputs = self.outputs if hasattr(self, "outputs") else dict()
        op_attrs = self.attrs if hasattr(self, "attrs") else dict()
        self.op = create_op(self.scope, self.op_type, op_inputs, op_outputs,
                            op_attrs)

        if no_grad_set is None:
            no_grad_set = set()

        if not isinstance(output_names, list):
            output_names = [output_names]

        numeric_grads = user_defined_grads or [
            get_numeric_gradient(
                place,
                self.scope,
                self.op,
                self.inputs,
                input_to_check,
                output_names,
                delta=numeric_grad_delta,
                in_place=in_place) for input_to_check in inputs_to_check
        ]
        analytic_grads = self._get_gradient(inputs_to_check, place,
                                            output_names, no_grad_set)

        self.__assert_is_close(numeric_grads, analytic_grads, inputs_to_check,
                               max_relative_error,
                               "Gradient Check On %s" % str(place))

    @staticmethod
    def _numpy_to_lod_tensor(np_value, lod, place):
        tensor = core.LoDTensor()
        tensor.set(np_value, place)
        if lod is not None:
            tensor.set_recursive_sequence_lengths(lod)
        return tensor

    @staticmethod
    def np_dtype_to_fluid_dtype(input):
        """Change the dtype of float16 numpy array

        numpy float16 is bound to paddle::platform::float16
        in tensor_py.h with the help of the uint16 data type, since
        the internal memory representation of float16 is
        uint16_t in paddle and np.uint16 in numpy, which are
        themselves bound together by pybind.

        Args:
            input: input numpy array

        Returns:
            input: The dtype of input will be changed to np.uint16 if
                it is originally np.float16, such that the internal memory
                of input will be reinterpreted as of dtype np.uint16.
        """
        if input.dtype == np.float16:
            input.dtype = np.uint16
        return input
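
    # Illustrative usage (added note): a float16 test would typically prepare
    # its feed data as OpTest.np_dtype_to_fluid_dtype(x.astype(np.float16)),
    # handing the framework a uint16 view of the same bits.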

    def _get_gradient(self,
                      input_to_check,
                      place,
                      output_names,
                      no_grad_set,
                      parallel=False):
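        # Added summary: build a fresh program containing the op under test,
        # reduce its outputs to a scalar loss via append_loss_ops, run
        # append_backward, and fetch the gradients of the inputs being checked.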
        prog = Program()
        block = prog.global_block()
        self._append_ops(block)
        loss = append_loss_ops(block, output_names)
        param_grad_list = append_backward(
            loss=loss, parameter_list=input_to_check, no_grad_set=no_grad_set)

        inputs = self._get_inputs(block)
        feed_dict = self.feed_var(inputs, place)

        fetch_list = [g for p, g in param_grad_list]
        if parallel:
            use_cuda = False
            if isinstance(place, fluid.CUDAPlace):
                use_cuda = True
            executor = fluid.ParallelExecutor(
                use_cuda=use_cuda, loss_name=loss.name, main_program=prog)
        else:
            executor = Executor(place)
        return map(np.array,
                   executor.run(prog, feed_dict, fetch_list,
                                return_numpy=False))