import unittest
import numpy as np
import random
import itertools
import paddle.v2.framework.core as core
import collections
from paddle.v2.framework.backward import append_backward_ops
from paddle.v2.framework.op import Operator
from paddle.v2.framework.executor import Executor
from paddle.v2.framework.framework import Program, OpProtoHolder


def randomize_probability(batch_size, class_num, dtype='float32'):
    prob = np.random.uniform(
        0.1, 1.0, size=(batch_size, class_num)).astype(dtype)
    prob_sum = prob.sum(axis=1)
    for i in xrange(len(prob)):
        prob[i] /= prob_sum[i]
    return prob
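
# For example (illustrative values), each row of the returned matrix is
# normalized, so it can be fed to ops that expect one probability
# distribution per sample:
#
#     prob = randomize_probability(batch_size=4, class_num=10)
#     assert np.allclose(prob.sum(axis=1), 1.0)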


def create_op(scope, op_type, inputs, outputs, attrs):
    kwargs = dict()

    def __create_var__(name, var_name):
        scope.var(var_name).get_tensor()
        kwargs[name].append(var_name)

    for in_name, in_dup in Operator.get_op_inputs(op_type):
        if in_name in inputs:
            kwargs[in_name] = []
            if in_dup:
                sub_in = inputs[in_name]
                for sub_in_name, _ in sub_in:
                    __create_var__(in_name, sub_in_name)
            else:
                __create_var__(in_name, in_name)

    for out_name, out_dup in Operator.get_op_outputs(op_type):
        if out_name in outputs:
            kwargs[out_name] = []
            if out_dup:
                sub_out = outputs[out_name]
                for sub_out_name, _ in sub_out:
                    __create_var__(out_name, sub_out_name)
            else:
                __create_var__(out_name, out_name)

    for attr_name in Operator.get_op_attr_names(op_type):
        if attr_name in attrs:
            kwargs[attr_name] = attrs[attr_name]

    return Operator(op_type, **kwargs)
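
# As an illustration (hypothetical op and names): for an operator "mul" with
# inputs {'X': x, 'Y': y} and outputs {'Out': out}, the kwargs assembled above
# would look like {'X': ['X'], 'Y': ['Y'], 'Out': ['Out']}; a duplicable
# input or output instead contributes one scope variable per (sub_name, value)
# pair, e.g. {'X': ['x0', 'x1']}.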


def set_input(scope, op, inputs, place):
    def __set_input__(var_name, var):
        if isinstance(var, tuple) or isinstance(var, np.ndarray):
            tensor = scope.find_var(var_name).get_tensor()
            if isinstance(var, tuple):
                tensor.set_lod(var[1])
                var = var[0]
            tensor.set_dims(var.shape)
            tensor.set(var, place)
        elif isinstance(var, float):
            scope.find_var(var_name).set_float(var)
        elif isinstance(var, int):
            scope.find_var(var_name).set_int(var)

    for in_name, in_dup in Operator.get_op_inputs(op.type()):
        if in_name in inputs:
            if in_dup:
                sub_in = inputs[in_name]
                for sub_in_name, sub_in_val in sub_in:
                    __set_input__(sub_in_name, sub_in_val)
            else:
                __set_input__(in_name, inputs[in_name])


def get_numeric_gradient(scope,
                         op,
                         inputs,
                         input_to_check,
                         output_names,
                         delta=0.005,
                         in_place=False):
    # FIXME: change this method to use compile-time concepts
    set_input(scope, op, inputs, core.CPUPlace())

    def product(dim):
        return reduce(lambda a, b: a * b, dim, 1)

    ctx = core.DeviceContext.create(core.CPUPlace())

    def get_output():
        sum = []
        for output_name in output_names:
            op.run(scope, ctx)
            sum.append(
                np.array(scope.find_var(output_name).get_tensor()).mean())
        return np.array(sum).mean()

    tensor_to_check = scope.find_var(input_to_check).get_tensor()
    tensor_size = product(tensor_to_check.get_dims())
    tensor_to_check_dtype = tensor_to_check.dtype()
    if tensor_to_check_dtype == core.DataType.FP32:
        tensor_to_check_dtype = np.float32
    elif tensor_to_check_dtype == core.DataType.FP64:
        tensor_to_check_dtype = np.float64
    else:
        raise ValueError("Not supported data type " + str(
            tensor_to_check_dtype))

    gradient_flat = np.zeros(shape=(tensor_size, ), dtype=tensor_to_check_dtype)

    def __get_elem__(tensor, i):
        if tensor_to_check_dtype == np.float32:
            return tensor.get_float_element(i)
        else:
            return tensor.get_double_element(i)

    def __set_elem__(tensor, i, e):
        if tensor_to_check_dtype == np.float32:
            tensor.set_float_element(i, e)
        else:
            tensor.set_double_element(i, e)

    # We only compute the gradient of one element at a time; the for loop
    # below walks over every element of the tensor being checked.
    for i in xrange(tensor_size):
        if in_place:
            set_input(scope, op, inputs, core.CPUPlace())

        # get one input element through its index i.
        origin = __get_elem__(tensor_to_check, i)
        # add delta to it, run the op, and then get the mean of the result tensors.
        x_pos = origin + delta
        __set_elem__(tensor_to_check, i, x_pos)
        y_pos = get_output()

        if in_place:
            set_input(scope, op, inputs, core.CPUPlace())

        x_neg = origin - delta
        __set_elem__(tensor_to_check, i, x_neg)
        y_neg = get_output()

        __set_elem__(tensor_to_check, i, origin)
        gradient_flat[i] = (y_pos - y_neg) / delta / 2

    return gradient_flat.reshape(tensor_to_check.get_dims())
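
# In effect, get_numeric_gradient estimates each partial derivative with a
# central difference, where f is the mean over the requested outputs computed
# by get_output():
#
#     d f / d x_i  ~=  (f(x_i + delta) - f(x_i - delta)) / (2 * delta)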


def append_input_output(block, op_proto, np_list, is_input):
    '''Insert VarDesc and generate Python variable instance'''
    proto_list = op_proto.inputs if is_input else op_proto.outputs

    def create_var(block, name, np_list, var_proto):
        if name not in np_list:
            assert var_proto.intermediate, "{} not found".format(name)
            shape = None
            lod_level = None
        else:
            np_value = np_list[name]
            if isinstance(np_value, tuple):
                shape = list(np_value[0].shape)
                lod_level = len(np_value[1])
            else:
                shape = list(np_value.shape)
                lod_level = 0
        return block.create_var(
            dtype="float32", shape=shape, lod_level=lod_level, name=name)

    var_dict = {}
    for var_proto in proto_list:
        var_name = str(var_proto.name)
        if is_input:
            if (var_name not in np_list) and var_proto.dispensable:
                continue
            assert (var_name in np_list) or (var_proto.dispensable), \
                "Missing {} as input".format(var_name)
        if var_proto.duplicable:
            assert isinstance(np_list[var_name], list), \
                "Duplicable {} should be set as list".format(var_name)
            var_list = []
            for (name, np_value) in np_list[var_name]:
                var_list.append(
                    create_var(block, name, {name: np_value}, var_proto))
            var_dict[var_name] = var_list
        else:
            var_dict[var_name] = create_var(block, var_name, np_list, var_proto)

    return var_dict
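
# The np_list values accepted above come in three forms: a plain ndarray, an
# (ndarray, lod) tuple for LoD tensors, or, for a duplicable parameter, a list
# of (sub_name, ndarray-or-tuple) pairs, e.g. {'X': [('x0', a), ('x1', b)]}
# (names here are illustrative).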


class OpTest(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        '''Fix random seeds to remove randomness from tests'''
        cls._np_rand_state = np.random.get_state()
        cls._py_rand_state = random.getstate()

        np.random.seed(123)
        random.seed(124)

    @classmethod
    def tearDownClass(cls):
        '''Restore random seeds'''
        np.random.set_state(cls._np_rand_state)
        random.setstate(cls._py_rand_state)

    def feed_var(self, input_vars, place):
        feed_map = {}
        for var_name in input_vars:
            if isinstance(input_vars[var_name], list):
                for name, np_value in self.inputs[var_name]:
                    tensor = core.LoDTensor()
                    tensor.set(np_value, place)
                    feed_map[name] = tensor
            else:
                tensor = core.LoDTensor()
                if isinstance(self.inputs[var_name], tuple):
                    tensor.set(self.inputs[var_name][0], place)
                    tensor.set_lod(self.inputs[var_name][1])
                else:
                    tensor.set(self.inputs[var_name], place)
                feed_map[var_name] = tensor

        return feed_map

    def check_output_with_place(self, place, atol):
        op_proto = OpProtoHolder.instance().get_op_proto(self.op_type)

        program = Program()
        block = program.global_block()

        inputs = append_input_output(block, op_proto, self.inputs, True)
        outputs = append_input_output(block, op_proto, self.outputs, False)

        op = block.append_op(
            type=self.op_type,
            inputs=inputs,
            outputs=outputs,
            attrs=self.attrs if hasattr(self, "attrs") else dict())
        # infer variable type and infer shape in compile-time
        op.desc.infer_var_type(block.desc)
        op.desc.infer_shape(block.desc)

        fetch_list = []
        for var_name, var in outputs.iteritems():
            if var_name in self.outputs:
                if isinstance(var, list):
                    for v in var:
                        fetch_list.append(v)
                else:
                    fetch_list.append(var)

        feed_map = self.feed_var(inputs, place)

        exe = Executor(place)
        outs = exe.run(program, feed=feed_map, fetch_list=fetch_list)

        for out_name, out_dup in Operator.get_op_outputs(self.op_type):
            if out_name not in self.outputs:
                continue

            def find_actual(target_name, fetch_list):
                found = [
                    i for i, var in enumerate(fetch_list)
                    if var.name == target_name
                ]
                self.assertTrue(
                    len(found) == 1, "Found {} {}".format(
                        len(found), target_name))
                return found[0]

            if out_dup:
                sub_out = self.outputs[out_name]
                if not isinstance(sub_out, list):
                    raise AssertionError("sub_out type %s is not list" %
                                         type(sub_out))
                for sub_out_name, expect in sub_out:
                    idx = find_actual(sub_out_name, fetch_list)
                    actual_t = np.array(outs[idx])
                    expect_t = expect[0] \
                        if isinstance(expect, tuple) else expect
                    self.assertTrue(
                        np.allclose(
                            actual_t, expect_t, atol=atol),
                        "Output (" + sub_out_name + ") has diff at " +
                        str(place))
                    if isinstance(expect, tuple):
                        self.assertListEqual(
                            actual_t.lod(), expect[1], "Output (" + sub_out_name
                            + ") has different lod at " + str(place))
            else:
                idx = find_actual(out_name, fetch_list)
                actual_t = outs[idx]
                expect = self.outputs[out_name]
                expect_t = expect[0] if isinstance(expect, tuple) else expect
                self.assertTrue(
                    np.allclose(
                        actual_t, expect_t, atol=atol),
                    "Output (" + out_name + ") has diff at " + str(place))
                if isinstance(expect, tuple):
                    self.assertListEqual(actual_t.lod(), expect[1],
                                         "Output (" + out_name +
                                         ") has different lod at " + str(place))

    def check_output(self, atol=1e-5):
        places = [core.CPUPlace()]
        if core.is_compile_gpu() and core.op_support_gpu(self.op_type):
            places.append(core.GPUPlace(0))
        for place in places:
            self.check_output_with_place(place, atol)

    def __assert_is_close(self, numeric_grads, analytic_grads, names,
                          max_relative_error, msg_prefix):

        for a, b, name in itertools.izip(numeric_grads, analytic_grads, names):
            abs_a = np.abs(a)
            abs_a[abs_a < 1e-3] = 1

            diff_mat = np.abs(a - b) / abs_a
            max_diff = np.max(diff_mat)

            def err_msg():
                offset = np.argmax(diff_mat > max_relative_error)
                return ("%s Variable %s max gradient diff %f over limit %f, "
                        "the first error element is %d, %f, %f") % (
                            msg_prefix, name, max_diff, max_relative_error,
                            offset, a.flatten()[offset], b.flatten()[offset])

            self.assertLessEqual(max_diff, max_relative_error, err_msg())

    def check_grad(self,
                   inputs_to_check,
                   output_names,
                   no_grad_set=None,
                   numeric_grad_delta=0.005,
                   in_place=False,
                   max_relative_error=0.005,
                   user_defined_grads=None):
        self.scope = core.Scope()
        op_inputs = self.inputs if hasattr(self, "inputs") else dict()
        op_outputs = self.outputs if hasattr(self, "outputs") else dict()
        op_attrs = self.attrs if hasattr(self, "attrs") else dict()
        self.op = create_op(self.scope, self.op_type, op_inputs, op_outputs,
                            op_attrs)

        if no_grad_set is None:
            no_grad_set = set()

        if not type(output_names) is list:
            output_names = [output_names]

        numeric_grads = user_defined_grads or [
            get_numeric_gradient(
                self.scope,
                self.op,
                self.inputs,
                input_to_check,
                output_names,
                delta=numeric_grad_delta,
                in_place=in_place) for input_to_check in inputs_to_check
        ]
        cpu_place = core.CPUPlace()
        cpu_analytic_grads = self._get_gradient(inputs_to_check, cpu_place,
                                                output_names, no_grad_set)

        self.__assert_is_close(numeric_grads, cpu_analytic_grads,
                               inputs_to_check, max_relative_error,
                               "Gradient Check On %s" % str(cpu_place))

        if core.is_compile_gpu() and self.op.support_gpu():
            gpu_place = core.GPUPlace(0)
            gpu_analytic_grads = self._get_gradient(inputs_to_check, gpu_place,
                                                    output_names, no_grad_set)

            self.__assert_is_close(numeric_grads, gpu_analytic_grads,
                                   inputs_to_check, max_relative_error,
                                   "Gradient Check On %s" % str(gpu_place))

    @staticmethod
    def _create_var_descs_(block, var_dict):
        # FIXME: Try unify with `append_input_output`
        for param_name in var_dict:
            var = var_dict[param_name]
            if not isinstance(var, list) and not isinstance(var, tuple):
                var = [(param_name, var, None)]
            if not isinstance(var[0], list) and not isinstance(var[0], tuple):
                var = [(param_name, var[0], var[1])]

            for i, item in enumerate(var):
                if not isinstance(item[0], basestring):
                    item = [[param_name] + list(item)]
                if len(item) == 2:
                    # only set var name and value, set lod to None
                    var[i] = list(item) + [None]

            var_descs = [(block.create_var(
                name=name, shape=each.shape, dtype=each.dtype), each, lod)
                         for name, each, lod in var]

            yield param_name, var_descs

    @staticmethod
    def _merge_list(iterable):
        return reduce(lambda a, b: list(a) + list(b), iterable, [])

    @staticmethod
    def _numpy_to_lod_tensor(np_value, lod, place):
        tensor = core.LoDTensor()
        tensor.set(np_value, place)
        if lod is not None:
            tensor.set_lod(lod)
        return tensor

    def _get_gradient(self, input_to_check, place, output_names, no_grad_set):
        prog = Program()
        block = prog.global_block()
        inputs_with_np = {
            key: value
            for (key, value) in OpTest._create_var_descs_(
                block, getattr(self, 'inputs', {}))
        }
        outputs_with_np = {
            key: val
            for (key, val) in OpTest._create_var_descs_(
                block, getattr(self, 'outputs', {}))
        }
        inputs = {
            k: [item[0] for item in inputs_with_np[k]]
            for k in inputs_with_np
        }
        outputs = {
            k: [item[0] for item in outputs_with_np[k]]
            for k in outputs_with_np
        }

        op = block.append_op(
            type=self.op_type,
            inputs=inputs,
            outputs=outputs,
            attrs=getattr(self, 'attrs', {}))

        # infer variable type and infer shape in compile-time
        op.desc.infer_var_type(block.desc)
        op.desc.infer_shape(block.desc)

        mean_inputs = map(block.var, output_names)

        if len(mean_inputs) == 1:
            loss = block.create_var(dtype=mean_inputs[0].data_type, shape=[1])
            op = block.append_op(
                inputs={"X": mean_inputs}, outputs={"Out": loss}, type='mean')
            op.desc.infer_var_type(block.desc)
            op.desc.infer_shape(block.desc)
        else:
            avg_sum = []
            for cur_loss in mean_inputs:
                cur_avg_loss = block.create_var(
                    dtype=cur_loss.data_type, shape=[1])
                op = block.append_op(
                    inputs={"X": [cur_loss]},
                    outputs={"Out": [cur_avg_loss]},
                    type="mean")
                op.desc.infer_var_type(block.desc)
                op.desc.infer_shape(block.desc)
                avg_sum.append(cur_avg_loss)

            loss_sum = block.create_var(dtype=avg_sum[0].data_type, shape=[1])
            op_sum = block.append_op(
                inputs={"X": avg_sum}, outputs={"Out": loss_sum}, type='sum')
            op_sum.desc.infer_var_type(block.desc)
            op_sum.desc.infer_shape(block.desc)

            loss = block.create_var(dtype=loss_sum.data_type, shape=[1])
            op_loss = block.append_op(
                inputs={"X": loss_sum},
                outputs={"Out": loss},
                type='scale',
                attrs={'scale': 1.0 / float(len(avg_sum))})
            op_loss.desc.infer_var_type(block.desc)
            op_loss.desc.infer_shape(block.desc)

        param_grad_list = append_backward_ops(
            loss=loss, parameter_list=input_to_check, no_grad_set=no_grad_set)

        feed_dict = {
            item[0].name: OpTest._numpy_to_lod_tensor(item[1], item[2], place)
            for p_name in inputs_with_np for item in inputs_with_np[p_name]
        }

        fetch_list = [g for p, g in param_grad_list]
        executor = Executor(place)
        result = executor.run(prog, feed_dict, fetch_list)
        return map(np.array, result)
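

# A minimal sketch of how this harness is typically subclassed; the operator
# name, shapes, and tolerances below are illustrative assumptions rather than
# part of this module:
#
#     class TestMulOp(OpTest):
#         def setUp(self):
#             self.op_type = "mul"
#             x = np.random.random((32, 84)).astype("float32")
#             y = np.random.random((84, 100)).astype("float32")
#             self.inputs = {'X': x, 'Y': y}
#             self.outputs = {'Out': np.dot(x, y)}
#
#         def test_check_output(self):
#             self.check_output()
#
#         def test_check_grad(self):
#             self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.01)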