# op_test.py
import unittest
import numpy as np
import random
import itertools
import paddle.v2.framework.core as core
from paddle.v2.framework.op import Operator
from paddle.v2.framework.executor import Executor
from paddle.v2.framework.framework import Program, OpProtoHolder


def grad_var_name(var_name):
    return var_name + "@GRAD"


def create_op(scope, op_type, inputs, outputs, attrs):
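    '''Create an Operator of `op_type` in `scope`.

    A scope variable is registered for every (possibly duplicable) input and
    output listed in `inputs`/`outputs`, and any attribute present in `attrs`
    is forwarded to the Operator constructor.
    '''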
    kwargs = dict()

    def __create_var__(name, var_name):
        scope.var(var_name)
        kwargs[name].append(var_name)

    for in_name, in_dup in Operator.get_op_inputs(op_type):
        if in_name in inputs:
            kwargs[in_name] = []
            if in_dup:
                sub_in = inputs[in_name]
                for sub_in_name, _ in sub_in:
                    __create_var__(in_name, sub_in_name)
            else:
                __create_var__(in_name, in_name)

    for out_name, out_dup in Operator.get_op_outputs(op_type):
        if out_name in outputs:
            kwargs[out_name] = []
            if out_dup:
                sub_out = outputs[out_name]
                for sub_out_name, _ in sub_out:
                    __create_var__(out_name, sub_out_name)
            else:
                __create_var__(out_name, out_name)

    for attr_name in Operator.get_op_attr_names(op_type):
        if attr_name in attrs:
            kwargs[attr_name] = attrs[attr_name]

    return Operator(op_type, **kwargs)


def set_input(scope, op, inputs, place):
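    '''Copy the given inputs (numpy arrays, (array, LoD) tuples, or plain
    int/float scalars) into the corresponding scope variables on `place`.
    '''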
    def __set_input__(var_name, var):
        if isinstance(var, tuple) or isinstance(var, np.ndarray):
            tensor = scope.find_var(var_name).get_tensor()
            if isinstance(var, tuple):
                tensor.set_lod(var[1])
                var = var[0]
            tensor.set_dims(var.shape)
            tensor.set(var, place)
        elif isinstance(var, float):
            scope.find_var(var_name).set_float(var)
        elif isinstance(var, int):
            scope.find_var(var_name).set_int(var)

    for in_name, in_dup in Operator.get_op_inputs(op.type()):
        if in_name in inputs:
            if in_dup:
                sub_in = inputs[in_name]
                for sub_in_name, sub_in_val in sub_in:
                    __set_input__(sub_in_name, sub_in_val)
            else:
                __set_input__(in_name, inputs[in_name])


def set_output_grad(scope, op, outputs, place):
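    '''Feed every output gradient variable (<name>@GRAD) with a tensor of
    ones that matches the shape and dtype of the forward output.
    '''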
    def __set_tensor__(name):
        out_tensor = scope.find_var(name).get_tensor()
        grad_tensor = scope.var(grad_var_name(name)).get_tensor()
        out_dtype = out_tensor.dtype()
        if out_dtype == core.DataType.FP64:
            data = np.ones(out_tensor.shape(), dtype=np.float64)
        elif out_dtype == core.DataType.FP32:
            data = np.ones(out_tensor.shape(), dtype=np.float32)
        else:
            raise ValueError("Not supported data type " + str(out_dtype))

        grad_tensor.set(data, place)

    for out_name, out_dup in Operator.get_op_outputs(op.type()):
        if out_name in outputs:
            if out_dup:
                sub_out = outputs[out_name]
                for sub_out_name, _ in sub_out:
                    __set_tensor__(sub_out_name)
            else:
                __set_tensor__(out_name)


def get_numeric_gradient(scope,
                         op,
                         inputs,
                         input_to_check,
                         output_names,
                         delta=0.005,
                         in_place=False):
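    '''Numerically estimate the gradient of the summed outputs with respect
    to `input_to_check` using central finite differences with step `delta`,
    running the op on CPU. If `in_place` is True, the inputs are re-fed
    before every forward run.
    '''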
    set_input(scope, op, inputs, core.CPUPlace())

    tensor_to_check = scope.find_var(input_to_check).get_tensor()

    def product(dim):
        return reduce(lambda a, b: a * b, dim, 1)

    ctx = core.DeviceContext.create(core.CPUPlace())

    def get_output():
        sum = 0.0
        for output_name in output_names:
            op.run(scope, ctx)
            sum += np.array(scope.find_var(output_name).get_tensor()).sum()
        return sum

    tensor_to_check = scope.find_var(input_to_check).get_tensor()
    tensor_size = product(tensor_to_check.get_dims())
    tensor_to_check_dtype = tensor_to_check.dtype()
    if tensor_to_check_dtype == core.DataType.FP32:
        tensor_to_check_dtype = np.float32
    elif tensor_to_check_dtype == core.DataType.FP64:
        tensor_to_check_dtype = np.float64
    else:
        raise ValueError("Not supported data type " + str(
            tensor_to_check_dtype))

    gradient_flat = np.zeros(shape=(tensor_size, ), dtype=tensor_to_check_dtype)

    def __get_elem__(tensor, i):
        if tensor_to_check_dtype == np.float32:
            return tensor.get_float_element(i)
        else:
            return tensor.get_double_element(i)

    def __set_elem__(tensor, i, e):
        if tensor_to_check_dtype == np.float32:
            tensor.set_float_element(i, e)
        else:
            tensor.set_double_element(i, e)

    # We only compute the gradient of one element at a time;
    # a for loop walks over every element of the tensor.
    for i in xrange(tensor_size):
        if in_place:
            set_input(scope, op, inputs, core.CPUPlace())

        # get one input element through its index i.
        origin = __get_elem__(tensor_to_check, i)
        # add delta to it, run op and then get the sum of the result tensor.
        x_pos = origin + delta
        __set_elem__(tensor_to_check, i, x_pos)
        y_pos = get_output()

        if in_place:
            set_input(scope, op, inputs, core.CPUPlace())

        x_neg = origin - delta
        __set_elem__(tensor_to_check, i, x_neg)
        y_neg = get_output()

        __set_elem__(tensor_to_check, i, origin)
        gradient_flat[i] = (y_pos - y_neg) / delta / 2

    return gradient_flat.reshape(tensor_to_check.get_dims())


def get_backward_op(scope, op, no_grad_set):
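    '''Create the backward operator of `op` (honoring `no_grad_set`) and make
    sure all of its input and output variables exist in `scope`.
    '''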
    backward_op = core.Operator.backward(op, no_grad_set)
    for input in backward_op.input_vars():
        var = scope.var(input)
        var.get_tensor()
    for output in backward_op.output_vars():
        var = scope.var(output)
        var.get_tensor()
    return backward_op


def get_gradient(scope,
                 op,
                 inputs,
                 outputs,
                 grad_names,
                 place,
                 no_grad_set=None):
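    '''Run the forward op and its backward op on `place`, feeding ones as the
    output gradients, and return the tensors named in `grad_names` as numpy
    arrays.
    '''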
    ctx = core.DeviceContext.create(place)

    set_input(scope, op, inputs, place)

    op.run(scope, ctx)

    if no_grad_set is None:
        no_grad_set = set()

    backward_op = get_backward_op(scope, op, no_grad_set)
    set_output_grad(scope, op, outputs, place)

    backward_op.run(scope, ctx)

    return [
        np.array(scope.find_var(grad_name).get_tensor())
        for grad_name in grad_names
    ]


def append_input_output(block, op_proto, np_list, is_input):
    '''Insert VarDesc and generate Python variable instance'''
    proto_list = op_proto.inputs if is_input else op_proto.outputs

    def create_var(block, name, np_list, var_proto):
        if name not in np_list:
            assert var_proto.intermediate, "{} not found".format(name)
            shape = None
            lod_level = None
        else:
            np_value = np_list[name]
            if isinstance(np_value, tuple):
                shape = list(np_value[0].shape)
                lod_level = len(np_value[1])
            else:
                shape = list(np_value.shape)
                lod_level = 0
        return block.create_var(
            dtype="float32", shape=shape, lod_level=lod_level, name=name)

    var_dict = {}
    for var_proto in proto_list:
        var_name = str(var_proto.name)
        if is_input:
            if (var_name not in np_list) and var_proto.dispensable:
                continue
            assert (var_name in np_list) or (var_proto.dispensable), \
                            "Missing {} as input".format(var_name)
        if var_proto.duplicable:
            assert isinstance(np_list[var_name], list), \
                "Duplicable {} should be set as list".format(var_name)
            var_list = []
            for (name, np_value) in np_list[var_name]:
                var_list.append(
                    create_var(block, name, {name: np_value}, var_proto))
            var_dict[var_name] = var_list
        else:
            var_dict[var_name] = create_var(block, var_name, np_list, var_proto)

    return var_dict


class OpTest(unittest.TestCase):
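    '''Base class for operator unit tests.

    A subclass typically fills in `self.op_type`, `self.inputs`,
    `self.outputs` (and optionally `self.attrs`) in `setUp` and then calls
    `check_output` and/or `check_grad`. A rough, illustrative sketch (the op
    name, shapes and values below are made up, not taken from a real test):

        class TestMulOp(OpTest):
            def setUp(self):
                self.op_type = "mul"
                self.inputs = {
                    'X': np.random.random((32, 84)).astype("float32"),
                    'Y': np.random.random((84, 100)).astype("float32")
                }
                self.outputs = {
                    'Out': np.dot(self.inputs['X'], self.inputs['Y'])
                }

            def test_check_output(self):
                self.check_output()

            def test_check_grad(self):
                self.check_grad(['X', 'Y'], 'Out')
    '''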
    @classmethod
    def setUpClass(cls):
        '''Fix random seeds to remove randomness from tests'''
        cls._np_rand_state = np.random.get_state()
        cls._py_rand_state = random.getstate()

        np.random.seed(123)
        random.seed(124)

    @classmethod
    def tearDownClass(cls):
        '''Restore random seeds'''
        np.random.set_state(cls._np_rand_state)
        random.setstate(cls._py_rand_state)

    def feed_var(self, input_vars, place):
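        '''Wrap `self.inputs` into LoDTensors on `place`, flattening
        duplicable inputs and attaching LoD information when given, and
        return the resulting feed map.
        '''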
        feed_map = {}
        for var_name in input_vars:
            if isinstance(input_vars[var_name], list):
                for name, np_value in self.inputs[var_name]:
                    tensor = core.LoDTensor()
                    tensor.set(np_value, place)
                    feed_map[name] = tensor
            else:
                tensor = core.LoDTensor()
                if isinstance(self.inputs[var_name], tuple):
                    tensor.set(self.inputs[var_name][0], place)
                    tensor.set_lod(self.inputs[var_name][1])
                else:
                    tensor.set(self.inputs[var_name], place)
                feed_map[var_name] = tensor

        return feed_map

    def check_output_with_place(self, place, atol):
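        '''Build a Program containing only this operator, run it with the
        Executor on `place`, and compare every fetched output (and its LoD,
        when provided) against `self.outputs` within `atol`.
        '''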
        op_proto = OpProtoHolder.instance().get_op_proto(self.op_type)

        program = Program()
        block = program.global_block()

        inputs = append_input_output(block, op_proto, self.inputs, True)
        outputs = append_input_output(block, op_proto, self.outputs, False)

        op = block.append_op(
            type=self.op_type,
            inputs=inputs,
            outputs=outputs,
            attrs=self.attrs if hasattr(self, "attrs") else dict())

        fetch_list = []
        for var_name, var in outputs.iteritems():
            if var_name in self.outputs:
                if isinstance(var, list):
                    for v in var:
                        fetch_list.append(v)
                else:
                    fetch_list.append(var)

        feed_map = self.feed_var(inputs, place)

        exe = Executor(place)
        outs = exe.run(program, feed=feed_map, fetch_list=fetch_list)

        for out_name, out_dup in Operator.get_op_outputs(self.op_type):
            if out_name not in self.outputs:
                continue

            def find_actual(target_name, fetch_list):
                found = [
                    i for i, var in enumerate(fetch_list)
                    if var.name == target_name
                ]
                self.assertTrue(
                    len(found) == 1, "Found {} {}".format(
                        len(found), target_name))
                return found[0]

            if out_dup:
                sub_out = self.outputs[out_name]
                if not isinstance(sub_out, list):
                    raise AssertionError("sub_out type %s is not list",
                                         type(sub_out))
                for sub_out_name, expect in sub_out:
                    idx = find_actual(sub_out_name, fetch_list)
                    actual_t = np.array(outs[idx])
                    expect_t = expect[0] \
                        if isinstance(expect, tuple) else expect
                    self.assertTrue(
                        np.allclose(
                            actual_t, expect_t, atol=atol),
                        "Output (" + sub_out_name + ") has diff at " +
                        str(place))
                    if isinstance(expect, tuple):
                        self.assertListEqual(
                            actual_t.lod(), expect[1], "Output (" + sub_out_name
                            + ") has different lod at " + str(place))
            else:
                idx = find_actual(out_name, fetch_list)
                actual_t = outs[idx]
                expect = self.outputs[out_name]
                expect_t = expect[0] if isinstance(expect, tuple) else expect
                self.assertTrue(
                    np.allclose(
                        actual_t, expect_t, atol=atol),
                    "Output (" + out_name + ") has diff at " + str(place))
                if isinstance(expect, tuple):
                    self.assertListEqual(actual_t.lod(), expect[1],
                                         "Output (" + out_name +
                                         ") has different lod at " + str(place))

    def check_output(self, atol=1e-5):
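        '''Check the outputs on CPU and, when compiled with GPU support and
        the op supports GPU, on GPU as well.
        '''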
        places = [core.CPUPlace()]
        if core.is_compile_gpu() and core.op_support_gpu(self.op_type):
            places.append(core.GPUPlace(0))
        for place in places:
            self.check_output_with_place(place, atol)

    def __assert_is_close(self, numeric_grads, analytic_grads, names,
                          max_relative_error, msg_prefix):
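        '''Assert that the element-wise relative difference between numeric
        and analytic gradients stays below `max_relative_error` (absolute
        values smaller than 1e-3 are clamped to 1 to avoid blow-up).
        '''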

        for a, b, name in itertools.izip(numeric_grads, analytic_grads, names):
            abs_a = np.abs(a)
            abs_a[abs_a < 1e-3] = 1

            diff_mat = np.abs(a - b) / abs_a
            max_diff = np.max(diff_mat)

            def err_msg():
                offset = np.argmax(diff_mat > max_relative_error)
                return ("%s Variable %s max gradient diff %f over limit %f, "
                        "the first error element is %d") % (
                            msg_prefix, name, max_diff, max_relative_error,
                            offset)

            self.assertLessEqual(max_diff, max_relative_error, err_msg())

    def check_grad(self,
                   inputs_to_check,
                   output_names,
                   no_grad_set=None,
                   in_place=False,
                   max_relative_error=0.005):
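        '''Compare the numerically estimated gradients with the analytic
        gradients computed on CPU (and on GPU when available) for every
        input in `inputs_to_check`.
        '''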
        self.scope = core.Scope()
        op_inputs = self.inputs if hasattr(self, "inputs") else dict()
        op_outputs = self.outputs if hasattr(self, "outputs") else dict()
        op_attrs = self.attrs if hasattr(self, "attrs") else dict()
        self.op = create_op(self.scope, self.op_type, op_inputs, op_outputs,
                            op_attrs)
        if no_grad_set is None:
            no_grad_set = set()

        if not isinstance(output_names, list):
            output_names = [output_names]

        numeric_grads = [
            get_numeric_gradient(
                self.scope,
                self.op,
                self.inputs,
                input_to_check,
                output_names,
                in_place=in_place) for input_to_check in inputs_to_check
        ]
        grad_names = [
            grad_var_name(input_to_check) for input_to_check in inputs_to_check
        ]

        cpu_place = core.CPUPlace()
        cpu_analytic_grads = get_gradient(self.scope, self.op, self.inputs,
                                          self.outputs, grad_names, cpu_place,
                                          no_grad_set)

        self.__assert_is_close(numeric_grads, cpu_analytic_grads, grad_names,
                               max_relative_error,
                               "Gradient Check On %s" % str(cpu_place))

        if core.is_compile_gpu() and self.op.support_gpu():
            gpu_place = core.GPUPlace(0)
            gpu_analytic_grads = get_gradient(self.scope, self.op, self.inputs,
                                              self.outputs, grad_names,
                                              gpu_place, no_grad_set)

            self.__assert_is_close(numeric_grads, gpu_analytic_grads,
                                   grad_names, max_relative_error,
                                   "Gradient Check On %s" % str(gpu_place))

            for c_grad, g_grad, name in itertools.izip(
                    cpu_analytic_grads, gpu_analytic_grads, grad_names):
                self.assertTrue(
                    np.allclose(
                        c_grad, g_grad, atol=1e-4),
                    "output name: " + name + " has diff")