import unittest

import numpy
import itertools
import paddle.v2.framework.core as core
from paddle.v2.framework.op import Operator

__all__ = ['get_numeric_gradient']


def create_op(op_type):
    # TODO need to set attrs
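    # Map each declared input/output slot to a variable of the same name, so
    # an op input slot "X" will read the scope variable literally named "X".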
    kwargs = dict()
    for in_name in Operator.get_op_input_names(op_type):
        kwargs[in_name] = in_name
    for out_name in Operator.get_op_output_names(op_type):
        kwargs[out_name] = out_name

    return Operator(op_type, **kwargs)


def grad_var_name(var_name):
    return var_name + "@GRAD"


def empty_var_name():
    return "@EMPTY@"


def get_numeric_gradient(op,
                         input_values,
                         output_name,
                         input_to_check,
                         delta=0.005,
                         local_scope=None,
                         in_place=False):
    """
    Get the numeric gradient of an operator's input.

    :param op: C++ operator instance, could be a network.
    :param input_values: The input variables. Should be a dictionary whose
    keys are variable names and whose values are numpy arrays.
    :param output_name: The final output variable name.
    :param input_to_check: The input variable whose gradient is computed.
    :param delta: The perturbation value for the numeric gradient method. The
    smaller delta is, the more accurate the result will be, but a delta that
    is too small can cause numerical stability problems.
    :param local_scope: The local scope used by get_numeric_gradient.
    :param in_place: Set True if the operator overwrites its inputs, so that
    the inputs are restored before every run.
    :return: The gradient array in numpy format.
    """
    if local_scope is None:
        local_scope = core.Scope()

    # Create all input variables in local_scope
    for var_name in input_values:
        var = local_scope.new_var(var_name)
        tensor = var.get_tensor()
        tensor.set_dims(input_values[var_name].shape)
        tensor.alloc_float(core.CPUPlace())
        tensor.set(input_values[var_name], core.CPUPlace())

    # Create all output variables in local_scope
    opts = op.outputs()
    for key in opts:
        for output in opts[key]:
            if local_scope.find_var(output) is None:
                local_scope.new_var(output).get_tensor()
    op.infer_shape(local_scope)

    # allocate output memory
    for key in opts:
        for output in opts[key]:
            local_scope.find_var(output).get_tensor().alloc_float(
                core.CPUPlace())

    cpu_ctx = core.DeviceContext.create(core.CPUPlace())

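    # Running the op and summing the chosen output reduces it to a scalar, so
    # each numeric gradient entry computed below is d(sum(output)) / d(x_i).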
    def get_output():
        op.run(local_scope, cpu_ctx)
        return numpy.array(local_scope.find_var(output_name).get_tensor()).sum()

    def product(dim):
        return reduce(lambda a, b: a * b, dim, 1)

    def restore_inputs():
        for var_name in input_values:
            tensor_ = local_scope.find_var(var_name).get_tensor()
            tensor_.set(numpy.copy(input_values[var_name]), core.CPUPlace())

    # get the input tensor whose numeric gradient we want to compute.
    tensor_to_check = local_scope.find_var(input_to_check).get_tensor()
    tensor_size = product(tensor_to_check.get_dims())
    # prepare a numpy array to store the gradient.
    gradient_flat = numpy.zeros(shape=(tensor_size, ), dtype='float32')

    # we only compute the gradient of one element at a time, using a for loop
    # to iterate over every element.
    for i in xrange(tensor_size):
        if in_place:
            restore_inputs()
        # get one input element by its index i.
        origin = tensor_to_check.get_float_element(i)

        # add delta to it, run op and then get the sum of the result tensor.
        x_pos = origin + delta
        tensor_to_check.set_float_element(i, x_pos)
        y_pos = get_output()

        # subtract delta from this element, run the op and get the sum of the
        # result tensor.
        if in_place:
            restore_inputs()
        x_neg = origin - delta
        tensor_to_check.set_float_element(i, x_neg)
        y_neg = get_output()

        # restore old value
        tensor_to_check.set_float_element(i, origin)

        # compute the gradient of this element by central difference,
        # (y_pos - y_neg) / (2 * delta), and store it into a numpy array.
        gradient_flat[i] = (y_pos - y_neg) / delta / 2

    # reshape the gradient result to the shape of the source tensor.
    return gradient_flat.reshape(tensor_to_check.get_dims())

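# A minimal usage sketch for get_numeric_gradient. The op type "mul" and the
# slot names "X", "Y", "Out" are assumptions for illustration and may differ
# from the actual operator registry:
#
#   op = create_op("mul")
#   inputs = {
#       "X": numpy.random.random((3, 4)).astype("float32"),
#       "Y": numpy.random.random((4, 5)).astype("float32"),
#   }
#   # numeric d(sum(Out))/d(X), same shape as inputs["X"]
#   dx = get_numeric_gradient(op, inputs, "Out", "X")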

class GradientChecker(unittest.TestCase):
    def __get_gradient(self, forward_op, backward_op, input_value, grad_names,
                       place):
        """Get the input gradients after running forward and backward operators
        on the given place.

        :param forward_op: forward operator
        :type forward_op: Operator
        :param backward_op: backward operator
        :type backward_op: Operator
        :param input_value: input values.
        :type input_value: dict{string:numpy.array}
        :param grad_names: the names of returned input gradients.
        :type grad_names: a list of string
        :param place: the device type.
        :type place: CPUPlace or GPUPlace
        :return: the input gradients of the given grad_names.
        :rtype: a list of numpy.array
        """
        scope = core.Scope()
        ctx = core.DeviceContext.create(place)

        inputs = forward_op.inputs()
        in_names = [item for k in inputs for item in inputs[k]]
        outputs = forward_op.outputs()
        out_names = [item for k in outputs for item in outputs[k]]

        # create input var and set value
        for name, value in input_value.iteritems():
            if name not in in_names:
                raise ValueError(name + "does not exist in Op's inputs.")
            var = scope.new_var(name).get_tensor()
            var.set_dims(value.shape)
            var.set(value, place)

        # run forward op
        for out_name in out_names:
            scope.new_var(out_name)
        forward_op.infer_shape(scope)
        forward_op.run(scope, ctx)

        # create a gradient variable for each output, set its shape to match
        # the output, and fill it with ones (i.e. d(sum(out))/d(out) == 1)
        for name in out_names:
            out_tensor = scope.find_var(name).get_tensor()
            grad_tensor = scope.new_var(grad_var_name(name)).get_tensor()
            grad_tensor.set_dims(out_tensor.shape())
            data = numpy.ones(out_tensor.shape(), dtype=numpy.float32)
            grad_tensor.set(data, place)

        # run backward op
        backward_outs = backward_op.outputs()
        backward_names = [
            item for key in backward_outs for item in backward_outs[key]
        ]
        for name in backward_names:
            scope.new_var(name)

        backward_op.infer_shape(scope)
        backward_op.run(scope, ctx)

        outs = [
            numpy.array(scope.find_var(name).get_tensor())
            for name in grad_names
        ]
        return outs

    def compare_grad(self, forward_op, input_value, no_grad_set=None):
        """ Compare the input gradients between CPU and GPU for the given forward
        operator.

        :param forward_op: forward operator
        :type forward_op: Operator
        :param input_value: input values.
        :type input_value: dict{string:numpy.array}
        :param no_grad_set: the set of variable names that need no gradients.
        :type no_grad_set: a set of string
        :raises: AssertionError if the CPU and GPU gradients differ.
        """
        if no_grad_set is None:
            no_grad_set = set()
        backward_op = core.Operator.backward(forward_op, no_grad_set)
        # return if not compiled with GPU or the GPU kernel is not implemented
        if not (core.is_compile_gpu() and backward_op.support_gpu()):
            return

        outputs = backward_op.outputs()
        out_names = [item for k in outputs for item in outputs[k]]
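        # empty_var_name() marks gradient outputs that the backward op does
        # not actually produce (e.g. for variables in no_grad_set); skip them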
        out_names = filter(lambda x: x != empty_var_name(), out_names)
        cpu_grads = self.__get_gradient(forward_op, backward_op, input_value,
                                        out_names, core.CPUPlace())
        gpu_grads = self.__get_gradient(forward_op, backward_op, input_value,
                                        out_names, core.GPUPlace(0))

        for c_grad, g_grad, name in itertools.izip(cpu_grads, gpu_grads,
                                                   out_names):
            self.assertTrue(
                numpy.allclose(c_grad, g_grad, atol=1e-4),
                "output name: " + name + " has diff")

    def __assert_is_close(self, numeric_grads, analytic_grads, names,
                          max_relative_error, msg_prefix):
        """Use relative error for the comparison.

        :param numeric_grads: the numerical gradients.
        :type numeric_grads: a list of numpy.array
        :param analytic_grads: the analytical gradients.
        :type analytic_grads: a list of numpy.array
        :param names: the names of gradients, used to print for debug.
        :type names: a list of string
        :param msg_prefix: string info, used to print for debug.
        :type msg_prefix: string
        """
        for a, b, name in itertools.izip(numeric_grads, analytic_grads, names):
            abs_a = numpy.abs(a)
            # if abs_a is nearly zero, then use abs error for a, not relative
            # error.
            abs_a[abs_a < 1e-3] = 1

            diff_mat = numpy.abs(a - b) / abs_a
            max_diff = numpy.max(diff_mat)

            def err_msg():
                offset = numpy.argmax(diff_mat > max_relative_error)
                return "%s Variable %s max gradient diff %f over limit %f, the first " \
                       "error element is %d" % (
                       msg_prefix, name, max_diff, max_relative_error, offset)

            self.assertLessEqual(max_diff, max_relative_error, err_msg())

    def check_grad(self,
                   forward_op,
                   input_vars,
                   inputs_to_check,
                   output_name,
                   no_grad_set=None,
                   only_cpu=False,
                   in_place=False,
                   max_relative_error=0.005):
        """
        :param forward_op: used to create backward_op
        :param input_vars: numpy values of the input variables. The following
            computation will use these variables.
        :param inputs_to_check: the input variable names whose gradients
            should be checked.
        :param output_name: the output variable name of forward network.
        :param max_relative_error: The relative tolerance parameter.
        :param no_grad_set: used when creating the backward ops
        :param only_cpu: only compute and check gradient on the cpu kernel.
        :param in_place: whether the operator overwrites its inputs; passed
            through to get_numeric_gradient.
        :return:
        """
        if no_grad_set is None:
            no_grad_set = set()

        no_tmp_out = forward_op.no_intermediate_outputs()
        if len(no_tmp_out) != 1:
            raise ValueError(
                "forward_op should have exactly one non-intermediate output")

        inputs = forward_op.inputs()
        in_names = [item for k in inputs for item in inputs[k]]
        for no_grad in no_grad_set:
            if no_grad not in in_names:
                raise ValueError("no_grad should be in in_names")
        backward_op = core.Operator.backward(forward_op, no_grad_set)

        places = [core.CPUPlace()]
        if not only_cpu and core.is_compile_gpu() and backward_op.support_gpu():
            places.append(core.GPUPlace(0))

        # get numerical gradients
        numeric_grads = [
            get_numeric_gradient(
                forward_op, input_vars, output_name, name, in_place=in_place)
            for name in inputs_to_check
        ]

        check_names = [grad_var_name(name) for name in inputs_to_check]
        for place in places:
            # get analytical gradients on the given device
            analytic_grads = self.__get_gradient(forward_op, backward_op,
                                                 input_vars, check_names, place)
X
            self.__assert_is_close(numeric_grads, analytic_grads, check_names,
                                   max_relative_error,
                                   "Gradient Check On %s" % str(place))