#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import warnings
import inspect
import paddle

from .. import core
from ..framework import Variable, unique_name
from .layer_function_generator import OpProtoHolder

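# Integer dtypes that get special handling in the scalar fast paths below.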
_supported_int_dtype_ = [
    core.VarDesc.VarType.UINT8,
    core.VarDesc.VarType.INT8,
    core.VarDesc.VarType.INT16,
    core.VarDesc.VarType.INT32,
    core.VarDesc.VarType.INT64,
]

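# Comparison operators always produce a bool output (see _binary_creator_).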
compare_ops = ['__eq__', '__ne__', '__lt__', '__le__', '__gt__', '__ge__']

EXPRESSION_MAP = {
    "__add__": "A + B",
    "__radd__": "B + A",
    "__sub__": "A - B",
    "__rsub__": "B - A",
    "__mul__": "A * B",
    "__rmul__": "B * A",
    "__div__": "A / B",
    "__truediv__": "A / B",
    "__rdiv__": "B / A",
    "__rtruediv__": "B / A",
    "__pow__": "A ** B",
    "__rpow__": "B ** A",
    "__floordiv__": "A // B",
    "__rfloordiv__": "B // A",
    "__mod__": "A % B",
    "__eq__": "A == B",
    "__ne__": "A != B",
    "__lt__": "A < B",
    "__le__": "A <= B",
    "__gt__": "A > B",
    "__ge__": "A >= B"
}

# Methods copied onto Variable from paddle.tensor.
# Update this list whenever paddle.tensor gains a new Tensor-operation method.
common_methods = [
    'exp', 'tanh', 'atan', 'sqrt', 'rsqrt', 'abs', 'ceil', 'floor', 'cos',
    'acos', 'asin', 'sin', 'sinh', 'cosh', 'round', 'reciprocal', 'square',
    'rank', 'matmul', 'dot', 'norm', 'transpose', 'dist', 't', 'cross',
    'cholesky', 'bmm', 'histogram', 'equal', 'greater_equal', 'greater_than',
    'is_empty', 'isfinite', 'less_equal', 'less_than', 'logical_and',
    'logical_not', 'logical_or', 'logical_xor', 'not_equal', 'reduce_all',
    'reduce_any', 'allclose', 'equal_all', 'cast', 'expand', 'expand_as',
    'tile', 'flatten', 'gather', 'gather_nd', 'reshape', 'reverse', 'scatter',
    'scatter_nd_add', 'scatter_nd', 'shard_index', 'slice', 'split', 'squeeze',
    'strided_slice', 'unique', 'unique_with_counts', 'unsqueeze', 'flip',
    'unbind', 'roll', 'cumsum', 'increment', 'log', 'pow', 'reciprocal',
    'round', 'rsqrt', 'scale', 'sign', 'stanh', 'sum', 'reduce_prod', 'max',
    'min', 'mm', 'div', 'multiply', 'add', 'logsumexp', 'log1p', 'erf',
    'addcmul', 'addmm', 'clamp', 'trace', 'kron', 'argmax', 'argmin', 'argsort',
    'has_inf', 'has_nan', 'topk', 'index_select', 'nonzero', 'sort',
    'index_sample', 'mean', 'std', 'var', 'elementwise_add', 'elementwise_div',
    'elementwise_floordiv', 'elementwise_mod', 'elementwise_pow',
    'elementwise_sub'
]

_already_patch_variable = False


def monkey_patch_variable():
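    """Monkey-patch fluid.Variable so that static-graph variables support
    Python math operators and, on later calls, the paddle.tensor methods
    listed in common_methods.
    """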
    def unique_tmp_name():
        return unique_name.generate("tmp")

    def safe_get_dtype(var):
        try:
            dtype = var.dtype
        except Exception:
            raise ValueError("Cannot get data type from %s" % var.name)
        return dtype

    def current_block(var):
        return var.block.program.current_block()

    def create_new_tmp_var(block, dtype):
        tmp_name = unique_tmp_name()
        return block.create_var(name=tmp_name, dtype=dtype)

    def create_tensor(block, value, dtype, shape):
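        # Append a fill_constant op that creates a constant tensor with the
        # given shape, dtype and value.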
        value = float(value)
        var = create_new_tmp_var(block, dtype)
        block.append_op(
            type="fill_constant",
            outputs={'Out': [var]},
            attrs={
                'dtype': var.dtype,
                'shape': shape,
                'value': value,
                'force_cpu': False
            },
            stop_gradient=True)
        var.stop_gradient = True
        return var

    def create_scalar(block, value, dtype):
        return create_tensor(block, value, dtype, shape=[1])

    def create_tensor_with_batchsize(ref_var, value, dtype):
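        # Like create_tensor, but uses fill_constant_batch_size_like so that
        # the constant's batch dimension follows ref_var's -1 (unknown) dim.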
        assert isinstance(ref_var, Variable)
        value = float(value)
        block = current_block(ref_var)
        var = create_new_tmp_var(block, dtype)
        batch_dim = -1
        out_shape = []
        for i, d in enumerate(ref_var.shape):
            if d < 0:
                if batch_dim < 0:
                    batch_dim = i
                    out_shape.append(d)
                else:
                    out_shape.append(1)
            else:
                out_shape.append(d)
        assert batch_dim != -1
        block.append_op(
            type='fill_constant_batch_size_like',
            outputs={'Out': [var]},
            inputs={'Input': [ref_var]},
            attrs={
                'shape': out_shape,
                'value': value,
                'input_dim_idx': batch_dim,
                'output_dim_idx': batch_dim
            },
            stop_gradient=True)

        var.stop_gradient = True
        return var

    def astype(self, dtype):
        """
        **Notes**:
            **The variable must be a** :ref:`api_fluid_Tensor`

        Cast a variable to a specified data type.

        Args:

            self(Variable): The source variable

            dtype: The target data type

        Returns:
            Variable: Variable with new dtype

        Examples:
            In Static Graph Mode:

            .. code-block:: python

                import paddle.fluid as fluid

                startup_prog = fluid.Program()
                main_prog = fluid.Program()
                with fluid.program_guard(main_prog, startup_prog):
                    original_variable = fluid.data(name="new_variable", shape=[2,2], dtype='float32')
                    new_variable = original_variable.astype('int64')
                    print("new var's dtype is: {}".format(new_variable.dtype))

            In Dygraph Mode:

            .. code-block:: python

                import paddle.fluid as fluid
                import numpy as np

                x = np.ones([2, 2], np.float32)
                with fluid.dygraph.guard():
                    original_variable = fluid.dygraph.to_variable(x)
                    print("original var's dtype is: {}, numpy dtype is {}".format(original_variable.dtype, original_variable.numpy().dtype))
                    new_variable = original_variable.astype('int64')
                    print("new var's dtype is: {}, numpy dtype is {}".format(new_variable.dtype, new_variable.numpy().dtype))

        """
        block = current_block(self)
        out = create_new_tmp_var(block, dtype)
        block.append_op(
            type="cast",
            inputs={"X": [self]},
            outputs={"Out": [out]},
            attrs={"in_dtype": self.dtype,
                   "out_dtype": out.dtype})
        return out

    def _scalar_op_(var, scale, bias):
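        # Computes out = scale * var + bias with a single "scale" op.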
        block = current_block(var)
        out = create_new_tmp_var(block, var.dtype)
        block.append_op(
            type="scale",
            inputs={"X": [var]},
            outputs={"Out": [out]},
            attrs={"scale": scale,
                   "bias": bias})
        return out

    def _neg_(var):
        return _scalar_op_(var, -1.0, 0.0)

    def _scalar_add_(var, value):
        return _scalar_op_(var, 1.0, value)

    def _scalar_sub_(var, value):
        return _scalar_op_(var, 1.0, -value)

    def _scalar_rsub_(var, value):
        return _scalar_op_(var, -1.0, value)

    def _scalar_mul_(var, value):
        return _scalar_op_(var, value, 0.0)

    def _scalar_div_(var, value):
        return _scalar_op_(var, 1.0 / value, 0.0)

    # TODO(shenliang03): currently divide, floor_divide and remainder are
    # dispatched to the corresponding paddle API so that type promotion is
    # handled there.
    def _binary_method_creator_(op_type, reverse=False):
        def __impl__(self, other_var):
            op = getattr(paddle, op_type)
            if reverse:
                return op(other_var, self)
            else:
                return op(self, other_var)

        __impl__.__doc__ = """

        See paddle.{}""".format(op_type)
        __impl__.__name__ = op_type

        return __impl__

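    # Builds an operator method (e.g. __add__) that appends the given
    # elementwise op; scalar_method, when provided, is used as a fast path
    # for plain Python int/float operands.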
    def _binary_creator_(method_name,
                         op_type,
                         reverse=False,
                         scalar_method=None):
        def __impl__(self, other_var):
            # FIXME(zjl): elementwise_div between integers cannot be converted to scale,
            # which may lose accuracy. This is a hot fix for release 1.6.
            if scalar_method is not None and not (
                    op_type == 'elementwise_div' and
                    self.dtype in _supported_int_dtype_):
                if isinstance(other_var, float):
                    if self.dtype in _supported_int_dtype_:
                        assert other_var == int(other_var), \
                            "float value {} cannot convert to integer".format(other_var)
                    return scalar_method(self, other_var)
                elif isinstance(other_var, int):
                    return scalar_method(self, float(other_var))

            lhs_dtype = safe_get_dtype(self)

            if not isinstance(other_var, Variable):
                if reverse:
                    has_batch_size = False
                    for elem in self.shape:
                        if elem < 0:
                            has_batch_size = True
                            break
                    if not has_batch_size:
                        other_var = create_tensor(
                            current_block(self),
                            other_var,
                            dtype=lhs_dtype,
                            shape=self.shape)
                    else:
                        other_var = create_tensor_with_batchsize(
                            self, other_var, lhs_dtype)
                else:
                    # append a fill_constant op to the current block
                    other_var = create_scalar(
                        current_block(self), value=other_var, dtype=lhs_dtype)

            rhs_dtype = safe_get_dtype(other_var)
            if lhs_dtype != rhs_dtype:
                other_var = astype(other_var, lhs_dtype)
            if reverse:
                tmp = self
                self = other_var
                other_var = tmp

            # NOTE(zhiqiu): the output of compare operator should be bool.
            if method_name in compare_ops:
                out = create_new_tmp_var(current_block(self), dtype="bool")
            else:
                out = create_new_tmp_var(current_block(self), dtype=lhs_dtype)

            axis = -1
            if other_var.shape[0] == -1:
                stack = inspect.stack()[1]
                file_name = stack[1]
                line_num = stack[2]
                warnings.warn(
                    "%s:%s\nThe behavior of expression %s has been unified with %s(X, Y, axis=-1) since Paddle 2.0. "
                    "If your code worked well in older versions but crashes in this version, try "
                    "%s(X, Y, axis=0) instead of %s. This transitional warning will be dropped in the future."
                    % (file_name, line_num, EXPRESSION_MAP[method_name],
                       op_type, op_type, EXPRESSION_MAP[method_name]))
            current_block(self).append_op(
                type=op_type,
                inputs={'X': [self],
                        'Y': [other_var]},
                outputs={'Out': out},
                attrs={'axis': axis})
            return out

        comment = OpProtoHolder.instance().get_op_proto(op_type).comment

        __impl__.__doc__ = """
        {0}
        Args:
            self(Variable): left hand variable
            other_var(Variable|float|int): right hand variable

        Returns:
            Variable
        """.format(comment)
        __impl__.__name__ = method_name
        return __impl__

    variable_methods = [
        #   b=-a
        ('__neg__', _neg_),
        ('astype', astype),
        ('__add__', _binary_creator_('__add__', 'elementwise_add', False,
                                     _scalar_add_)),
        # a + b == b + a, so no need to reverse explicitly
        ('__radd__',
         _binary_creator_('__radd__', 'elementwise_add', False, _scalar_add_)),
        ('__sub__', _binary_creator_('__sub__', 'elementwise_sub', False,
                                     _scalar_sub_)),
        ('__rsub__', _binary_creator_('__rsub__', 'elementwise_sub', True,
                                      _scalar_rsub_)),
        ('__mul__', _binary_creator_('__mul__', 'elementwise_mul', False,
                                     _scalar_mul_)),
        # a * b == b * a, so no need to reverse explicitly
        ('__rmul__',
         _binary_creator_('__rmul__', 'elementwise_mul', False, _scalar_mul_)),
        ('__pow__', _binary_creator_('__pow__', 'elementwise_pow', False,
                                     None)),
        ('__rpow__', _binary_creator_('__rpow__', 'elementwise_pow', True,
                                      None)),
        # These binary operators dispatch to paddle.<op_type> (see _binary_method_creator_)
        ('__div__', _binary_method_creator_('divide', False)),
        ('__rdiv__', _binary_method_creator_('divide', True)),
        ('__truediv__', _binary_method_creator_('divide', False)),
        ('__rtruediv__', _binary_method_creator_('divide', True)),
        ('__floordiv__', _binary_method_creator_('floor_divide', False)),
        ('__rfloordiv__', _binary_method_creator_('floor_divide', True)),
        ('__mod__', _binary_method_creator_('remainder', False)),
        # comparison operators
        ('__eq__', _binary_creator_('__eq__', 'equal', False, None)),
        ('__ne__', _binary_creator_('__ne__', 'not_equal', False, None)),
        ('__lt__', _binary_creator_('__lt__', 'less_than', False, None)),
        ('__le__', _binary_creator_('__le__', 'less_equal', False, None)),
        ('__gt__', _binary_creator_('__gt__', 'greater_than', False, None)),
        ('__ge__', _binary_creator_('__ge__', 'greater_equal', False, None))
    ]

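    # The first call binds the operator overloads to Variable; subsequent
    # calls attach the paddle.tensor methods listed in common_methods.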
    global _already_patch_variable
    if not _already_patch_variable:
        for method in variable_methods:
            method_name = method[0]
            method_impl = method[1]
            setattr(Variable, method_name, method_impl)
    else:
        import paddle.tensor
        for method_name in common_methods:
            if hasattr(Variable, method_name): continue
            method_impl = getattr(paddle.tensor, method_name, None)
            if method_impl: setattr(Variable, method_name, method_impl)

    _already_patch_variable = True