#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import warnings
import inspect

from .. import core
from ..framework import Variable, unique_name, static_only
from .layer_function_generator import OpProtoHolder
from paddle.fluid.dygraph.base import in_declarative_mode

_supported_int_dtype_ = [
    core.VarDesc.VarType.BOOL,
    core.VarDesc.VarType.UINT8,
    core.VarDesc.VarType.INT8,
    core.VarDesc.VarType.INT16,
    core.VarDesc.VarType.INT32,
    core.VarDesc.VarType.INT64,
]

compare_ops = ['__eq__', '__ne__', '__lt__', '__le__', '__gt__', '__ge__']

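# Human-readable forms of the patched expressions; used only to render the
# deprecation warning emitted from _binary_creator_ below.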
EXPRESSION_MAP = {
    "__add__": "A + B",
    "__radd__": "B + A",
    "__sub__": "A - B",
    "__rsub__": "B - A",
    "__mul__": "A * B",
    "__rmul__": "B * A",
    "__div__": "A / B",
    "__truediv__": "A / B",
    "__rdiv__": "B / A",
    "__rtruediv__": "B / A",
    "__pow__": "A ** B",
    "__rpow__": "B ** A",
    "__floordiv__": "A // B",
    "__mod__": "A % B",
    "__matmul__": "A @ B",
    "__eq__": "A == B",
    "__ne__": "A != B",
    "__lt__": "A < B",
    "__le__": "A <= B",
    "__gt__": "A > B",
    "__ge__": "A >= B",
}

_already_patch_variable = False


def monkey_patch_variable():
    def unique_tmp_name():
        return unique_name.generate("tmp")

    def safe_get_dtype(var):
        try:
            dtype = var.dtype
        except Exception:
            raise ValueError("Cannot get data type from %s" % var.name)
        return dtype

    def current_block(var):
        return var.block.program.current_block()

    def create_new_tmp_var(block, dtype):
        tmp_name = unique_tmp_name()
        return block.create_var(name=tmp_name, dtype=dtype)

    def create_new_tmp_sparse_var(block, dtype, type):
        tmp_name = unique_tmp_name()
        return block.create_var(name=tmp_name, dtype=dtype, type=type)

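    # Materializes a Python scalar as a constant tensor of the given shape by
    # appending a fill_constant op to the block; the result is stop_gradient.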
    def create_tensor(block, value, dtype, shape):
        value = float(value)
        var = create_new_tmp_var(block, dtype)
        block.append_op(
            type="fill_constant",
            outputs={'Out': [var]},
            attrs={
                'dtype': var.dtype,
                'shape': shape,
                'value': value,
                'force_cpu': False,
            },
            stop_gradient=True,
        )
        var.stop_gradient = True
        return var

    def create_scalar(block, value, dtype):
        # TODO(zhouwei): will change to [] which is 0-D Tensor
        return create_tensor(block, value, dtype, shape=[1])

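    # Creates a filled tensor that follows ref_var's batch dimension: the first
    # unknown (negative) dim is treated as the batch dim and copied from
    # ref_var at runtime via fill_constant_batch_size_like; any later unknown
    # dims collapse to 1.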
    def create_tensor_with_batchsize(ref_var, value, dtype):
        assert isinstance(ref_var, Variable)
        value = float(value)
        block = current_block(ref_var)
        var = create_new_tmp_var(block, dtype)
        batch_dim = -1
        out_shape = []
        for i, d in enumerate(ref_var.shape):
            if d < 0:
                if batch_dim < 0:
                    batch_dim = i
                    out_shape.append(d)
                else:
                    out_shape.append(1)
            else:
                out_shape.append(d)
        assert batch_dim != -1
        block.append_op(
            type='fill_constant_batch_size_like',
            outputs={'Out': [var]},
            inputs={'Input': [ref_var]},
            attrs={
                'shape': out_shape,
                'value': value,
                'input_dim_idx': batch_dim,
                'output_dim_idx': batch_dim,
            },
            stop_gradient=True,
        )

        var.stop_gradient = True
        return var

    @static_only
    def cpu(self):
        """
        Variable should not have cpu() and cuda() interfaces.
        But these interfaces can greatly facilitate dy2static,
        so we do nothing here.
        """
        return self

    @static_only
    def cuda(self):
        """
        Variable should not have cpu() and cuda() interfaces.
        But these interfaces can greatly facilitate dy2static,
        so we do nothing here.
        """
        return self

    @static_only
    def place(self):
        """
        Variable doesn't have a 'place' interface in static graph mode.
        But this interface can greatly facilitate dy2static,
        so we give a warning here and return None.
        """
        warnings.warn(
            "Variable doesn't have a 'place' interface in static graph mode; try not to use it. None will be returned."
        )
        return None

    def astype(self, dtype):
        """
        **Notes**:
            **The variable must be a** :ref:`api_fluid_Tensor`

        Cast a variable to a specified data type.

        Args:

            self(Variable): The source variable

            dtype: The target data type

        Returns:
            Variable: Variable with new dtype

        Examples:
            In Static Graph Mode:

            .. code-block:: python

                import paddle.fluid as fluid

                startup_prog = fluid.Program()
                main_prog = fluid.Program()
                with fluid.program_guard(main_prog, startup_prog):
                    original_variable = fluid.data(name = "new_variable", shape=[2,2], dtype='float32')
                    new_variable = original_variable.astype('int64')
                    print("new var's dtype is: {}".format(new_variable.dtype))

            In Dygraph Mode:

            .. code-block:: python

                import paddle.fluid as fluid
                import numpy as np

                x = np.ones([2, 2], np.float32)
                with fluid.dygraph.guard():
                    original_variable = fluid.dygraph.to_variable(x)
                    print("original var's dtype is: {}, numpy dtype is {}".format(original_variable.dtype, original_variable.numpy().dtype))
                    new_variable = original_variable.astype('int64')
                    print("new var's dtype is: {}, numpy dtype is {}".format(new_variable.dtype, new_variable.numpy().dtype))

        """
        block = current_block(self)
        out = create_new_tmp_var(block, dtype)
        block.append_op(
            type="cast",
            inputs={"X": [self]},
            outputs={"Out": [out]},
            attrs={"in_dtype": self.dtype, "out_dtype": out.dtype},
        )
        out.stop_gradient = self.stop_gradient
        return out

    @static_only
    def append(self, var):
        """
        **Notes**:
           **The type of the variable must be LoDTensorArray.**

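        Examples:
            A minimal sketch (an assumption, not taken from this file), in static graph mode:

            .. code-block:: python

                import paddle

                paddle.enable_static()
                arr = paddle.tensor.create_array(dtype='float32')
                x = paddle.full(shape=[1], fill_value=2.0, dtype='float32')
                arr.append(x)  # array_write(x, i=array_length(arr), array=arr)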
        """
        if not isinstance(var, Variable):
            if in_declarative_mode():
                # in dy2static mode, var may be a tensorable value such as int, float or np.ndarray
                from paddle.tensor.creation import to_tensor
                var = to_tensor(var)
            else:
                raise TypeError(
                    "Required input var should be Variable, but received {}".format(
                        type(var)
                    )
                )
        if self.type != core.VarDesc.VarType.LOD_TENSOR_ARRAY:
            raise TypeError(
                "Only Variable with VarType.LOD_TENSOR_ARRAY supports the `append` method, but received type: {}".format(
                    self.type
                )
            )
        from paddle.tensor.array import array_length, array_write

        array_write(x=var, i=array_length(self), array=self)

    @static_only
    def _item(self):
        """
        In order to be compatible with the item interface introduced by the dynamic graph, it does nothing but return self.
        It checks that the shape is 1-D.
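
        Examples:
            A minimal sketch (an assumption, not taken from this file), in static graph mode:

            .. code-block:: python

                import paddle

                paddle.enable_static()
                x = paddle.full(shape=[1], fill_value=3.0, dtype='float32')
                y = x.item()  # static-graph no-op: y is x itself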
        """
        if len(self.shape) > 1:
            raise TypeError(
                "Required input var should be 1-D Variable, but received {}".format(
                    self.shape
                )
            )
        return self

    @static_only
    def pop(self, *args):
        """
        The type of the variable must be LoDTensorArray.
        When self is a LoDTensorArray, calling pop is similar to Python's pop on a list.
        This interface is used to simplify dygraph to static graph operations.

        Args:
            self(Variable): The source variable, which must be LOD_TENSOR_ARRAY
            *args: optional, an int index.
        Returns:
            Variable: self[index]
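
        Examples:
            A minimal sketch (an assumption, not taken from this file), in static graph mode:

            .. code-block:: python

                import paddle

                paddle.enable_static()
                arr = paddle.tensor.create_array(dtype='float32')
                x = paddle.full(shape=[1], fill_value=1.0, dtype='float32')
                arr.append(x)
                last = arr.pop()  # pops and returns the last element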
        """
        from paddle.jit.dy2static.convert_operators import (
            _run_paddle_pop,
        )

        if self.type != core.VarDesc.VarType.LOD_TENSOR_ARRAY:
            raise TypeError(
                "Only Variable with VarType.LOD_TENSOR_ARRAY supports the `pop` method, but received type: {}".format(
                    self.type
                )
            )
        return _run_paddle_pop(self, *args)

    def _scalar_op_(var, scale, bias):
        block = current_block(var)
        out = create_new_tmp_var(block, var.dtype)
        block.append_op(
            type="scale",
            inputs={"X": [var]},
            outputs={"Out": [out]},
            attrs={"scale": scale, "bias": bias},
        )
        return out

    def _neg_(var):
        return _scalar_op_(var, -1.0, 0.0)

    @property
    def _ndim_(self):
        """
        Returns the number of dimensions of the current Variable

        Returns:
            the number of dimensions

        Examples:
            .. code-block:: python

                import paddle

                paddle.enable_static()

                # create a static Variable
                x = paddle.static.data(name='x', shape=[3, 2, 1])
                # print the dimension of the Variable
                print(x.ndim)
        """
        return len(self.shape)

    def _scalar_add_(var, value):
        return _scalar_op_(var, 1.0, value)

    def _scalar_sub_(var, value):
        return _scalar_op_(var, 1.0, -value)

    def _scalar_rsub_(var, value):
        return _scalar_op_(var, -1.0, value)

    def _scalar_mul_(var, value):
        return _scalar_op_(var, value, 0.0)

    def _scalar_div_(var, value):
        return _scalar_op_(var, 1.0 / value, 0.0)
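
    # What the scalar fast path expands to (a sketch): `x + 2.0` routes to
    # _scalar_add_(x, 2.0), i.e. a single `scale` op with scale=1.0 and
    # bias=2.0, instead of materializing 2.0 with fill_constant and then
    # running elementwise_add.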

    def _binary_creator_(
        method_name, op_type, reverse=False, scalar_method=None
    ):
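        # Builds a Variable magic method (e.g. __add__). `op_type` names the
        # backing operator (e.g. elementwise_add); `reverse` swaps the operands
        # for reflected variants such as __rsub__; `scalar_method`, when given,
        # is the `scale`-based fast path used for float/int right-hand sides.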
        def __impl__(self, other_var):
            # 1. scalar cases
            # we need to combine the tensor dtype and the scalar dtype, and cast the correct object
            if isinstance(other_var, float):
                # in all cases (+, -, *, /, **, //, %), we need to cast tensor.dtype to float
                if self.dtype in _supported_int_dtype_:
                    self = astype(self, 'float32')
                # here we use `scale` instead of `elementwise` ops to get better performance,
                # but only +, -, *, / can use this method
                if scalar_method is not None:
                    return scalar_method(self, other_var)
            elif isinstance(other_var, int):
                # in all cases (+, -, *, /, **, //, %), we can cast it to float,
                # because the output tensor's dtype depends on the type of the input tensor
                other_var = float(other_var)
                # division is a special case
                # NOTE(chenweihang): because we cast the tensor to float32 instead of float64,
                # the division result can only guarantee the numerical accuracy of 6 digits
                # after the decimal point. The result of numpy calculation is of float64 type,
                # so the calculation result here and the calculation result of numpy are
                # different after the 6th decimal place. If necessary, we can also use float64 here.
                # torch's behavior here is consistent with ours
                if (
                    op_type == 'elementwise_div'
                    and self.dtype in _supported_int_dtype_
                ):
                    self = astype(self, 'float32')
                # here we use `scale` instead of `elementwise` ops to get better performance,
                # but only +, -, *, / can use this method
                if scalar_method is not None:
                    return scalar_method(self, other_var)
            else:
                # do nothing
                pass

            # 2. create variable for scalar
            lhs_dtype = safe_get_dtype(self)
            if not isinstance(other_var, Variable):
                if reverse:
                    has_batch_size = False
                    for elem in self.shape:
                        if elem < 0:
                            has_batch_size = True
                            break
                    if not has_batch_size:
                        other_var = create_tensor(
                            current_block(self),
                            other_var,
                            dtype=lhs_dtype,
                            shape=self.shape,
                        )
                    else:
                        other_var = create_tensor_with_batchsize(
                            self, other_var, lhs_dtype
                        )
                else:
                    # add fill_op to current_block
                    other_var = create_scalar(
                        current_block(self), value=other_var, dtype=lhs_dtype
                    )

            # 3. unify right var type to left var
            rhs_dtype = safe_get_dtype(other_var)
            if lhs_dtype != rhs_dtype:
                other_var = astype(other_var, lhs_dtype)
            if reverse:
                self, other_var = other_var, self

            # NOTE(zhiqiu): the output of compare operator should be bool.
            if method_name in compare_ops:
                out = create_new_tmp_var(current_block(self), dtype="bool")
            else:
                out = create_new_tmp_var(current_block(self), dtype=lhs_dtype)

            axis = -1
            if other_var.ndim > 0 and other_var.shape[0] == -1:
                stack = inspect.stack()[1]
                file_name = stack[1]
                line_num = stack[2]
                warnings.warn(
                    "%s:%s\nThe behavior of expression %s has been unified with %s(X, Y, axis=-1) from Paddle 2.0. "
                    "If your code works well in the older versions but crashes in this version, try to use "
                    "%s(X, Y, axis=0) instead of %s. This transitional warning will be dropped in the future."
                    % (
                        file_name,
                        line_num,
                        EXPRESSION_MAP[method_name],
                        op_type,
                        op_type,
                        EXPRESSION_MAP[method_name],
                    ),
                    category=DeprecationWarning,
                )
            current_block(self).append_op(
                type=op_type,
                inputs={'X': [self], 'Y': [other_var]},
                outputs={'Out': out},
                attrs={'axis': axis},
            )
            return out

        comment = OpProtoHolder.instance().get_op_proto(op_type).comment

        __impl__.__doc__ = """
        {0}
        Args:
            self(Variable): left hand variable
            other_var(Variable|float|int): right hand variable

        Returns:
            Variable
        """.format(
            comment
        )
        __impl__.__name__ = method_name
        return __impl__

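    # Sparse helpers: emit the sparse_values / sparse_indices / sparse_to_dense
    # ops so that `x.values()`, `x.indices()` and `x.to_dense()` also work on a
    # static Variable.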
    def values(var):
        block = current_block(var)
        out = create_new_tmp_var(block, var.dtype)
        block.append_op(
            type="sparse_values",
            inputs={"x": [var]},
            outputs={"out": [out]},
            attrs={},
        )
        return out

    def indices(var):
        block = current_block(var)
        out = create_new_tmp_var(block, var.dtype)
        block.append_op(
            type="sparse_indices",
            inputs={"x": [var]},
            outputs={"out": [out]},
            attrs={},
        )
        return out

    def to_dense(var):
        block = current_block(var)
        out = create_new_tmp_var(block, var.dtype)
        block.append_op(
            type="sparse_to_dense",
            inputs={"x": [var]},
            outputs={"out": [out]},
            attrs={},
        )
        return out

    variable_methods = [
        # b = -a
        ('__neg__', _neg_),
        ('astype', astype),
        ('cpu', cpu),
        ('cuda', cuda),
        ('place', place),
        ('append', append),
        ('item', _item),
        ('pop', pop),
        ('dim', lambda x: len(x.shape)),
        ('ndimension', lambda x: len(x.shape)),
        ('ndim', _ndim_),
        (
            '__add__',
            _binary_creator_('__add__', 'elementwise_add', False, _scalar_add_),
        ),
        # a + b == b + a, no need to reverse explicitly
        (
            '__radd__',
            _binary_creator_(
                '__radd__', 'elementwise_add', False, _scalar_add_
            ),
        ),
        (
            '__sub__',
            _binary_creator_('__sub__', 'elementwise_sub', False, _scalar_sub_),
        ),
        (
            '__rsub__',
            _binary_creator_(
                '__rsub__', 'elementwise_sub', True, _scalar_rsub_
            ),
        ),
        (
            '__mul__',
            _binary_creator_('__mul__', 'elementwise_mul', False, _scalar_mul_),
        ),
        # a * b == b * a, no need to reverse explicitly
        (
            '__rmul__',
            _binary_creator_(
                '__rmul__', 'elementwise_mul', False, _scalar_mul_
            ),
        ),
        (
            '__div__',
            _binary_creator_('__div__', 'elementwise_div', False, _scalar_div_),
        ),
        (
            '__truediv__',
            _binary_creator_(
                '__truediv__', 'elementwise_div', False, _scalar_div_
            ),
        ),
        (
            '__rdiv__',
            _binary_creator_('__rdiv__', 'elementwise_div', True, None),
        ),
        (
            '__rtruediv__',
            _binary_creator_('__rtruediv__', 'elementwise_div', True, None),
        ),
        (
            '__pow__',
            _binary_creator_('__pow__', 'elementwise_pow', False, None),
        ),
        (
            '__rpow__',
            _binary_creator_('__rpow__', 'elementwise_pow', True, None),
        ),
        (
            '__floordiv__',
            _binary_creator_(
                '__floordiv__', 'elementwise_floordiv', False, None
            ),
        ),
        (
            '__mod__',
            _binary_creator_('__mod__', 'elementwise_mod', False, None),
        ),
        (
            '__matmul__',
            _binary_creator_('__matmul__', "matmul_v2", False, None),
        ),
        # for logical comparison
        ('__eq__', _binary_creator_('__eq__', 'equal', False, None)),
        ('__ne__', _binary_creator_('__ne__', 'not_equal', False, None)),
        ('__lt__', _binary_creator_('__lt__', 'less_than', False, None)),
        ('__le__', _binary_creator_('__le__', 'less_equal', False, None)),
        ('__gt__', _binary_creator_('__gt__', 'greater_than', False, None)),
        ('__ge__', _binary_creator_('__ge__', 'greater_equal', False, None)),
        ('values', values),
        ('indices', indices),
        ('to_dense', to_dense),
    ]

    global _already_patch_variable
    if not _already_patch_variable:
        for method in variable_methods:
            method_name = method[0]
            method_impl = method[1]
            setattr(Variable, method_name, method_impl)
    else:
        import paddle.tensor

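        # Variable's own methods are already patched; forward the remaining
        # paddle.tensor functions (and its magic-method table) onto Variable so
        # a static Variable exposes the same API surface as a dygraph Tensor.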
        for method_name in paddle.tensor.tensor_method_func:
            if hasattr(Variable, method_name):
                continue
            method_impl = getattr(paddle.tensor, method_name, None)
            if method_impl:
                setattr(Variable, method_name, method_impl)

        for magic_method, origin_method in paddle.tensor.magic_method_func:
            impl = getattr(paddle.tensor, origin_method, None)
            if impl:
                setattr(Variable, magic_method, impl)

    _already_patch_variable = True
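

# A hedged usage sketch (assumes static graph mode; the framework calls
# monkey_patch_variable() during import, so user code normally does not call
# it directly):
#
#     import paddle
#     paddle.enable_static()
#     x = paddle.static.data(name='x', shape=[3, 2], dtype='float32')
#     y = x * 2.0 + 1.0                 # scalar fast path: two `scale` ops
#     z = (x > 0.5).astype('float32')   # compare op yields bool, then cast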