#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

from .. import core
from ..framework import Variable, convert_np_dtype_to_dtype_, _varbase_creator
from ..layers.layer_function_generator import OpProtoHolder
from . import no_grad

import numpy as np
import six
import warnings

_supported_int_dtype_ = [
    core.VarDesc.VarType.UINT8,
    core.VarDesc.VarType.INT8,
    core.VarDesc.VarType.INT16,
    core.VarDesc.VarType.INT32,
    core.VarDesc.VarType.INT64,
    core.VarDesc.VarType.BOOL,
]
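
# Tensors with these integer dtypes are cast to float32 in the patched
# operators below when combined with a Python float scalar (or with any
# scalar in true division), since the integer dtype cannot hold the result.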

# NOTE(chenweihang): We currently do not fully support type promotion
# between tensors. Partial support is provided here because the
# interoperation of real and complex numbers in Paddle Quantum is very
# frequent, e.g. binary operations between `float` and `complex64`, so we
# must support correct type promotion on the APIs Paddle Quantum uses.
# For now this is only checked in dygraph (Paddle Quantum is based on dygraph).
# Full type promotion support will need to be fully verified later.
_supported_promote_complex_types_ = [
    '__add__',
    '__radd__',
    '__sub__',
    '__rsub__',
    '__mul__',
    '__rmul__',
    '__truediv__',
    '__rtruediv__',
    '__matmul__',
]
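
# A comment-only sketch of the promotion rule above (example values are
# illustrative, not from this file):
#
#     import paddle
#     x = paddle.ones([2], dtype='float32')
#     y = x.astype('complex64')
#     z = x * y    # '__mul__' is listed above and complex64 is involved, so
#                  # both operands are promoted and z.dtype is complex64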

_complex_dtypes = [
    core.VarDesc.VarType.COMPLEX64,
    core.VarDesc.VarType.COMPLEX128,
]

_already_patch_varbase = False


def monkey_patch_math_varbase():
    """
    Similar to monkey_patch_variable.
    The difference is, in dygraph mode, use auto-generated op functions for better performance.
    """

    @no_grad
    def create_tensor(value, dtype, shape):
        out = _varbase_creator(dtype=dtype)
        out = core.ops.fill_constant(out, 'dtype', dtype, 'shape', shape,
                                     'value', value, 'force_cpu', False)
        out.stop_gradient = True
        return out

    def create_scalar(value, dtype):
        return create_tensor(value, dtype, shape=[1])
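
    # create_scalar builds a shape-[1] tensor. In the binary ops below it is
    # used for the non-reversed case; the reversed case uses create_tensor
    # with the full shape, presumably because elementwise ops broadcast the
    # right operand onto the left, so a left-hand scalar must be expanded.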

    def astype(self, dtype):
        """

        Cast a Tensor to a specified data type.

        Args:
            dtype: The target data type.

        Returns:
            Tensor: a new Tensor with the target dtype

        Examples:
            .. code-block:: python

                import paddle

                original_tensor = paddle.ones([2, 2])
                print("original tensor's dtype is: {}".format(original_tensor.dtype))
                new_tensor = original_tensor.astype('float64')
                print("new tensor's dtype is: {}".format(new_tensor.dtype))

        """
        if not isinstance(dtype, core.VarDesc.VarType):
            dtype = convert_np_dtype_to_dtype_(dtype)
        return core.ops.cast(self, 'in_dtype', self.dtype, 'out_dtype', dtype)

    def _scalar_elementwise_op_(var, scale, bias):
        return core.ops.scale(var, 'scale', scale, 'bias', bias)

    def _neg_(var):
        return _scalar_elementwise_op_(var, -1.0, 0.0)

    def _float_(var):
        numel = np.prod(var.shape)
        assert numel == 1, "only one-element variables can be converted to float."
        tensor = var.value().get_tensor()
        assert tensor._is_initialized(), "variable's tensor is not initialized"
        return float(var.numpy().flatten()[0])

    def _long_(var):
        numel = np.prod(var.shape)
        assert numel == 1, "only one-element variables can be converted to long."
        tensor = var.value().get_tensor()
        assert tensor._is_initialized(), "variable's tensor is not initialized"
        if six.PY2:
            return long(var.numpy().flatten()[0])
        else:
            return int(var.numpy().flatten()[0])

    def _int_(var):
        numel = np.prod(var.shape)
        assert numel == 1, "only one-element variables can be converted to int."
        tensor = var.value().get_tensor()
        assert tensor._is_initialized(), "variable's tensor is not initialized"
        return int(var.numpy().flatten()[0])

    def _len_(var):
        return var.shape[0]

    def _index_(var):
        numel = np.prod(var.shape)
        assert numel == 1, "only one-element variables can be converted to a python index."
        tensor = var.value().get_tensor()
        assert tensor._is_initialized(), "variable's tensor is not initialized"
        if six.PY2:
            return long(var.numpy().flatten()[0])
        else:
            return int(var.numpy().flatten()[0])
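
    # __index__ lets a one-element integer tensor stand in wherever Python
    # expects an index, e.g. `seq[t]` or `range(t)` (a sketch, assuming `t`
    # holds a single integer value).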

    @property
    def _ndim_(var):
        return len(var.shape)

    @property
    def _size_(var):
        return np.prod(var.shape)

    def _scalar_add_(var, value):
        return _scalar_elementwise_op_(var, 1.0, value)

    def _scalar_sub_(var, value):
        return _scalar_elementwise_op_(var, 1.0, -value)

    def _scalar_rsub_(var, value):
        return _scalar_elementwise_op_(var, -1.0, value)

    def _scalar_mul_(var, value):
        return _scalar_elementwise_op_(var, value, 0.0)
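
    # The four helpers above (and _neg_) are all special cases of
    # scale(x, a, b) == a * x + b, computed by a single `scale` op:
    #     x + v  ->  scale(x,  1.0,  v)    (_scalar_add_)
    #     x - v  ->  scale(x,  1.0, -v)    (_scalar_sub_)
    #     v - x  ->  scale(x, -1.0,  v)    (_scalar_rsub_)
    #     x * v  ->  scale(x,  v,   0.0)   (_scalar_mul_)
    #     -x     ->  scale(x, -1.0, 0.0)   (_neg_)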

    # for binary operators such as elementwise and compare ops
    def _binary_creator_(method_name,
                         op_type,
                         reverse=False,
                         scalar_method=None):
        def __impl__(self, other_var):
            # 1. the scalar-exists cases
            # we need to combine tensor.dtype and scalar.dtype and cast the
            # correct object
            if isinstance(other_var, float):
                # in all cases (+, -, *, /, **, //, %), we need to cast
                # tensor.dtype to float
                if self.dtype in _supported_int_dtype_:
                    self = astype(self, 'float32')
                # here we use `scale` instead of `elementwise` ops to get
                # better performance, but only +, -, *, / can use this method
                if scalar_method is not None:
                    return scalar_method(self, other_var)
            elif isinstance(other_var, int):
                # in all cases (+, -, *, /, **, //, %), we can cast it to float
                # because the output tensor.dtype depends on the type of the
                # input tensor
                other_var = float(other_var)
                # division is a special case
                # NOTE(chenweihang): because we cast the tensor to float32
                # instead of float64, the division result can only guarantee
                # numerical accuracy to 6 digits after the decimal point. The
                # result of the numpy calculation is of float64 type, so the
                # result here differs from numpy's after the 6th decimal
                # place. If necessary, we can also use float64 here.
                # torch's behavior here is consistent with ours
                if op_type == 'elementwise_div' and self.dtype in _supported_int_dtype_:
                    self = astype(self, 'float32')
                # here we use `scale` instead of `elementwise` ops to get
                # better performance, but only +, -, * can use this method
                # NOTE(chentianyu03): / cannot use the `scale` method, because
                # the result of `scale` (self * (1 / other_var)) is not exactly
                # equal to the result of `elementwise_div`
                if scalar_method is not None:
                    return scalar_method(self, other_var)
            else:
                # do nothing
                pass

            # 2. create varbase for scalar
            lhs_dtype = self.dtype
            if not isinstance(other_var, core.VarBase):
                if reverse:
                    other_var = create_tensor(
                        other_var, dtype=lhs_dtype, shape=self.shape)
                else:
                    # add fill_op 
                    other_var = create_scalar(value=other_var, dtype=lhs_dtype)

            # 3. promote types or unify right var type to left var
            rhs_dtype = other_var.dtype
            if lhs_dtype != rhs_dtype:
                if method_name in _supported_promote_complex_types_ and (
                        lhs_dtype in _complex_dtypes or
                        rhs_dtype in _complex_dtypes):
                    # only when lhs_dtype or rhs_dtype is a complex type will
                    # the dtype be promoted; in other cases, directly use
                    # lhs_dtype, which is consistent with the original rule
                    promote_dtype = core._promote_types_if_complex_exists(
                        lhs_dtype, rhs_dtype)
                    self = self if lhs_dtype == promote_dtype else astype(
                        self, promote_dtype)
                    other_var = other_var if rhs_dtype == promote_dtype else astype(
                        other_var, promote_dtype)
                else:
                    warnings.warn(
                        'The dtypes of the left and right operands are not the same: '
                        'left dtype is {}, right dtype is {}; the right operand will '
                        'be cast to {}.'.format(lhs_dtype, rhs_dtype, lhs_dtype))
                    other_var = astype(other_var, lhs_dtype)

            if reverse:
                tmp = self
                self = other_var
                other_var = tmp

            # 4. calculation
            axis = -1
            math_op = getattr(core.ops, op_type)
            return math_op(self, other_var, 'axis', axis)

        comment = OpProtoHolder.instance().get_op_proto(op_type).comment

        __impl__.__doc__ = """
        {0}
        Args:
            other_var(Tensor|float|int): right hand Tensor

        Returns:
            Tensor
        """.format(comment)
        __impl__.__name__ = method_name
        return __impl__
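
    # A sketch of what _binary_creator_ produces: patching core.VarBase with
    #     _binary_creator_('__add__', 'elementwise_add', False, _scalar_add_)
    # makes `tensor + 2` take the scalar fast path (a single `scale` op via
    # _scalar_add_), while `tensor + other_tensor` dispatches to
    # core.ops.elementwise_add with axis=-1.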

    varbase_methods = [
        ('__neg__', _neg_),
        ('__float__', _float_),
        ('__long__', _long_),
        ('__int__', _int_),
        ('__len__', _len_),
        ('__index__', _index_),
        ('astype', astype),
        ('dim', lambda x: len(x.shape)),
        ('ndimension', lambda x: len(x.shape)),
        ('ndim', _ndim_),
        ('size', _size_),
        ('__add__',
         _binary_creator_('__add__', 'elementwise_add', False, _scalar_add_)),
        ## a + b == b + a, so there is no need to reverse explicitly
        ('__radd__',
         _binary_creator_('__radd__', 'elementwise_add', False, _scalar_add_)),
        ('__sub__', _binary_creator_('__sub__', 'elementwise_sub', False,
                                     _scalar_sub_)),
        ('__rsub__', _binary_creator_('__rsub__', 'elementwise_sub', True,
                                      _scalar_rsub_)),
        ('__mul__', _binary_creator_('__mul__', 'elementwise_mul', False,
                                     _scalar_mul_)),
        ## a * b == b * a, so there is no need to reverse explicitly
        ('__rmul__',
         _binary_creator_('__rmul__', 'elementwise_mul', False, _scalar_mul_)),
        ('__truediv__', _binary_creator_('__truediv__', 'elementwise_div',
                                         False, None)),
        ('__rtruediv__', _binary_creator_('__rtruediv__', 'elementwise_div',
                                          True, None)),
        ('__pow__', _binary_creator_('__pow__', 'elementwise_pow', False,
                                     None)),
        ('__rpow__', _binary_creator_('__rpow__', 'elementwise_pow', True,
                                      None)),
        ('__floordiv__', _binary_creator_('__floordiv__',
                                          'elementwise_floordiv', False, None)),
        ('__mod__', _binary_creator_('__mod__', 'elementwise_mod', False,
                                     None)),
        ('__matmul__', _binary_creator_('__matmul__', "matmul_v2", False,
                                        None)),
        ## for logical comparisons
        ('__eq__', _binary_creator_('__eq__', 'equal', False, None)),
        ('__ne__', _binary_creator_('__ne__', 'not_equal', False, None)),
        ('__lt__', _binary_creator_('__lt__', 'less_than', False, None)),
        ('__le__', _binary_creator_('__le__', 'less_equal', False, None)),
        ('__gt__', _binary_creator_('__gt__', 'greater_than', False, None)),
        ('__ge__', _binary_creator_('__ge__', 'greater_equal', False, None)),
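        ## __array_ufunc__ = None tells numpy that VarBase does not support
        ## numpy ufuncs, so e.g. `np_array + varbase` falls back to the
        ## reflected VarBase operators patched above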
        ('__array_ufunc__', None)
    ]

    global _already_patch_varbase
    if not _already_patch_varbase:
        for method in varbase_methods:
            method_name = method[0]
            method_impl = method[1]
            setattr(core.VarBase, method_name, method_impl)
    else:
        import paddle.tensor
        # Tensor methods from the paddle.tensor module
        for method_name in paddle.tensor.tensor_method_func:
            if hasattr(core.VarBase, method_name): continue
            method_impl = getattr(paddle.tensor, method_name, None)
            if method_impl: setattr(core.VarBase, method_name, method_impl)

        for magic_method, origin_method in paddle.tensor.magic_method_func:
            impl = getattr(paddle.tensor, origin_method, None)
            if impl: setattr(core.VarBase, magic_method, impl)

    _already_patch_varbase = True
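
# A minimal sketch of the patched behavior (comment-only; assumes dygraph
# mode, where monkey_patch_math_varbase() runs during `import paddle`):
#
#     import paddle
#     a = paddle.ones([2, 2])
#     b = a * 2 + 1          # scalar fast paths: two `scale` ops
#     c = a @ a              # __matmul__ -> core.ops.matmul_v2
#     d = a > b              # __gt__ -> core.ops.greater_than
#     print(float(b[0][0]))  # __float__ on a one-element tensor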