#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

from .. import core
from ..framework import Variable, convert_np_dtype_to_dtype_, _varbase_creator
from ..layers.layer_function_generator import OpProtoHolder
from . import no_grad

import numpy as np
import warnings
from paddle import _C_ops

_supported_int_dtype_ = [
    core.VarDesc.VarType.UINT8,
    core.VarDesc.VarType.INT8,
    core.VarDesc.VarType.INT16,
    core.VarDesc.VarType.INT32,
    core.VarDesc.VarType.INT64,
    core.VarDesc.VarType.BOOL,
]

# NOTE(chenweihang): We currently do not fully support type promotion between
# tensors. Partial support is added here because the interoperation of real
# and complex numbers is very frequent in paddle quantum, e.g. binary
# operations between `float` and `complex64`, so we must support correct type
# promotion for the APIs paddle quantum uses.
# For now this is only checked in dygraph (paddle quantum is dygraph-based).
# Full type promotion support will need to be verified later.
_supported_promote_complex_types_ = [
    '__add__',
    '__radd__',
    '__sub__',
    '__rsub__',
    '__mul__',
    '__rmul__',
    '__div__',
    '__truediv__',
    '__rdiv__',
    '__rtruediv__',
    '__matmul__',
]
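
# An illustration of the rule above (dygraph only, a sketch): when either
# operand of one of the listed methods is complex, both operands are promoted
# to the complex dtype, e.g.
#
#   import paddle
#   x = paddle.ones([2], dtype='float32')
#   y = paddle.ones([2], dtype='complex64')
#   (x * y).dtype   # complex64, since '__mul__' is in the list above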

_complex_dtypes = [
    core.VarDesc.VarType.COMPLEX64,
    core.VarDesc.VarType.COMPLEX128,
]

_already_patch_varbase = False


def monkey_patch_math_varbase():
    """
    Similar to monkey_patch_variable.
    The difference is that, in dygraph mode, the patched methods use the
    auto-generated op functions (_C_ops) for better performance.
    """

    @no_grad
    def create_tensor(value, dtype, shape):
        out = _varbase_creator(dtype=dtype)
        out = _C_ops.fill_constant(out, 'dtype', dtype, 'shape', shape, 'value',
                                   value, 'force_cpu', False)
        out.stop_gradient = True
        return out

    def create_scalar(value, dtype):
        return create_tensor(value, dtype, shape=[1])
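
    # For illustration (a sketch, not part of the patched API): create_scalar
    # behaves roughly like a stop-gradient paddle.full([1], value, dtype=dtype),
    # built directly on the fill_constant C++ op.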

    def astype(self, dtype):
        """

        Cast a Tensor to a specified data type.

        Args:
            dtype: The target data type.

        Returns:
            Tensor: a new Tensor with target dtype

        Examples:
            .. code-block:: python

                import paddle

                original_tensor = paddle.ones([2, 2])
                print("original tensor's dtype is: {}".format(original_tensor.dtype))
                new_tensor = original_tensor.astype('float64')
                print("new tensor's dtype is: {}".format(new_tensor.dtype))

        """
        if not isinstance(dtype, core.VarDesc.VarType):
            dtype = convert_np_dtype_to_dtype_(dtype)
        return _C_ops.cast(self, 'in_dtype', self.dtype, 'out_dtype', dtype)

    def _scalar_elementwise_op_(var, scale, bias):
        return _C_ops.scale(var, 'scale', scale, 'bias', bias)
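
    # note: the `scale` op computes out = scale * var + bias in a single
    # fused kernel, which is why the scalar helpers below prefer it over
    # the generic elementwise ops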

    def _neg_(var):
        return _scalar_elementwise_op_(var, -1.0, 0.0)

    def _float_(var):
        numel = np.prod(var.shape)
        assert numel == 1, "only a one-element Tensor can be converted to float."
        tensor = var.value().get_tensor()
        assert tensor._is_initialized(), "variable's tensor is not initialized"
        return float(var.numpy().flatten()[0])

    def _long_(var):
        numel = np.prod(var.shape)
        assert numel == 1, "only a one-element Tensor can be converted to long."
        tensor = var.value().get_tensor()
        assert tensor._is_initialized(), "variable's tensor is not initialized"
        return int(var.numpy().flatten()[0])

    def _int_(var):
        numel = np.prod(var.shape)
        assert numel == 1, "only a one-element Tensor can be converted to int."
        tensor = var.value().get_tensor()
        assert tensor._is_initialized(), "variable's tensor is not initialized"
        return int(var.numpy().flatten()[0])

    def _len_(var):
        if var.type == core.VarDesc.VarType.VOCAB:
            return len(var.value().get_map_tensor())
        elif var.type == core.VarDesc.VarType.STRINGS:
            return len(var.value().get_string_tensor())
        else:
            return var.shape[0]

    def _index_(var):
        numel = np.prod(var.shape)
        assert numel == 1, "only a one-element Tensor can be converted to a python index."
        tensor = var.value().get_tensor()
        assert tensor._is_initialized(), "variable's tensor is not initialized"
        return int(var.numpy().flatten()[0])

    @property
    def _ndim_(var):
        return len(var.shape)

    @property
    def _size_(var):
        return np.prod(var.shape)

    @property
    def _T_(var):
        if len(var.shape) == 1:
            return var
        # reverse the axis order, e.g. a rank-3 tensor uses perm [2, 1, 0]
        perm = list(range(len(var.shape) - 1, -1, -1))
        out, _ = _C_ops.transpose2(var, 'axis', perm)
        return out
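
    # for illustration: given x = paddle.ones([2, 3, 4]), x.T has shape
    # [4, 3, 2]; 1-D tensors are returned unchanged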

    def _scalar_add_(var, value):
        return _scalar_elementwise_op_(var, 1.0, value)

    def _scalar_sub_(var, value):
        return _scalar_elementwise_op_(var, 1.0, -value)

    def _scalar_rsub_(var, value):
        return _scalar_elementwise_op_(var, -1.0, value)

    def _scalar_mul_(var, value):
        return _scalar_elementwise_op_(var, value, 0.0)

    def _scalar_div_(var, value):
        return _scalar_elementwise_op_(var, 1.0 / value, 0.0)
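
    # summary of how each scalar helper above maps onto the scale kernel:
    #   x + c -> scale=1.0,   bias=c     (_scalar_add_)
    #   x - c -> scale=1.0,   bias=-c    (_scalar_sub_)
    #   c - x -> scale=-1.0,  bias=c     (_scalar_rsub_)
    #   x * c -> scale=c,     bias=0.0   (_scalar_mul_)
    #   x / c -> scale=1.0/c, bias=0.0   (_scalar_div_)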

    # factory for binary operators such as the elementwise and compare ops
    def _binary_creator_(method_name,
                         op_type,
                         reverse=False,
                         scalar_method=None):
        def __impl__(self, other_var):
            # 1. cases where a scalar operand is involved:
            # we need to reconcile tensor.dtype with the scalar's type and
            # cast the correct object
            if isinstance(other_var, float):
                # in all cases (+, -, *, /, **, //, %), we need to cast
                # tensor.dtype to float
                if self.dtype in _supported_int_dtype_:
                    self = astype(self, 'float32')
                # use `scale` instead of `elementwise_*` for better
                # performance, but only +, -, *, / can use this fast path
                if scalar_method is not None:
                    return scalar_method(self, other_var)
            elif isinstance(other_var, int):
                # in all cases (+, -, *, /, **, //, %), we can cast it to
                # float, because the output tensor's dtype depends on the
                # dtype of the input tensor
                other_var = float(other_var)
                # division is a special case
                # NOTE(chenweihang): because we cast the tensor to float32
                # instead of float64, the division result is only accurate to
                # about 6 digits after the decimal point. numpy computes in
                # float64, so its result differs from ours beyond the 6th
                # decimal place. If necessary, we could use float64 here as
                # well. torch's behavior here is consistent with ours.
                if op_type == 'elementwise_div' and self.dtype in _supported_int_dtype_:
                    self = astype(self, 'float32')
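                # e.g. paddle.to_tensor(1) / 3 is computed in float32 and
                # gives 0.33333334, while numpy's float64 result is
                # 0.3333333333333333 -- they agree to about 7 significant
                # digits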
                # use `scale` instead of `elementwise_*` for better
                # performance, but only +, -, *, / can use this fast path
                if scalar_method is not None:
                    return scalar_method(self, other_var)
            else:
                # do nothing
                pass

            # 2. create a varbase for the scalar operand
            lhs_dtype = self.dtype
            if not isinstance(other_var, core.VarBase):
                if isinstance(other_var, complex):
                    import paddle
                    other_var = paddle.to_tensor(other_var, dtype='complex64')
                else:
                    if reverse:
                        other_var = create_tensor(
                            other_var, dtype=lhs_dtype, shape=self.shape)
                    else:
                        # materialize the scalar via a fill_constant op
                        other_var = create_scalar(
                            value=other_var, dtype=lhs_dtype)

            # 3. promote types, or unify the right operand's dtype to the left's
            rhs_dtype = other_var.dtype
            if lhs_dtype != rhs_dtype:
                if method_name in _supported_promote_complex_types_ and (
                        lhs_dtype in _complex_dtypes or
                        rhs_dtype in _complex_dtypes):
                    # the dtype is promoted only when lhs_dtype or rhs_dtype
                    # is a complex type; in all other cases lhs_dtype is used
                    # directly, which is consistent with the original rule
                    promote_dtype = core._promote_types_if_complex_exists(
                        lhs_dtype, rhs_dtype)
                    self = self if lhs_dtype == promote_dtype else astype(
                        self, promote_dtype)
                    other_var = other_var if rhs_dtype == promote_dtype else astype(
                        other_var, promote_dtype)
                else:
                    warnings.warn(
                        'The dtypes of the left and right operands are not the same: left dtype is {}, right dtype is {}; the right operand will be cast to {}'.
                        format(lhs_dtype, rhs_dtype, lhs_dtype))
                    other_var = astype(other_var, lhs_dtype)

            if reverse:
                self, other_var = other_var, self

            # 4. calculation
            axis = -1
            math_op = getattr(_C_ops, op_type)
            return math_op(self, other_var, 'axis', axis)

        comment = OpProtoHolder.instance().get_op_proto(op_type).comment

        __impl__.__doc__ = """
        {0}
        Args:
            other_var(Tensor|float|int): the right-hand operand

        Returns:
            Tensor
        """.format(comment)
        __impl__.__name__ = method_name
        return __impl__
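
    # a sketch of how _binary_creator_ is consumed by varbase_methods below:
    #   _binary_creator_('__add__', 'elementwise_add', False, _scalar_add_)
    # returns an __impl__ that is bound as core.VarBase.__add__, so
    # `tensor + 3` takes the scale fast path while `tensor + tensor` calls
    # _C_ops.elementwise_add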

    varbase_methods = [
        ('__neg__', _neg_),
        ('__float__', _float_),
        ('__long__', _long_),
        ('__int__', _int_),
        ('__len__', _len_),
        ('__index__', _index_),
        ('astype', astype),
        ('dim', lambda x: len(x.shape)),
        ('ndimension', lambda x: len(x.shape)),
        ('ndim', _ndim_),
        ('size', _size_),
        ('T', _T_),
        ('__add__',
         _binary_creator_('__add__', 'elementwise_add', False, _scalar_add_)),
        # a + b == b + a, so no explicit reverse is needed
        ('__radd__',
         _binary_creator_('__radd__', 'elementwise_add', False, _scalar_add_)),
        ('__sub__', _binary_creator_('__sub__', 'elementwise_sub', False,
                                     _scalar_sub_)),
        ('__rsub__', _binary_creator_('__rsub__', 'elementwise_sub', True,
                                      _scalar_rsub_)),
        ('__mul__', _binary_creator_('__mul__', 'elementwise_mul', False,
                                     _scalar_mul_)),
        # a * b == b * a, so no explicit reverse is needed
        ('__rmul__',
         _binary_creator_('__rmul__', 'elementwise_mul', False, _scalar_mul_)),
        ('__div__', _binary_creator_('__div__', 'elementwise_div', False,
                                     _scalar_div_)),
        ('__truediv__', _binary_creator_('__truediv__', 'elementwise_div',
                                         False, _scalar_div_)),
        ('__rdiv__', _binary_creator_('__rdiv__', 'elementwise_div', True,
                                      None)),
        ('__rtruediv__', _binary_creator_('__rtruediv__', 'elementwise_div',
                                          True, None)),
        ('__pow__', _binary_creator_('__pow__', 'elementwise_pow', False,
                                     None)),
        ('__rpow__', _binary_creator_('__rpow__', 'elementwise_pow', True,
                                      None)),
        ('__floordiv__', _binary_creator_('__floordiv__',
                                          'elementwise_floordiv', False, None)),
        ('__mod__', _binary_creator_('__mod__', 'elementwise_mod', False,
                                     None)),
        ('__matmul__', _binary_creator_('__matmul__', "matmul_v2", False,
                                        None)),
        # comparison operators
        ('__eq__', _binary_creator_('__eq__', 'equal', False, None)),
        ('__ne__', _binary_creator_('__ne__', 'not_equal', False, None)),
        ('__lt__', _binary_creator_('__lt__', 'less_than', False, None)),
        ('__le__', _binary_creator_('__le__', 'less_equal', False, None)),
        ('__gt__', _binary_creator_('__gt__', 'greater_than', False, None)),
        ('__ge__', _binary_creator_('__ge__', 'greater_equal', False, None)),
        ('__array_ufunc__', None)
    ]

    global _already_patch_varbase
    if not _already_patch_varbase:
        for method_name, method_impl in varbase_methods:
            setattr(core.VarBase, method_name, method_impl)
    else:
        import paddle.tensor
        # Tensor methods come from the paddle.tensor module
        for method_name in paddle.tensor.tensor_method_func:
            if hasattr(core.VarBase, method_name): continue
            method_impl = getattr(paddle.tensor, method_name, None)
            if method_impl: setattr(core.VarBase, method_name, method_impl)

        for magic_method, origin_method in paddle.tensor.magic_method_func:
            impl = getattr(paddle.tensor, origin_method, None)
            if impl: setattr(core.VarBase, magic_method, impl)

    _already_patch_varbase = True