#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

from .. import core
from ..framework import Variable, convert_np_dtype_to_dtype_, _varbase_creator
from ..layers.layer_function_generator import OpProtoHolder
from . import no_grad

import numpy as np
import six
import warnings

_supported_int_dtype_ = [
    core.VarDesc.VarType.UINT8,
    core.VarDesc.VarType.INT8,
    core.VarDesc.VarType.INT16,
    core.VarDesc.VarType.INT32,
    core.VarDesc.VarType.INT64,
    core.VarDesc.VarType.BOOL,
]

# NOTE(chenweihang): We currently do not fully support type promotion
# between tensors. Partial support is provided here because the interoperation
# of real and complex numbers in Paddle Quantum is very frequent, such as the
# binary operation between `float` and `complex64`, so we must support correct
# type promotion for the APIs that Paddle Quantum uses (a commented usage
# sketch follows the dtype lists below).
# For now this is only checked in dygraph (Paddle Quantum is based on dygraph).
# Full type promotion support will need to be fully verified later.
_supported_promote_complex_types_ = [
    '__add__',
    '__radd__',
    '__sub__',
    '__rsub__',
    '__mul__',
    '__rmul__',
    '__div__',
    '__truediv__',
    '__rdiv__',
    '__rtruediv__',
    '__matmul__',
]

_complex_dtypes = [
    core.VarDesc.VarType.COMPLEX64,
    core.VarDesc.VarType.COMPLEX128,
]
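
# NOTE: a minimal usage sketch of the promotion rule above. This is
# illustrative only and assumes the public `paddle.to_tensor` API and that
# Python complex inputs default to complex64:
#
#     import paddle
#     x = paddle.to_tensor([1.0, 2.0])         # float32
#     y = paddle.to_tensor([1 + 1j, 2 + 2j])   # complex64
#     z = x * y          # '__mul__' is in _supported_promote_complex_types_,
#                        # so both operands are promoted to complex64
#     print(z.dtype)     # complex64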

_already_patch_varbase = False


def monkey_patch_math_varbase():
    """
    Similar to monkey_patch_variable.
    The difference is that, in dygraph mode, auto-generated op functions are used for better performance.
    """

    @no_grad
    def create_tensor(value, dtype, shape):
        out = _varbase_creator(dtype=dtype)
        out = core.ops.fill_constant(out, 'dtype', dtype, 'shape', shape,
                                     'value', value, 'force_cpu', False)
        out.stop_gradient = True
        return out

    def create_scalar(value, dtype):
        return create_tensor(value, dtype, shape=[1])

    def astype(self, dtype):
        """

        Cast a Tensor to a specified data type.

        Args:
            dtype: The target data type.

        Returns:
            Tensor: a new Tensor with target dtype

        Examples:
            .. code-block:: python

                import paddle
                import numpy as np

                original_tensor = paddle.ones([2, 2])
                print("original tensor's dtype is: {}".format(original_tensor.dtype))
                new_tensor = original_tensor.astype('float32')
                print("new tensor's dtype is: {}".format(new_tensor.dtype))

        """
        if not isinstance(dtype, core.VarDesc.VarType):
            dtype = convert_np_dtype_to_dtype_(dtype)
        return core.ops.cast(self, 'in_dtype', self.dtype, 'out_dtype', dtype)

    def _scalar_elementwise_op_(var, scale, bias):
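        # core.ops.scale computes out = scale * var + bias
        # (with the scale op's default bias_after_scale=True)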
        return core.ops.scale(var, 'scale', scale, 'bias', bias)

    def _neg_(var):
        return _scalar_elementwise_op_(var, -1.0, 0.0)

    def _float_(var):
        numel = np.prod(var.shape)
        assert numel == 1, "only one element variable can be converted to float."
        tensor = var.value().get_tensor()
        assert tensor._is_initialized(), "variable's tensor is not initialized"
        return float(var.numpy().flatten()[0])

    def _long_(var):
        numel = np.prod(var.shape)
        assert numel == 1, "only one element variable can be converted to long."
        tensor = var.value().get_tensor()
        assert tensor._is_initialized(), "variable's tensor is not initialized"
        if six.PY2:
            return long(var.numpy().flatten()[0])
        else:
            return int(var.numpy().flatten()[0])

    def _int_(var):
        numel = np.prod(var.shape)
        assert numel == 1, "only one element variable can be converted to int."
        tensor = var.value().get_tensor()
        assert tensor._is_initialized(), "variable's tensor is not initialized"
        return int(var.numpy().flatten()[0])

    def _len_(var):
        return var.shape[0]

    def _index_(var):
        numel = np.prod(var.shape)
        assert numel == 1, "only one element variable can be converted to python index."
        tensor = var.value().get_tensor()
        assert tensor._is_initialized(), "variable's tensor is not initialized"
        if six.PY2:
            return long(var.numpy().flatten()[0])
        else:
            return int(var.numpy().flatten()[0])
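    # e.g. with the patch applied, float(paddle.to_tensor([3.0])) returns 3.0,
    # while converting a Tensor with more than one element trips the asserts above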

    @property
    def _ndim_(var):
        return len(var.shape)

    @property
    def _size_(var):
        return np.prod(var.shape)

    def _scalar_add_(var, value):
        return _scalar_elementwise_op_(var, 1.0, value)

    def _scalar_sub_(var, value):
        return _scalar_elementwise_op_(var, 1.0, -value)

    def _scalar_rsub_(var, value):
        return _scalar_elementwise_op_(var, -1.0, value)

    def _scalar_mul_(var, value):
        return _scalar_elementwise_op_(var, value, 0.0)

    def _scalar_div_(var, value):
        return _scalar_elementwise_op_(var, 1.0 / value, 0.0)
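
    # Each scalar helper above maps its Python operation onto a single `scale`
    # call, e.g. var - value == scale(var, 1.0, -value) and
    # value - var == scale(var, -1.0, value)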

    # for binary operators such as elementwise and compare ops
    def _binary_creator_(method_name,
                         op_type,
                         reverse=False,
                         scalar_method=None):
        def __impl__(self, other_var):
            # 1. scalar cases: we need to combine the tensor.dtype and the
            # scalar type, and cast the appropriate operand
            if isinstance(other_var, float):
                # in all cases (+, -, *, /, **, //, %), we need to cast tensor.dtype to float
                if self.dtype in _supported_int_dtype_:
                    self = astype(self, 'float32')
                # here we use `scale` instead of `elementwise` ops to get better
                # performance, but only +, -, *, / can use this method
                if scalar_method is not None:
                    return scalar_method(self, other_var)
            elif isinstance(other_var, int):
                # in all cases (+, -, *, /, **, //, %), we can cast it to float
                # because the output tensor.dtype depends on the type of the input tensor
                other_var = float(other_var)
                # division is a special case
                # NOTE(chenweihang): because we cast the tensor to float32 instead of
                # float64, the division result is only accurate to about 6 digits after
                # the decimal point. Numpy computes in float64, so the result here may
                # differ from numpy's beyond the 6th decimal place. If necessary, we can
                # also use float64 here. torch's behavior here is consistent with ours.
                if op_type == 'elementwise_div' and self.dtype in _supported_int_dtype_:
                    self = astype(self, 'float32')
                # here we use `scale` instead of `elementwise` ops to get better
                # performance, but only +, -, *, / can use this method
                if scalar_method is not None:
                    return scalar_method(self, other_var)
            else:
                # do nothing
                pass

            # 2. create varbase for scalar
            lhs_dtype = self.dtype
            if not isinstance(other_var, core.VarBase):
                if reverse:
                    other_var = create_tensor(
                        other_var, dtype=lhs_dtype, shape=self.shape)
                else:
                    # add fill_op 
                    other_var = create_scalar(value=other_var, dtype=lhs_dtype)

            # 3. promote types or unify right var type to left var
            rhs_dtype = other_var.dtype
            if lhs_dtype != rhs_dtype:
                if method_name in _supported_promote_complex_types_ and (
                        lhs_dtype in _complex_dtypes or
                        rhs_dtype in _complex_dtypes):
                    # the dtype is promoted only when lhs_dtype or rhs_dtype is
                    # a complex type; in other cases we directly use lhs_dtype,
                    # which is consistent with the original rule
                    promote_dtype = core._promote_types_if_complex_exists(
                        lhs_dtype, rhs_dtype)
                    self = self if lhs_dtype == promote_dtype else astype(
                        self, promote_dtype)
                    other_var = other_var if rhs_dtype == promote_dtype else astype(
                        other_var, promote_dtype)
                else:
                    warnings.warn(
                        'The dtype of left and right variables are not the same, left dtype is {}, but right dtype is {}, the right dtype will be converted to {}'.
                        format(lhs_dtype, rhs_dtype, lhs_dtype))
                    other_var = astype(other_var, lhs_dtype)

            if reverse:
                tmp = self
                self = other_var
                other_var = tmp

            # 4. calculation
            axis = -1
            math_op = getattr(core.ops, op_type)
            return math_op(self, other_var, 'axis', axis)

        comment = OpProtoHolder.instance().get_op_proto(op_type).comment

        __impl__.__doc__ = """
        {0}
        Args:
            other_var(Tensor|float|int): right hand Tensor

        Returns:
            Tensor
        """.format(comment)
        __impl__.__name__ = method_name
        return __impl__
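
    # A rough sketch of what the generated methods do with a scalar right-hand
    # operand (illustrative only; `x` stands for any dygraph Tensor):
    #
    #     x + 1    # int scalar -> cast to 1.0, handled by _scalar_add_ via
    #              # the `scale` op instead of elementwise_add
    #     x / 2    # if x has an integer dtype it is first cast to float32,
    #              # so the result is floating point
    #     x ** 2   # __pow__ has no scalar_method: 2 is wrapped in a
    #              # 1-element Tensor and elementwise_pow is invoked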

    varbase_methods = [
        ('__neg__', _neg_),
        ('__float__', _float_),
        ('__long__', _long_),
        ('__int__', _int_),
        ('__len__', _len_),
        ('__index__', _index_),
        ('astype', astype),
        ('dim', lambda x: len(x.shape)),
        ('ndimension', lambda x: len(x.shape)),
        ('ndim', _ndim_),
        ('size', _size_),
        ('__add__',
         _binary_creator_('__add__', 'elementwise_add', False, _scalar_add_)),
        ##  a+b == b+a. Do not need to reverse explicitly
        ('__radd__',
         _binary_creator_('__radd__', 'elementwise_add', False, _scalar_add_)),
        ('__sub__', _binary_creator_('__sub__', 'elementwise_sub', False,
                                     _scalar_sub_)),
        ('__rsub__', _binary_creator_('__rsub__', 'elementwise_sub', True,
                                      _scalar_rsub_)),
        ('__mul__', _binary_creator_('__mul__', 'elementwise_mul', False,
                                     _scalar_mul_)),
        ## a*b == b*a. Do not need to reverse explicitly
        ('__rmul__',
         _binary_creator_('__rmul__', 'elementwise_mul', False, _scalar_mul_)),
        ('__div__', _binary_creator_('__div__', 'elementwise_div', False,
                                     _scalar_div_)),
        ('__truediv__', _binary_creator_('__truediv__', 'elementwise_div',
                                         False, _scalar_div_)),
        ('__rdiv__', _binary_creator_('__rdiv__', 'elementwise_div', True,
                                      None)),
        ('__rtruediv__', _binary_creator_('__rtruediv__', 'elementwise_div',
                                          True, None)),
        ('__pow__', _binary_creator_('__pow__', 'elementwise_pow', False,
                                     None)),
        ('__rpow__', _binary_creator_('__rpow__', 'elementwise_pow', True,
                                      None)),
        ('__floordiv__', _binary_creator_('__floordiv__',
                                          'elementwise_floordiv', False, None)),
        ('__mod__', _binary_creator_('__mod__', 'elementwise_mod', False,
                                     None)),
        ('__matmul__', _binary_creator_('__matmul__', "matmul_v2", False,
                                        None)),
        ## for logical compare
        ('__eq__', _binary_creator_('__eq__', 'equal', False, None)),
        ('__ne__', _binary_creator_('__ne__', 'not_equal', False, None)),
        ('__lt__', _binary_creator_('__lt__', 'less_than', False, None)),
        ('__le__', _binary_creator_('__le__', 'less_equal', False, None)),
        ('__gt__', _binary_creator_('__gt__', 'greater_than', False, None)),
        ('__ge__', _binary_creator_('__ge__', 'greater_equal', False, None)),
        ('__array_ufunc__', None)
    ]

    global _already_patch_varbase
    if not _already_patch_varbase:
        for method in varbase_methods:
            method_name = method[0]
            method_impl = method[1]
            setattr(core.VarBase, method_name, method_impl)
    else:
        import paddle.tensor
        # Tensor methods from module paddle.tensor
        tensor_methods = paddle.tensor.tensor_method_func
        for method_name in tensor_methods:
            if hasattr(core.VarBase, method_name): continue
            method_impl = getattr(paddle.tensor, method_name, None)
            if method_impl: setattr(core.VarBase, method_name, method_impl)

    _already_patch_varbase = True
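
# Once the patch above has been applied, plain Python operators on dygraph
# Tensors dispatch to the methods registered here, e.g. (a minimal sketch
# assuming only the public paddle API):
#
#     import paddle
#     a = paddle.ones([2, 2])
#     b = a * 3 + 1                    # __mul__ / __add__ fast path via `scale`
#     c = (a @ a).astype('float64')    # __matmul__ -> matmul_v2, then cast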