math_op_patch.py
#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

from .. import core
from ..framework import Variable, unique_name
from .layer_function_generator import OpProtoHolder
from ..initializer import force_init_on_cpu

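# Integer Variable dtypes. Scalar division on these is kept as a true
# elementwise_div op rather than folded into a float `scale` op, which
# would lose precision (see __impl__ below).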
_supported_int_dtype_ = [
    core.VarDesc.VarType.UINT8,
    core.VarDesc.VarType.INT8,
    core.VarDesc.VarType.INT16,
    core.VarDesc.VarType.INT32,
    core.VarDesc.VarType.INT64,
]

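# Dunder names of the comparison operators; __impl__ creates their outputs
# with dtype bool.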
compare_ops = ['__eq__', '__ne__', '__lt__', '__le__', '__gt__', '__ge__']


def monkey_patch_variable():
    def unique_tmp_name():
        return unique_name.generate("tmp")

    def safe_get_dtype(var):
        try:
            dtype = var.dtype
        except:
            raise ValueError("Cannot get data type from %s" % var.name)
        return dtype

    def current_block(var):
        return var.block.program.current_block()

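    # Each intermediate result is written to a fresh, uniquely named
    # "tmp"-prefixed variable in the given block.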
    def create_new_tmp_var(block, dtype):
        tmp_name = unique_tmp_name()
        return block.create_var(name=tmp_name, dtype=dtype)

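    # Materialize a Python number as a constant tensor of the given shape
    # via a fill_constant op.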
    def create_tensor(block, value, dtype, shape):
        value = float(value)
        var = create_new_tmp_var(block, dtype)
        block.append_op(
            type="fill_constant",
            outputs={'Out': [var]},
            attrs={
                'dtype': var.dtype,
                'shape': shape,
                'value': value,
                'force_cpu': force_init_on_cpu()
            },
            stop_gradient=True)
        var.stop_gradient = True
        return var

    def create_scalar(block, value, dtype):
        return create_tensor(block, value, dtype, shape=[1])

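    # Variant of create_tensor for reference shapes with a dynamic (-1)
    # batch dimension: fill_constant_batch_size_like copies that dimension
    # from ref_var at runtime.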
    def create_tensor_with_batchsize(ref_var, value, dtype):
        assert isinstance(ref_var, Variable)
        value = float(value)
        block = current_block(ref_var)
        var = create_new_tmp_var(block, dtype)
        batch_dim = -1
        for i, d in enumerate(ref_var.shape):
            if d < 0:
                batch_dim = i
                break
        assert batch_dim != -1
        block.append_op(
            type='fill_constant_batch_size_like',
            outputs={'Out': [var]},
            inputs={'Input': [ref_var]},
            attrs={
                'shape': ref_var.shape,
                'value': value,
                'input_dim_idx': batch_dim,
                'output_dim_idx': batch_dim
            },
            stop_gradient=True)

        var.stop_gradient = True
        return var

    def astype(self, dtype):
        """
        **Notes**:
            **The variable must be a** :ref:`api_fluid_Tensor`

        Cast a variable to a specified data type.

        Args:

            self(Variable): The source variable

            dtype: The target data type

        Returns:
            Variable: Variable with new dtype

        Examples:
            In Static Graph Mode:

            .. code-block:: python

                import paddle.fluid as fluid

                startup_prog = fluid.Program()
                main_prog = fluid.Program()
                with fluid.program_guard(main_prog, startup_prog):
                    original_variable = fluid.data(name="new_variable", shape=[2, 2], dtype='float32')
                    new_variable = original_variable.astype('int64')
                    print("new var's dtype is: {}".format(new_variable.dtype))

            In Dygraph Mode:

            .. code-block:: python

                import paddle.fluid as fluid
                import numpy as np

                x = np.ones([2, 2], np.float32)
                with fluid.dygraph.guard():
                    original_variable = fluid.dygraph.to_variable(x)
                    print("original var's dtype is: {}, numpy dtype is {}".format(original_variable.dtype, original_variable.numpy().dtype))
                    new_variable = original_variable.astype('int64')
                    print("new var's dtype is: {}, numpy dtype is {}".format(new_variable.dtype, new_variable.numpy().dtype))

        """
        block = current_block(self)
        out = create_new_tmp_var(block, dtype)
        block.append_op(
            type="cast",
            inputs={"X": [self]},
            outputs={"Out": [out]},
            attrs={"in_dtype": self.dtype,
                   "out_dtype": out.dtype})
        return out

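    # Compute out = scale * var + bias with a single `scale` op; every
    # Variable-and-scalar fast path below reduces to this.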
    def _scalar_elementwise_op_(var, scale, bias):
        block = current_block(var)
        out = create_new_tmp_var(block, var.dtype)
        block.append_op(
            type="scale",
            inputs={"X": [var]},
            outputs={"Out": [out]},
            attrs={"scale": scale,
                   "bias": bias})
        return out

    def _neg_(var):
        return _scalar_elementwise_op_(var, -1.0, 0.0)

    def _scalar_elementwise_add_(var, value):
        return _scalar_elementwise_op_(var, 1.0, value)

    def _scalar_elementwise_sub_(var, value):
        return _scalar_elementwise_op_(var, 1.0, -value)

    def _scalar_elementwise_rsub_(var, value):
        return _scalar_elementwise_op_(var, -1.0, value)

    def _scalar_elementwise_mul_(var, value):
        return _scalar_elementwise_op_(var, value, 0.0)

    def _scalar_elementwise_div_(var, value):
        return _scalar_elementwise_op_(var, 1.0 / value, 0.0)

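    # Factory for the binary dunders (__add__, __lt__, ...): the generated
    # method appends the matching elementwise/compare op, first converting a
    # Python-number operand into a constant tensor and casting any dtype
    # mismatch to the LHS dtype.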
    def _elemwise_method_creator_(method_name,
                                  op_type,
                                  reverse=False,
                                  scalar_method=None):
        def __impl__(self, other_var):
            # FIXME(zjl): elementwise_div between integers cannot be converted
            # to a scale op without losing accuracy. This is a hot fix for release 1.6.
            if scalar_method is not None and not (
                    op_type == 'elementwise_div' and
                    self.dtype in _supported_int_dtype_):
                if isinstance(other_var, float):
                    if self.dtype in _supported_int_dtype_:
                        assert other_var == int(other_var), \
                            "float value {} cannot convert to integer".format(other_var)
                    return scalar_method(self, other_var)
                elif isinstance(other_var, int):
                    return scalar_method(self, float(other_var))

            lhs_dtype = safe_get_dtype(self)

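            # Promote a Python-number operand to a Variable. A reversed op
            # places the constant on the left-hand side, so it needs the full
            # shape of self; otherwise a [1]-shaped scalar broadcasts.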
            if not isinstance(other_var, Variable):
                if reverse:
                    has_batch_size = False
                    for elem in self.shape:
                        if elem < 0:
                            has_batch_size = True
                            break
                    if not has_batch_size:
                        other_var = create_tensor(
                            current_block(self),
                            other_var,
                            dtype=lhs_dtype,
                            shape=self.shape)
                    else:
                        other_var = create_tensor_with_batchsize(
                            self, other_var, lhs_dtype)
                else:
                    # Append a fill_constant op to the current block to materialize the scalar.
                    other_var = create_scalar(
                        current_block(self), value=other_var, dtype=lhs_dtype)

            rhs_dtype = safe_get_dtype(other_var)
            if lhs_dtype != rhs_dtype:
                other_var = astype(other_var, lhs_dtype)
            if reverse:
                self, other_var = other_var, self

            # NOTE(zhiqiu): the output of a comparison operator should be bool.
            if method_name in compare_ops:
                out = create_new_tmp_var(current_block(self), dtype="bool")
            else:
                out = create_new_tmp_var(current_block(self), dtype=lhs_dtype)

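            # axis=-1 is the default trailing-dimension alignment; use axis=0
            # when the RHS carries a dynamic (-1) batch dimension so
            # broadcasting starts at the batch axis.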
            axis = -1
            if other_var.shape[0] == -1:
                axis = 0
            current_block(self).append_op(
                type=op_type,
                inputs={'X': [self],
                        'Y': [other_var]},
                outputs={'Out': out},
                attrs={'axis': axis})
            return out

        comment = OpProtoHolder.instance().get_op_proto(op_type).comment

        __impl__.__doc__ = """
        {0}
        Args:
            self(Variable): left hand variable
            other_var(Variable|float|int): right hand variable

        Returns:
            Variable
        """.format(comment)
        __impl__.__name__ = method_name
        return __impl__

    # inject methods
    for method_name, op_type, reverse, scalar_method in (
        ("__add__", "elementwise_add", False, _scalar_elementwise_add_),
            # a + b == b + a, so no explicit reverse is needed
        ("__radd__", "elementwise_add", False, _scalar_elementwise_add_),
        ("__sub__", "elementwise_sub", False, _scalar_elementwise_sub_),
        ("__rsub__", "elementwise_sub", True, _scalar_elementwise_rsub_),
        ("__mul__", "elementwise_mul", False, _scalar_elementwise_mul_),
            # a * b == b * a, so no explicit reverse is needed
        ("__rmul__", "elementwise_mul", False, _scalar_elementwise_mul_),
        ("__div__", "elementwise_div", False, _scalar_elementwise_div_),
        ("__truediv__", "elementwise_div", False, _scalar_elementwise_div_),
        ("__rdiv__", "elementwise_div", True, None),
        ("__rtruediv__", "elementwise_div", True, None),
        ("__pow__", "elementwise_pow", False, None),
        ("__rpow__", "elementwise_pow", True, None),
        ("__floordiv__", "elementwise_floordiv", False, None),
        ("__mod__", "elementwise_mod", False, None),
            # logical comparison operators
        ("__eq__", "equal", False, None),
        ("__ne__", "not_equal", False, None),
        ("__lt__", "less_than", False, None),
        ("__le__", "less_equal", False, None),
        ("__gt__", "greater_than", False, None),
        ("__ge__", "greater_equal", False, None)):
        setattr(Variable, method_name,
                _elemwise_method_creator_(method_name, op_type, reverse,
                                          scalar_method))
    # b = -a
    Variable.__neg__ = _neg_
    Variable.astype = astype
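
# A minimal usage sketch (illustrative only, not part of this module). It
# assumes `paddle.fluid` is importable and that importing fluid has already
# run monkey_patch_variable(), so Variables support Python operators:
#
#     import paddle.fluid as fluid
#
#     x = fluid.data(name='x', shape=[-1, 4], dtype='float32')
#     y = x * 2.0 + 1.0                    # each scalar op becomes a `scale` op
#     mask = (x > 0.5).astype('float32')   # `greater_than` (bool), then `cast`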