#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

from .. import core
from ..framework import Variable, unique_name, in_dygraph_mode, default_main_program
from .layer_function_generator import OpProtoHolder
from ..initializer import force_init_on_cpu

_supported_int_dtype_ = [
    core.VarDesc.VarType.UINT8,
    core.VarDesc.VarType.INT8,
    core.VarDesc.VarType.INT16,
    core.VarDesc.VarType.INT32,
    core.VarDesc.VarType.INT64,
]


def monkey_patch_variable():
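    # Injects Python operator overloads (+, -, *, /, //, %, **, comparisons)
    # and astype onto Variable, and mirrors them onto core.VarBase so that
    # dygraph tensors pick up the same behaviour.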
    def unique_tmp_name():
        return unique_name.generate("tmp")

    def safe_get_dtype(var):
        try:
            dtype = var.dtype
        except:
            raise ValueError("Cannot get data type from %s" % var.name)
        return dtype

    def current_block(var):
        if in_dygraph_mode():
            return default_main_program().global_block()
        return var.block.program.current_block()

    def create_new_tmp_var(block, dtype):
        tmp_name = unique_tmp_name()
        return block.create_var(name=tmp_name, dtype=dtype)

    def create_tensor(block, value, dtype, shape):
        value = float(value)
        var = create_new_tmp_var(block, dtype)
        block.append_op(
            type="fill_constant",
            outputs={'Out': [var]},
            attrs={
                'dtype': var.dtype,
                'shape': shape,
                'value': value,
                'force_cpu': force_init_on_cpu()
            },
            stop_gradient=True)
        var.stop_gradient = True
        return var

    def create_scalar(block, value, dtype):
        return create_tensor(block, value, dtype, shape=[1])

    def create_tensor_with_batchsize(ref_var, value, dtype):
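        # fill_constant cannot express the -1 (batch) dimension of ref_var, so
        # the constant is built with fill_constant_batch_size_like, which
        # copies that dimension from ref_var at runtime.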
        assert isinstance(ref_var, Variable)
        value = float(value)
        block = current_block(ref_var)
        var = create_new_tmp_var(block, dtype)
        batch_dim = -1
        for i, d in enumerate(ref_var.shape):
            if d < 0:
                batch_dim = i
                break
        assert batch_dim != -1
        block.append_op(
            type='fill_constant_batch_size_like',
            outputs={'Out': [var]},
            inputs={'Input': [ref_var]},
            attrs={
                'shape': ref_var.shape,
                'value': value,
                'input_dim_idx': batch_dim,
                'output_dim_idx': batch_dim
            },
            stop_gradient=True)

        var.stop_gradient = True
        return var

    def astype(self, dtype):
        """
        **Notes**:
            **The variable must be a** :ref:`api_fluid_Tensor`

        Cast a variable to a specified data type.

        Args:

            self(Variable): The source variable

            dtype: The target data type

        Returns:
            Variable: Variable with new dtype

        Examples:
            In Static Graph Mode:

            .. code-block:: python

                import paddle.fluid as fluid

                startup_prog = fluid.Program()
                main_prog = fluid.Program()
                with fluid.program_guard(main_prog, startup_prog):
                    original_variable = fluid.data(name = "new_variable", shape=[2,2], dtype='float32')
                    new_variable = original_variable.astype('int64')
                    print("new var's dtype is: {}".format(new_variable.dtype))

            In Dygraph Mode:

            .. code-block:: python

                import paddle.fluid as fluid
                import numpy as np

                x = np.ones([2, 2], np.float32)
                with fluid.dygraph.guard():
                    original_variable = fluid.dygraph.to_variable(x)
                    print("original var's dtype is: {}, numpy dtype is {}".format(original_variable.dtype, original_variable.numpy().dtype))
                    new_variable = original_variable.astype('int64')
                    print("new var's dtype is: {}, numpy dtype is {}".format(new_variable.dtype, new_variable.numpy().dtype))

        """
        block = current_block(self)
        out = create_new_tmp_var(block, dtype)
        block.append_op(
            type="cast",
            inputs={"X": [self]},
            outputs={"Out": [out]},
            attrs={"in_dtype": self.dtype,
                   "out_dtype": out.dtype})
        return out

    def _scalar_elementwise_op_(var, scale, bias):
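        # The scale op computes Out = scale * X + bias. The scalar helpers
        # below fold a Python number into this single op instead of
        # materialising a constant tensor, e.g. var - b -> scale(var, 1.0, -b)
        # and b - var -> scale(var, -1.0, b).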
        block = current_block(var)
        out = create_new_tmp_var(block, var.dtype)
        block.append_op(
            type="scale",
            inputs={"X": [var]},
            outputs={"Out": [out]},
            attrs={"scale": scale,
                   "bias": bias})
        return out

    def _scalar_elementwise_add_(var, value):
        return _scalar_elementwise_op_(var, 1.0, value)

    def _scalar_elementwise_sub_(var, value):
        return _scalar_elementwise_op_(var, 1.0, -value)

    def _scalar_elementwise_rsub_(var, value):
        return _scalar_elementwise_op_(var, -1.0, value)

    def _scalar_elementwise_mul_(var, value):
        return _scalar_elementwise_op_(var, value, 0.0)

    def _scalar_elementwise_div_(var, value):
        return _scalar_elementwise_op_(var, 1.0 / value, 0.0)

    def _elemwise_method_creator_(method_name,
                                  op_type,
                                  reverse=False,
                                  scalar_method=None):
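        # Builds the concrete __add__/__sub__/... implementation for op_type:
        # try the scalar fast path first, otherwise wrap the Python number in
        # a filled tensor, align dtypes, and append the elementwise op.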
        def __impl__(self, other_var):
            # FIXME(zjl): elementwise_div between integers cannot be converted to scale,
            # which may lose accuracy. This is a hot fix for release 1.6.
            if scalar_method is not None and not (
                    op_type == 'elementwise_div' and
                    self.dtype in _supported_int_dtype_):
                if isinstance(other_var, float):
                    if self.dtype in _supported_int_dtype_:
                        assert other_var == int(other_var), \
                            "float value {} cannot convert to integer".format(other_var)
                    return scalar_method(self, other_var)
                elif isinstance(other_var, int):
                    return scalar_method(self, float(other_var))

            lhs_dtype = safe_get_dtype(self)

            if not isinstance(other_var, Variable):
                if reverse:
                    has_batch_size = False
                    for elem in self.shape:
                        if elem < 0:
                            has_batch_size = True
                            break
                    if not has_batch_size:
                        other_var = create_tensor(
                            current_block(self),
                            other_var,
                            dtype=lhs_dtype,
                            shape=self.shape)
                    else:
                        other_var = create_tensor_with_batchsize(
                            self, other_var, lhs_dtype)
                else:
                    # add fill_op to current_block
                    other_var = create_scalar(
                        current_block(self), value=other_var, dtype=lhs_dtype)

            rhs_dtype = safe_get_dtype(other_var)
            if lhs_dtype != rhs_dtype:
                other_var = astype(other_var, lhs_dtype)
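            # Reflected operators (__rsub__, __rdiv__, ...) swap the operands
            # so that input X always holds the left-hand side of the expression.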
            if reverse:
                tmp = self
                self = other_var
                other_var = tmp

            out = create_new_tmp_var(current_block(self), dtype=lhs_dtype)

            current_block(self).append_op(
                type=op_type,
                inputs={'X': [self],
                        'Y': [other_var]},
                outputs={'Out': out},
                attrs={'axis': -1})
            return out

        comment = OpProtoHolder.instance().get_op_proto(op_type).comment

        __impl__.__doc__ = """
        {0}
        Args:
            self(Variable): left hand variable
            other_var(Variable|float|int): right hand variable

        Returns:
            Variable
        """.format(comment)
        __impl__.__name__ = method_name
        return __impl__

    # inject methods
    for method_name, op_type, reverse, scalar_method in (
        ("__add__", "elementwise_add", False, _scalar_elementwise_add_),
            # a+b == b+a. Do not need to reverse explicitly
        ("__radd__", "elementwise_add", False, _scalar_elementwise_add_),
        ("__sub__", "elementwise_sub", False, _scalar_elementwise_sub_),
        ("__rsub__", "elementwise_sub", True, _scalar_elementwise_rsub_),
        ("__mul__", "elementwise_mul", False, _scalar_elementwise_mul_),
            # a*b == b*a. Do not need to reverse explicitly
        ("__rmul__", "elementwise_mul", False, _scalar_elementwise_mul_),
        ("__div__", "elementwise_div", False, _scalar_elementwise_div_),
        ("__truediv__", "elementwise_div", False, _scalar_elementwise_div_),
        ("__rdiv__", "elementwise_div", True, None),
        ("__rtruediv__", "elementwise_div", True, None),
        ("__pow__", "elementwise_pow", False, None),
        ("__rpow__", "elementwise_pow", True, None),
        ("__floordiv__", "elementwise_floordiv", False, None),
        ("__mod__", "elementwise_mod", False, None),
            # for logical compare
        ("__eq__", "equal", False, None),
        ("__ne__", "not_equal", False, None),
        ("__lt__", "less_than", False, None),
        ("__le__", "less_equal", False, None),
        ("__gt__", "greater_than", False, None),
        ("__ge__", "greater_equal", False, None)):
        setattr(Variable, method_name,
                _elemwise_method_creator_(method_name, op_type, reverse,
                                          scalar_method))
        setattr(core.VarBase, method_name,
                _elemwise_method_creator_(method_name, op_type, reverse,
                                          scalar_method))

    Variable.astype = astype
    setattr(core.VarBase, "astype", astype)
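
# A minimal usage sketch (illustrative only; the variable names below are
# hypothetical). Assuming monkey_patch_variable() has been called during
# fluid's initialisation, plain Python operators on Variables append the ops
# registered above:
#
#     import paddle.fluid as fluid
#
#     x = fluid.data(name='x', shape=[-1, 4], dtype='float32')
#     y = fluid.data(name='y', shape=[-1, 4], dtype='float32')
#     z = x * 2.0 + 1.0        # each scalar op is folded into a scale op
#     mask = x > y             # appends a greater_than op
#     w = z.astype('float64')  # appends a cast op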