#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from ..framework import Variable, unique_name
from layer_function_generator import OpProtoHolder
from ..initializer import force_init_on_cpu


def monkey_patch_variable():
    def unique_tmp_name():
        return unique_name.generate("tmp")

    def safe_get_dtype(var):
        try:
            dtype = var.dtype
        except Exception:
            raise ValueError("Cannot get data type from %s" % var.name)
        return dtype

    def create_tensor(block, value, dtype, shape):
        value = float(value)
        tmp_name = unique_tmp_name()
        var = block.create_var(name=tmp_name, shape=shape, dtype=dtype)
        block.append_op(
            type="fill_constant",
            outputs={'Out': [var]},
            attrs={
                'dtype': var.dtype,
                'shape': shape,
                'value': value,
                'force_cpu': force_init_on_cpu()
            })
        return var

    def create_scalar(block, value, dtype):
        return create_tensor(block, value, dtype, shape=[1])

    def create_tensor_with_batchsize(ref_var, value, dtype):
        assert isinstance(ref_var, Variable)
        value = float(value)
        tmp_name = unique_tmp_name()
        var = ref_var.block.create_var(name=tmp_name, dtype=dtype)
        # locate the batch dimension, which is marked as -1 at compile time
        batch_dim = -1
        for i, d in enumerate(ref_var.shape):
            if d < 0:
                batch_dim = i
                break
        assert batch_dim != -1, "ref_var must have a batch (-1) dimension"
        ref_var.block.append_op(
            type='fill_constant_batch_size_like',
            outputs={'Out': [var]},
            inputs={'Input': [ref_var]},
            attrs={
                'shape': ref_var.shape,
                'value': value,
                'input_dim_idx': batch_dim,
                'output_dim_idx': batch_dim
            })
        return var
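    # Note: create_tensor_with_batchsize is only reached from the reversed
    # operators below (e.g. ``1.0 - var``) when ``var`` carries a -1 batch
    # dimension, since a plain fill_constant cannot size the batch at
    # compile time.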

    def astype(self, dtype):
        """
        Cast a variable to a specified data type.

        NOTE: The variable must be a Tensor.

        Args:
            self(Variable): The source variable.
            dtype: The target data type.

        Returns:
            Variable with new dtype
        """
        tmp_name = unique_tmp_name()
        out = self.block.create_var(name=tmp_name, dtype=dtype)
        self.block.append_op(
            type="cast",
            inputs={"X": [self]},
            outputs={"Out": [out]},
            attrs={"in_dtype": self.dtype,
                   "out_dtype": out.dtype})
        return out
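    # Example (a sketch, assuming dtype strings are accepted here as they are
    # elsewhere in fluid): once patched, ``label.astype('float32')`` appends
    # a cast op to the current block and returns the casted temporary.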

    def _elemwise_method_creator_(method_name, op_type, reverse=False):
        def __impl__(self, other_var):
            lhs_dtype = safe_get_dtype(self)

            if not isinstance(other_var, Variable):
                if reverse:
                    has_batch_size = False
                    for elem in self.shape:
                        if elem < 0:
                            has_batch_size = True
                            break
                    if not has_batch_size:
                        other_var = create_tensor(
                            self.block,
                            other_var,
                            dtype=lhs_dtype,
                            shape=self.shape)
                    else:
                        other_var = create_tensor_with_batchsize(
                            self, other_var, lhs_dtype)
                else:
                    # scalar operand: add a fill_constant op to self.block
                    other_var = create_scalar(
                        self.block, value=other_var, dtype=lhs_dtype)

            rhs_dtype = safe_get_dtype(other_var)
            if lhs_dtype != rhs_dtype:
                other_var = astype(other_var, lhs_dtype)
            if reverse:
                # reversed operators (__rsub__, __rdiv__, ...): swap the
                # operands so the op computes other_var <op> self
                self, other_var = other_var, self

            tmp_name = unique_tmp_name()
            out = self.block.create_var(name=tmp_name, dtype=lhs_dtype)

            # if the rhs has a batch (-1) leading dimension, align it with
            # dim 0 of the lhs
            axis = -1
            if other_var.shape[0] == -1:
                axis = 0
            assert len(self.shape) >= len(other_var.shape), (
                "The rank of the first argument of a binary operator cannot "
                "be smaller than the rank of its second argument: %s vs %s" %
                (len(self.shape), len(other_var.shape)))

            self.block.append_op(
                type=op_type,
                inputs={'X': [self],
                        'Y': [other_var]},
                outputs={'Out': [out]},
                attrs={'axis': axis})
            return out

        comment = OpProtoHolder.instance().get_op_proto(op_type).comment

        __impl__.__doc__ = """
        {0}
        Args:
            self(Variable): left hand variable
            other_var(Variable|float|int): right hand variable

        Returns:
            Variable
        """.format(comment)
        __impl__.__name__ = method_name
        return __impl__

    # inject the operator methods into Variable
    for method_name, op_type, reverse in (
        ("__add__", "elementwise_add", False),
            # a + b == b + a, so no explicit reverse is needed
        ("__radd__", "elementwise_add", False),
        ("__sub__", "elementwise_sub", False),
        ("__rsub__", "elementwise_sub", True),
        ("__mul__", "elementwise_mul", False),
            # a * b == b * a, so no explicit reverse is needed
        ("__rmul__", "elementwise_mul", False),
        ("__div__", "elementwise_div", False),
        ("__truediv__", "elementwise_div", False),
        ("__rdiv__", "elementwise_div", True),
        ("__rtruediv__", "elementwise_div", True),
        ("__pow__", "elementwise_pow", False),
        ("__rpow__", "elementwise_pow", True),
            # comparison operators
        ("__eq__", "equal", False),
        ("__ne__", "not_equal", False),
        ("__lt__", "less_than", False),
        ("__le__", "less_equal", False),
        ("__gt__", "greater_than", False),
        ("__ge__", "greater_equal", False)):
        setattr(Variable, method_name,
                _elemwise_method_creator_(method_name, op_type, reverse))

    Variable.astype = astype
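

# A minimal usage sketch (assuming the paddle.fluid API of the same era;
# ``fluid.layers.data`` is from the public API, not this file, and
# monkey_patch_variable() is assumed to have been called at import time):
#
#   import paddle.fluid as fluid
#
#   x = fluid.layers.data(name='x', shape=[13], dtype='float32')
#   y = x * 2.0 + 1.0   # appends fill_constant + elementwise_mul/add ops
#   mask = y > 0.0      # appends a greater_than op via the patched __gt__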