# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import inspect
import numpy as np

import paddle
from .. import framework
from .. import core
from .. import unique_name
from ..framework import Variable, Parameter, ParamBase
from .base import switch_to_static_graph
from .math_op_patch import monkey_patch_math_varbase
from .parallel import scale_loss
from paddle.fluid.data_feeder import convert_dtype, _PADDLE_DTYPE_2_NUMPY_DTYPE


def monkey_patch_varbase():
    @switch_to_static_graph
    def _to_static_var(self, to_parameter=False, **kwargs):
        """
        **Notes**:
            **This API is ONLY available in Dygraph mode**

        Transform a VarBase into a static Variable with the same attributes. It is a low-level
        interface used in dy2static and should not be called directly.

        Args:
            to_parameter (bool): It takes effect only if the input is a VarBase. If set to True,
                                 the VarBase will be converted into a framework.Parameter. Otherwise, it will
                                 be converted into a framework.Variable. Default: False.

        Examples:
            .. code-block:: python

                import paddle.fluid as fluid
                from paddle.fluid.dygraph.base import to_variable
                import numpy as np

                data = np.ones([3, 1024], dtype='float32')
                with fluid.dygraph.guard():
                    var_base = to_variable(data)
                    static_var = var_base._to_static_var()

        """

        # Note: getattr(self, attr, None) will trigger x.grad = x.gradient(), but gradient() is
        # only available in dygraph mode, so the call would fail. Hence, dygraph-only properties
        # must not go through getattr(self, attr, None).
        attr_not_need_keys = ['grad']
        if isinstance(self, ParamBase):
            attr_kwargs = self.__dict__.copy()
        else:
            attr_names = []
            for name in dir(self):
                if name not in attr_not_need_keys and not (
                        inspect.ismethod(getattr(self, name)) or
                        name.startswith('_')):
                    attr_names.append(name)
            attr_kwargs = {name: getattr(self, name) for name in attr_names}

        attr_keys = ['block', 'shape', 'dtype', 'type', 'name', 'persistable']
        for attr in attr_keys:
            attr_kwargs[attr] = getattr(self, attr, None)

        attr_kwargs.update(kwargs)

        if to_parameter or isinstance(self, ParamBase):
            del attr_kwargs['persistable']
            static_var = Parameter(**attr_kwargs)
        else:
            static_var = Variable(**attr_kwargs)
        return static_var

    # TODO(jiabin): move this to cplusplus end if we find some performance issue on it
    @framework.dygraph_only
    def set_value(self, value):
        """
        **Notes**:
            **This API is ONLY available in Dygraph mode**

        Set a new value for this Variable.

        Args:
            value (Variable|np.ndarray): the new value.

        Examples:
            .. code-block:: python

                import paddle.fluid as fluid
                from paddle.fluid.dygraph.base import to_variable
                from paddle.fluid.dygraph import Linear
                import numpy as np

                data = np.ones([3, 1024], dtype='float32')
                with fluid.dygraph.guard():
                    linear = fluid.dygraph.Linear(1024, 4)
                    t = to_variable(data)
                    linear(t)  # call with default weight
                    custom_weight = np.random.randn(1024, 4).astype("float32")
                    linear.weight.set_value(custom_weight)  # change existing weight
                    out = linear(t)  # call with different weight

        """
        assert isinstance(value, (np.ndarray, core.VarBase)), \
            "Variable set_value function only accepts numpy.ndarray or VarBase as the new value"

        value_np = value
        if isinstance(value, core.VarBase):
            value_np = value.numpy()

        self_tensor_np = self.numpy()

        assert self_tensor_np.shape == value_np.shape, \
            "Variable shape does not match: Variable [ {} ] needs a tensor with shape {} but the given tensor has shape {}".format(
                self.name, self_tensor_np.shape, value_np.shape)

        assert self_tensor_np.dtype == value_np.dtype, \
            "Variable dtype does not match: Variable [ {} ] needs a tensor with dtype {} but the given tensor has dtype {}".format(
                self.name, self_tensor_np.dtype, value_np.dtype)

        self.value().get_tensor().set(value_np,
                                      framework._current_expected_place())

    @framework.dygraph_only
    def backward(self, retain_graph=False):
        """
        Run backward on the current graph, starting from the current Tensor.

        The new gradient will accumulate onto the previous gradient.

        You can clear the gradient with ``Tensor.clear_grad()`` .

        Args:
            retain_graph(bool, optional): If False, the graph used to compute gradients will be freed. If you would
                like to add more ops to the built graph after calling this method (:code:`backward`), set the parameter
                :code:`retain_graph` to True; the gradients will then be retained. Thus, setting it to False is much more memory-efficient.
                Defaults to False.

        Returns:
            NoneType: None

        Examples:
            .. code-block:: python

                import paddle

                x = paddle.to_tensor(5., stop_gradient=False)
                for i in range(5):
                    y = paddle.pow(x, 4.0)
                    y.backward()
                    print("{}: {}".format(i, x.grad))
                # 0: [500.]
                # 1: [1000.]
                # 2: [1500.]
                # 3: [2000.]
                # 4: [2500.]

                x.clear_grad()
                print("{}".format(x.grad))
                # [0.]

        """
        if framework.in_dygraph_mode():
            if paddle.distributed.get_world_size() > 1:
                scaled_loss = scale_loss(self)
                scaled_loss._run_backward(framework._dygraph_tracer(),
                                          retain_graph)
            else:
                self._run_backward(framework._dygraph_tracer(), retain_graph)
        else:
            raise ValueError(
                "Variable.backward() is only available in DyGraph mode")

    @framework.dygraph_only
    def gradient(self):
        """
        Get the gradient of the current Tensor.

        Returns:
            ndarray: Numpy value of the gradient of the current Tensor

        Examples:
            .. code-block:: python

                import paddle

                x = paddle.to_tensor(5., stop_gradient=False)
                y = paddle.pow(x, 4.0)
                y.backward()
                print("grad of x: {}".format(x.grad))
                # [500.]

        """
        if self._grad_ivar() is None:
            return None

        new_ivar = self._grad_ivar()._copy_to(core.CPUPlace(), True)
        if self._grad_ivar().type == core.VarDesc.VarType.SELECTED_ROWS:
            return (np.array(new_ivar.value().get_selected_rows().get_tensor()),
                    np.array(new_ivar.value().get_selected_rows().rows()))
        else:
            return np.array(new_ivar.value().get_tensor())

    @property
    def grad(self):
        """
        The alias of gradient().
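
        Examples:
            .. code-block:: python

                # A minimal illustration mirroring the ``gradient()`` example:
                # ``grad`` returns the same numpy value.
                import paddle

                x = paddle.to_tensor(5., stop_gradient=False)
                y = paddle.pow(x, 4.0)
                y.backward()
                print(x.grad)  # [500.]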
        """

        return self.gradient()

    def clear_grad(self):
        """
        The alias of clear_gradient().
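
        Examples:
            .. code-block:: python

                # A minimal sketch: clearing resets the accumulated gradient
                # to zero (compare the ``backward`` example above).
                import paddle

                x = paddle.to_tensor(5., stop_gradient=False)
                y = paddle.pow(x, 4.0)
                y.backward()
                x.clear_grad()
                print(x.grad)  # [0.]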
        """
        self.clear_gradient()

    @property
    def inplace_version(self):
        """
        The inplace version of the current Tensor.
        The version number is incremented whenever the current Tensor is modified through an inplace operation.

        **Notes: This is a read-only property**

        Examples:
          .. code-block:: python

            import paddle
            var = paddle.ones(shape=[4, 2, 3], dtype="float32")
            print(var.inplace_version)  # 0

            var[1] = 2.2
            print(var.inplace_version)  # 1

        """
        return self._inplace_version()

    def __str__(self):
        """
        Convert a VarBase object to a readable string.

        Returns(str): A readable string.

        Examples:
            .. code-block:: python

                import paddle
                x = paddle.rand([2, 5])
                print(x)
                
                # Tensor(shape=[2, 5], dtype=float32, place=CPUPlace,
                #        [[0.30574632, 0.55739117, 0.30902600, 0.39413780, 0.44830436],
                #         [0.79010487, 0.53972793, 0.09495186, 0.44267157, 0.72112119]])
        """
        from paddle.tensor.to_string import to_string
        return to_string(self)

    def __deepcopy__(self, memo):
        """
        Deep copy a Tensor; it always performs an actual copy of the data.

        Examples:
            .. code-block:: python

                import paddle
                import copy
                x = paddle.to_tensor(2.)
                y = copy.deepcopy(x)
                
                print(x)
                # Tensor(shape=[1], dtype=float32, place=CPUPlace, stop_gradient=True,
                #        [2.])

                print(y)
                # Tensor(shape=[1], dtype=float32, place=CPUPlace, stop_gradient=True,
                #        [2.])

        """
        if not self.is_leaf:
            raise RuntimeError(
                "Only Leaf Tensor support the deepcopy at the moment, non-Leaf Tensors contains graph information that does't support deepcopy"
            )
        new_varbase = core.VarBase()
        new_varbase.name = self.name + unique_name.generate("_deepcopy")
        memo[id(self)] = new_varbase
        new_varbase.copy_(self, True)
        return new_varbase

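    # Descriptive note: in dygraph mode, a VarBase's ``block`` is always the
    # global block of the default main program, as the property below returns.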
    @property
    def block(self):
        return framework.default_main_program().global_block()

    def __nonzero__(self):
        numel = np.prod(self.shape)
        assert numel == 1, "When a Variable is used as the condition of if/while, it can only contain one element."
        tensor = self.value().get_tensor()
        assert tensor._is_initialized(), "tensor not initialized"
        return bool(np.all(tensor.__array__() > 0))

    def __bool__(self):
        return self.__nonzero__()
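
    # For illustration: ``bool(paddle.to_tensor(1.))`` evaluates to True through
    # ``__nonzero__`` above, while calling ``bool`` on a Tensor with more than
    # one element trips the one-element assertion.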

    for method_name, method in (
        ("__bool__", __bool__), ("__nonzero__", __nonzero__),
        ("_to_static_var", _to_static_var), ("set_value", set_value),
        ("block", block), ("backward", backward), ("clear_grad", clear_grad),
        ("inplace_version", inplace_version), ("grad", grad),
        ("gradient", gradient), ("__str__", __str__), ("__repr__", __str__),
        ("__deepcopy__", __deepcopy__), ("__module__", "paddle"),
        ("__name__", "Tensor")):
        setattr(core.VarBase, method_name, method)
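
    # After this loop runs, every core.VarBase instance exposes the patched
    # methods and properties above, e.g. ``paddle.to_tensor(1.).gradient()``
    # or ``str(tensor)`` via the patched ``__str__``.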

    # NOTE(zhiqiu): pybind11 will set a default __str__ method of enum class.
    # So, we need to overwrite it to a more readable one.
    # See details in https://github.com/pybind/pybind11/issues/2537.
    origin = getattr(core.VarDesc.VarType, "__repr__")

    def dtype_str(dtype):
        if dtype in _PADDLE_DTYPE_2_NUMPY_DTYPE:
            prefix = 'paddle.'
            return prefix + _PADDLE_DTYPE_2_NUMPY_DTYPE[dtype]
        else:
            # for example, paddle.fluid.core.VarDesc.VarType.LOD_TENSOR
            return origin(dtype)

    setattr(core.VarDesc.VarType, "__repr__", dtype_str)
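
    # For illustration (assuming core.VarDesc.VarType.FP32 is a key of
    # _PADDLE_DTYPE_2_NUMPY_DTYPE mapped to 'float32'):
    #   repr(core.VarDesc.VarType.FP32)        # 'paddle.float32'
    #   repr(core.VarDesc.VarType.LOD_TENSOR)  # falls back to the original repr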

    # patch math methods for varbase
    monkey_patch_math_varbase()