# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import inspect
import numpy as np

import paddle
from .. import framework
from .. import core
from .. import unique_name
from ..framework import Variable, Parameter, ParamBase
from .base import switch_to_static_graph
from .math_op_patch import monkey_patch_math_varbase
from .parallel import scale_loss


def monkey_patch_varbase():
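    """
    Monkey patch dygraph-oriented methods and properties (e.g. ``backward``,
    ``gradient``, ``set_value``, ``__str__``, ``__deepcopy__``) onto
    ``core.VarBase`` so that dygraph Tensors expose them directly.
    """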
    @switch_to_static_graph
    def _to_static_var(self, to_parameter=False, **kwargs):
        """
        **Notes**:
            **This API is ONLY available in Dygraph mode**

        Transform a VarBase into a static Variable with the same attributes. It's a low-level interface used
        in dy2static and should not be called directly.

        Args:
            to_parameter (bool): It takes effect only if the input is a VarBase. If set to True,
                                 the VarBase will be converted into a framework.Parameter. Otherwise, it will
                                 be converted into a framework.Variable. Default: False.

        Examples:
            .. code-block:: python

                import paddle.fluid as fluid
                from paddle.fluid.dygraph.base import to_variable
                import numpy as np

                data = np.ones([3, 1024], dtype='float32')
                with fluid.dygraph.guard():
                    var_base = to_variable(data)
                    static_var = var_base._to_static_var()

        """

        # Note: getattr(self, attr, None) would trigger x.grad = x.gradient(), but gradient()
        # is only available in dygraph mode, so the call would fail. Therefore, dygraph-only
        # properties such as 'grad' must not go through getattr(self, attr, None).
        attr_not_need_keys = ['grad']
        if isinstance(self, ParamBase):
            attr_kwargs = self.__dict__.copy()
        else:
            attr_names = []
            for name in dir(self):
                if name not in attr_not_need_keys and not (
                        inspect.ismethod(getattr(self, name)) or
                        name.startswith('_')):
                    attr_names.append(name)
            attr_kwargs = {name: getattr(self, name) for name in attr_names}

        attr_keys = ['block', 'shape', 'dtype', 'type', 'name', 'persistable']
        for attr in attr_keys:
            attr_kwargs[attr] = getattr(self, attr, None)

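        # Explicitly passed kwargs take precedence over the collected attributes.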
        attr_kwargs.update(kwargs)

        if to_parameter or isinstance(self, ParamBase):
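            # Parameter enforces persistable=True itself; drop the collected
            # attribute to avoid passing a conflicting keyword argument.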
            del attr_kwargs['persistable']
            static_var = Parameter(**attr_kwargs)
        else:
            static_var = Variable(**attr_kwargs)
        return static_var

    # TODO(jiabin): move this to the C++ side if we find a performance issue with it
    @framework.dygraph_only
    def set_value(self, value):
        """
        **Notes**:
            **This API is ONLY available in Dygraph mode**

        Set a new value for this Variable.

        Args:
            value (Variable|np.ndarray): the new value.

        Examples:
            .. code-block:: python

                import paddle.fluid as fluid
                from paddle.fluid.dygraph.base import to_variable
                from paddle.fluid.dygraph import Linear
                import numpy as np

                data = np.ones([3, 1024], dtype='float32')
                with fluid.dygraph.guard():
                    linear = fluid.dygraph.Linear(1024, 4)
                    t = to_variable(data)
                    linear(t)  # call with default weight
                    custom_weight = np.random.randn(1024, 4).astype("float32")
                    linear.weight.set_value(custom_weight)  # change existing weight
                    out = linear(t)  # call with different weight

        """
        assert isinstance(value, (np.ndarray, core.VarBase)), \
            "set_value only supports a numpy.ndarray or VarBase as the value argument"

        value_np = value
        if isinstance(value, core.VarBase):
            value_np = value.numpy()

        self_tensor_np = self.numpy()

        assert self_tensor_np.shape == value_np.shape, \
            "Variable shape does not match: Variable [ {} ] expects a tensor with shape {} but the given tensor has shape {}".format(
                self.name, self_tensor_np.shape, value_np.shape)

        assert self_tensor_np.dtype == value_np.dtype, \
            "Variable dtype does not match: Variable [ {} ] expects a tensor with dtype {} but the given tensor has dtype {}".format(
                self.name, self_tensor_np.dtype, value_np.dtype)

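        # Copy the numpy value into the underlying tensor in place, on the
        # currently expected place (CPU or GPU).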
        self.value().get_tensor().set(value_np,
                                      framework._current_expected_place())

    @framework.dygraph_only
    def backward(self, retain_graph=False):
        """
        Run backward of the current graph, which starts from the current Tensor.

        The new gradient will be accumulated on the previous gradient.

        You can clear the gradient by calling ``Tensor.clear_grad()`` .

        Args:
            retain_graph(bool, optional): If False, the graph used to compute grads will be freed. If you would
                like to add more ops to the built graph after calling this method (:code:`backward`), set the parameter
                :code:`retain_graph` to True, then the grads will be retained. Thus, setting it to False is much more memory-efficient.
                Defaults to False.

        Returns:
            NoneType: None

        Examples:
            .. code-block:: python

                import paddle

                x = paddle.to_tensor(5., stop_gradient=False)
                for i in range(5):
                    y = paddle.pow(x, 4.0)
                    y.backward()
                    print("{}: {}".format(i, x.grad))
                # 0: [500.]
                # 1: [1000.]
                # 2: [1500.]
                # 3: [2000.]
                # 4: [2500.]

                x.clear_grad()
                print("{}".format(x.grad))
                # [0.]

        """
        if framework.in_dygraph_mode():
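            # With multiple trainers (data parallel), scale the loss with
            # scale_loss (imported from .parallel) before running backward.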
            if paddle.distributed.get_world_size() > 1:
                scaled_loss = scale_loss(self)
                scaled_loss._run_backward(framework._dygraph_tracer(),
                                          retain_graph)
            else:
                self._run_backward(framework._dygraph_tracer(), retain_graph)
        else:
            raise ValueError(
                "Variable.backward() is only available in DyGraph mode")

    @framework.dygraph_only
    def gradient(self):
        """
        Get the gradient of the current Tensor.

        Returns:
            ndarray: Numpy value of the gradient of the current Tensor.

        Examples:
            .. code-block:: python

                import paddle

                x = paddle.to_tensor(5., stop_gradient=False)
                y = paddle.pow(x, 4.0)
                y.backward()
                print("grad of x: {}".format(x.grad))
                # [500.]

        """
        if self._grad_ivar() is None:
            return None

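        # Copy the gradient to CPU before converting it to numpy. A
        # SELECTED_ROWS gradient (e.g. from a sparse embedding lookup) is
        # returned as a (values, rows) tuple.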
        new_ivar = self._grad_ivar()._copy_to(core.CPUPlace(), True)
        if self._grad_ivar().type == core.VarDesc.VarType.SELECTED_ROWS:
            return (np.array(new_ivar.value().get_selected_rows().get_tensor()),
                    np.array(new_ivar.value().get_selected_rows().rows()))
        else:
            return np.array(new_ivar.value().get_tensor())

    @property
    def grad(self):
        """
        The alias of gradient().
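
        Examples:
            .. code-block:: python

                # A minimal sketch mirroring the ``gradient()`` example above;
                # ``grad`` returns the same numpy value.
                import paddle

                x = paddle.to_tensor(5., stop_gradient=False)
                y = paddle.pow(x, 4.0)
                y.backward()
                print(x.grad)  # [500.]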
        """

        return self.gradient()

    def clear_grad(self):
        """
        The alias of clear_gradient().
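
        Examples:
            .. code-block:: python

                # A minimal sketch: clear the gradient accumulated by backward
                # (mirrors the ``backward`` example above).
                import paddle

                x = paddle.to_tensor(5., stop_gradient=False)
                y = paddle.pow(x, 4.0)
                y.backward()
                x.clear_grad()
                print(x.grad)  # [0.]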
        """
        self.clear_gradient()

    @property
    def inplace_version(self):
        """
        The inplace version of current Tensor.
        The version number is incremented whenever the current Tensor is modified through an inplace operation.

        **Notes: This is a read-only property**

        Examples:
          .. code-block:: python

            import paddle
            var = paddle.ones(shape=[4, 2, 3], dtype="float32")
            print(var.inplace_version)  # 0

            var[1] = 2.2
            print(var.inplace_version)  # 1

        """
        return self._inplace_version()

    def __str__(self):
        """
        Convert a VarBase object to a readable string.

        Returns:
            str: A readable string.

        Examples:
            .. code-block:: python

                import paddle

                x = paddle.rand([2, 5])
                print(x)

                # Tensor(shape=[2, 5], dtype=float32, place=CPUPlace,
                #        [[0.30574632, 0.55739117, 0.30902600, 0.39413780, 0.44830436],
                #         [0.79010487, 0.53972793, 0.09495186, 0.44267157, 0.72112119]])
        """
        from paddle.tensor.to_string import to_string
        return to_string(self)

    def __deepcopy__(self, memo):
        """
        Deep copy a Tensor; it always performs a real Tensor copy.

        Examples:
            .. code-block:: python

                import paddle
                import copy
                x = paddle.to_tensor(2.)
                y = copy.deepcopy(x)
                
                print(x)
                # Tensor(shape=[1], dtype=float32, place=CPUPlace, stop_gradient=True,
                #        [2.])

                print(y)
                # Tensor(shape=[1], dtype=float32, place=CPUPlace, stop_gradient=True,
                #        [2.])

        """
        if not self.is_leaf:
            raise RuntimeError(
                "Only leaf Tensors support deepcopy at the moment; non-leaf Tensors contain graph information that doesn't support deepcopy"
            )
        new_varbase = core.VarBase()
        new_varbase.name = self.name + unique_name.generate("_deepcopy")
        memo[id(self)] = new_varbase
        new_varbase.copy_(self, True)
        return new_varbase

    @property
    def block(self):
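        """
        The global block of the default main program. It is used as the
        ``block`` attribute when this VarBase is converted to a static
        Variable via ``_to_static_var``.
        """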
        return framework.default_main_program().global_block()

    def __nonzero__(self):
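        # A Tensor is only convertible to bool when it contains exactly one
        # element; it is truthy when that element is greater than zero.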
        numel = np.prod(self.shape)
        assert numel == 1, "When a Variable is used as the condition of if/while, it can contain only one element."
        tensor = self.value().get_tensor()
        assert tensor._is_initialized(), "The tensor is not initialized."
        return bool(np.all(tensor.__array__() > 0))

    def __bool__(self):
        return self.__nonzero__()

    for method_name, method in (
        ("__bool__", __bool__), ("__nonzero__", __nonzero__),
        ("_to_static_var", _to_static_var), ("set_value", set_value),
        ("block", block), ("backward", backward), ("clear_grad", clear_grad),
        ("inplace_version", inplace_version), ("grad", grad),
        ("gradient", gradient), ("__str__", __str__), ("__repr__", __str__),
        ("__deepcopy__", __deepcopy__), ("__module__", "paddle"),
        ("__name__", "Tensor")):
        setattr(core.VarBase, method_name, method)

    # patch math methods for varbase
    monkey_patch_math_varbase()