# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

15
import paddle
16
from paddle.fluid import Variable, core
17
from paddle.fluid.data_feeder import check_type
18 19
from paddle.fluid.framework import convert_np_dtype_to_dtype_, static_only
from paddle.fluid.layer_helper import LayerHelper
20

21
from ..fluid.variable_index import _setitem_impl_, _setitem_static
22

23 24
__all__ = []

25

26
@static_only
def data(name, shape, dtype=None, lod_level=0):
    """

    This function creates a variable on the global block. The global variable
    can be accessed by all the following operators in the graph. The variable
    is a placeholder that could be fed with input, such as Executor can feed
    input into the variable. When `dtype` is None, the dtype
    will get from the global dtype by `paddle.get_default_dtype()`.

    Args:
       name (str): The name/alias of the variable, see :ref:`api_guide_Name`
           for more details.
       shape (list|tuple): List|Tuple of integers declaring the shape. You can
           set None or -1 at a dimension to indicate the dimension can be of any
           size. For example, it is useful to set changeable batch size as None or -1.
       dtype (np.dtype|str, optional): The type of the data. Supported
           dtype: bool, float16, float32, float64, int8, int16, int32, int64,
           uint8. Default: None. When `dtype` is not set, the dtype will get
           from the global dtype by `paddle.get_default_dtype()`.
       lod_level (int, optional): The LoD level of the LoDTensor. Usually users
           don't have to set this value. Default: 0.

    Returns:
        Variable: The global variable that gives access to the data.

    Examples:
        .. code-block:: python

          import numpy as np
          import paddle
          paddle.enable_static()

          # Creates a variable with fixed size [3, 2, 1]
          # User can only feed data of the same shape to x
          # the dtype is not set, so it will set "float32" by
          # paddle.get_default_dtype(). You can use paddle.get_default_dtype() to
          # change the global dtype
          x = paddle.static.data(name='x', shape=[3, 2, 1])

          # Creates a variable with changeable batch size -1.
          # Users can feed data of any batch size into y,
          # but size of each data sample has to be [2, 1]
          y = paddle.static.data(name='y', shape=[-1, 2, 1], dtype='float32')

          z = x + y

          # In this example, we will feed x and y with np-ndarray "1"
          # and fetch z, like implementing "1 + 1 = 2" in PaddlePaddle
          feed_data = np.ones(shape=[3, 2, 1], dtype=np.float32)

          exe = paddle.static.Executor(paddle.framework.CPUPlace())
          out = exe.run(paddle.static.default_main_program(),
                        feed={
                            'x': feed_data,
                            'y': feed_data
                        },
                        fetch_list=[z.name])

          # np-ndarray of shape=[3, 2, 1], dtype=float32, whose elements are 2
          print(out)

    """
    helper = LayerHelper('data', **locals())
    check_type(name, 'name', (bytes, str), 'data')
    check_type(shape, 'shape', (list, tuple), 'data')

    # `None` and -1 both mean "this dimension can be any size"; normalize to -1.
    shape = [-1 if dim is None else dim for dim in shape]

    # The original duplicated the whole create_global_variable() call in an
    # if/else that differed only in the dtype argument.  `dtype or ...`
    # reproduces the original `if dtype:` truthiness check exactly while
    # collapsing the duplication.
    return helper.create_global_variable(
        name=name,
        shape=shape,
        dtype=dtype or paddle.get_default_dtype(),
        type=core.VarDesc.VarType.LOD_TENSOR,
        stop_gradient=True,
        lod_level=lod_level,
        is_data=True,
        need_check_feed=True,
    )


class InputSpec:
    """
    InputSpec describes the signature information of the model input, such as ``shape`` , ``dtype`` , ``name`` .

    This interface is often used to specify input tensor information of models in high-level API.
    It's also used to specify the tensor information for each input parameter of the forward function
    decorated by `@paddle.jit.to_static`.

    Args:
        shape (tuple(integers)|list[integers]): List|Tuple of integers
            declaring the shape. You can set "None" or -1 at a dimension
            to indicate the dimension can be of any size. For example,
            it is useful to set changeable batch size as "None" or -1.
        dtype (np.dtype|str, optional): The type of the data. Supported
            dtype: bool, float16, float32, float64, int8, int16, int32, int64,
            uint8. Default: float32.
        name (str): The name/alias of the variable, see :ref:`api_guide_Name`
            for more details.

    Examples:
        .. code-block:: python

            from paddle.static import InputSpec

            input = InputSpec([None, 784], 'float32', 'x')
            label = InputSpec([None, 1], 'int64', 'label')

            print(input)  # InputSpec(shape=(-1, 784), dtype=paddle.float32, name=x)
            print(label)  # InputSpec(shape=(-1, 1), dtype=paddle.int64, name=label)
    """

    def __init__(self, shape, dtype='float32', name=None, stop_gradient=False):
        # Normalize the shape: replace `None` with -1 and store as a tuple.
        self.shape = self._verify(shape)
        # Convert dtype into the unified VarType representation.  A `None`
        # dtype is kept as-is so it can be resolved later from the global
        # default dtype (see `_create_feed_layer` -> `data`).
        if dtype is not None:
            if not isinstance(dtype, core.VarDesc.VarType):
                dtype = convert_np_dtype_to_dtype_(dtype)
        self.dtype = dtype
        self.name = name
        self.stop_gradient = stop_gradient

    def _create_feed_layer(self):
        # Materialize this spec as a feedable placeholder on the global block.
        return data(self.name, shape=self.shape, dtype=self.dtype)

    def __repr__(self):
        return '{}(shape={}, dtype={}, name={}, stop_gradient={})'.format(
            type(self).__name__,
            self.shape,
            self.dtype,
            self.name,
            self.stop_gradient,
        )

    @classmethod
    def from_tensor(cls, tensor, name=None):
        """
        Generates a InputSpec based on the description of input tensor.

        Args:
            tensor(Tensor): the source tensor to generate a InputSpec instance

        Returns:
            A InputSpec instance generated from Tensor.

        Examples:
            .. code-block:: python

                import paddle
                from paddle.static import InputSpec

                paddle.disable_static()

                x = paddle.ones([2, 2], dtype="float32")
                x_spec = InputSpec.from_tensor(x, name='x')
                print(x_spec)  # InputSpec(shape=(2, 2), dtype=paddle.float32, name=x)

        """
        if isinstance(tensor, (Variable, core.eager.Tensor)):
            return cls(tensor.shape, tensor.dtype, name or tensor.name)
        else:
            raise ValueError(
                "Input `tensor` should be a Tensor, but received {}.".format(
                    type(tensor).__name__
                )
            )

    @classmethod
    def from_numpy(cls, ndarray, name=None):
        """
        Generates a InputSpec based on the description of input np.ndarray.

        Args:
            tensor(Tensor): the source numpy ndarray to generate a InputSpec instance

        Returns:
            A InputSpec instance generated from Tensor.

        Examples:
            .. code-block:: python

                import numpy as np
                from paddle.static import InputSpec

                x = np.ones([2, 2], np.float32)
                x_spec = InputSpec.from_numpy(x, name='x')
                print(x_spec)  # InputSpec(shape=(2, 2), dtype=paddle.float32, name=x)

        """
        return cls(ndarray.shape, ndarray.dtype, name)

    def batch(self, batch_size):
        """
        Inserts `batch_size` in front of the `shape`.

        Args:
            batch_size(int): the inserted integer value of batch size.

        Returns:
            The original InputSpec instance by inserting `batch_size` in front of `shape`.

        Examples:
            .. code-block:: python

                from paddle.static import InputSpec

                x_spec = InputSpec(shape=[64], dtype='float32', name='x')
                x_spec.batch(4)
                print(x_spec) # InputSpec(shape=(4, 64), dtype=paddle.float32, name=x)

        """
        if isinstance(batch_size, (list, tuple)):
            if len(batch_size) != 1:
                raise ValueError(
                    "Length of batch_size: {} shall be 1, but received {}.".format(
                        batch_size, len(batch_size)
                    )
                )
            # BUG FIX: the single element of a length-1 list/tuple is at
            # index 0; the original indexed [1], which always raised
            # IndexError after the length check above passed.
            batch_size = batch_size[0]
        elif not isinstance(batch_size, int):
            raise TypeError(
                "type(batch_size) shall be `int`, but received {}.".format(
                    type(batch_size).__name__
                )
            )

        new_shape = [batch_size] + list(self.shape)
        self.shape = tuple(new_shape)

        return self

    def unbatch(self):
        """
        Removes the first element of `shape`.

        Returns:
            The original InputSpec instance by removing the first element of `shape` .

        Examples:
            .. code-block:: python

                from paddle.static import InputSpec

                x_spec = InputSpec(shape=[4, 64], dtype='float32', name='x')
                x_spec.unbatch()
                print(x_spec) # InputSpec(shape=(64,), dtype=paddle.float32, name=x)

        """
        if len(self.shape) == 0:
            raise ValueError(
                "Not support to unbatch a InputSpec when len(shape) == 0."
            )

        self.shape = self._verify(self.shape[1:])
        return self

    def _verify(self, shape):
        """
        Verifies the input shape and modifies `None` into `-1`.
        """
        if not isinstance(shape, (list, tuple)):
            raise TypeError(
                "Type of `shape` in InputSpec should be one of (tuple, list), but received {}.".format(
                    type(shape).__name__
                )
            )

        # BUG FIX: copy into a list before item assignment below.  The
        # original mutated `shape` in place, which raised TypeError for
        # tuple inputs (e.g. `unbatch` passes a tuple slice) and silently
        # mutated a caller-owned list.
        shape = list(shape)
        for i, ele in enumerate(shape):
            if ele is not None:
                if not isinstance(ele, int):
                    raise ValueError(
                        "shape[{}] should be an `int`, but received `{}`:{}.".format(
                            i, type(ele).__name__, ele
                        )
                    )
            if ele is None or ele < -1:
                shape[i] = -1

        return tuple(shape)

    def __hash__(self):
        # Note(Aurelius84): `name` is not considered as a field to compute hashkey.
        # Because it's no need to generate a new program in following cases while using
        # @paddle.jit.to_static.
        #
        # Case 1:
        #      foo(x_var)
        #      foo(y_var)
        #  x_var and y_var hold same shape and dtype, they should share a same program.
        #
        #
        # Case 2:
        #      foo(x_var)
        #      foo(x_np)  # x_np is a numpy.ndarray.
        #  x_var and x_np hold same shape and dtype, they should also share a same program.
        return hash((tuple(self.shape), self.dtype, self.stop_gradient))

    def __eq__(self, other):
        # Unlike __hash__, equality DOES compare `name` (hash equality is a
        # necessary but not sufficient condition for __eq__, so this is safe).
        slots = ['shape', 'dtype', 'name', 'stop_gradient']
        return type(self) is type(other) and all(
            getattr(self, attr) == getattr(other, attr) for attr in slots
        )

    def __ne__(self, other):
        return not self == other


def setitem(x, index, value):
    """
    Set `value` into `x` at the positions described by `index`.

    Args:
        x(Tensor): input Tensor.
        index(Scalar|Tuple|List|Tensor): Where should be set value.
        value(Scalar|Tensor): The value which is going to be set.

    [How to write index?]
    1. ':' -> slice(),
       (1) a[:]=v -> setitem(a, slice(None,None,None), v)
       (2) a[1::2] -> setitem(a, slice(1,None,2), v)

    2. if there are multiple indexes for axes, use TUPLE (Not LIST) to pack them.
       (1) a[1, 2]=v -> setitem(a, (1, 2), v)
       (2) a[[1,2],[2,3]]=v -> setitem(a, ([1,2],[2,3]), v)
       (3) a[1,:, 3] = v -> setitem(a, (1, slice(None,None,None),3), v)
       (4) a[1, ..., 2]=v -> setitem(a, (1, ..., 2), v)

    3. You can always use TUPLE as index input, even there is only one index.
       (1) a[Tensor([10,10])]=v -> setitem(a, (Tensor([10,10]),), v)
       (2) a[1] = v -> setitem(a, (1,), v)
    """
    # (NOTE): XPU builds currently lack an index_put_xpu kernel, so they
    # fall back to the legacy implementation; all other builds dispatch to
    # the static setitem path.
    impl = _setitem_impl_ if core.is_compiled_with_xpu() else _setitem_static
    return impl(x, index, value)