未验证 提交 17babe4d 编写于 作者: W wangchaochaohu 提交者: GitHub

add full op API (#23112)

上级 bfb07aaf
......@@ -52,7 +52,7 @@ import paddle.nn
# from .tensor.creation import zeros_like #DEFINE_ALIAS
# from .tensor.creation import arrange #DEFINE_ALIAS
# from .tensor.creation import eye #DEFINE_ALIAS
# from .tensor.creation import full #DEFINE_ALIAS
from .tensor.creation import full #DEFINE_ALIAS
# from .tensor.creation import linspace #DEFINE_ALIAS
# from .tensor.creation import full_like #DEFINE_ALIAS
# from .tensor.creation import triu #DEFINE_ALIAS
......
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
from op_test import OpTest
import paddle.fluid.core as core
from paddle.fluid.op import Operator
import paddle.fluid as fluid
import paddle.tensor as tensor
from paddle.fluid import compiler, Program, program_guard
# Test python API
class TestFullAPI(unittest.TestCase):
    def test_api(self):
        """Build ``tensor.full`` outputs using every supported way of
        specifying ``shape`` (Python list, list containing a scalar Tensor,
        and a 1-D shape Tensor), run them in one Executor pass, and compare
        each fetched result against ``np.full``."""
        # Scalar [1]-shaped tensors used as a dimension inside a shape list.
        positive_2_int32 = fluid.layers.fill_constant([1], "int32", 2)
        positive_2_int64 = fluid.layers.fill_constant([1], "int64", 2)
        # 1-D tensors used directly as the whole shape (fed at run time).
        shape_tensor_int32 = fluid.data(
            name="shape_tensor_int32", shape=[2], dtype="int32")
        shape_tensor_int64 = fluid.data(
            name="shape_tensor_int64", shape=[2], dtype="int64")

        # shape as a plain list, with explicit device placement.
        out_1 = tensor.full(
            shape=[1, 2], dtype="float32", fill_value=1.1, device='gpu')

        # shape as a list whose second element is an int32 scalar tensor.
        out_2 = tensor.full(
            shape=[1, positive_2_int32],
            dtype="float32",
            fill_value=1.1,
            device='cpu')

        # Same as out_2 but with an int64 scalar tensor dimension.
        out_3 = tensor.full(
            shape=[1, positive_2_int64],
            dtype="float32",
            fill_value=1.1,
            device='gpu')

        # shape as an int32 shape tensor, writing the result INTO out_3
        # (so after this op out_3 and out_4 are the same variable filled
        # with 1.2 — see the res_3/res_4 assertions below).
        out_4 = tensor.full(
            shape=shape_tensor_int32,
            dtype="float32",
            fill_value=1.2,
            out=out_3)

        # shape as an int64 shape tensor, with stop_gradient disabled.
        out_5 = tensor.full(
            shape=shape_tensor_int64,
            dtype="float32",
            fill_value=1.1,
            device='gpu',
            stop_gradient=False)

        # dtype given as a numpy dtype object rather than a string.
        out_6 = tensor.full(
            shape=shape_tensor_int64, dtype=np.float32, fill_value=1.1)

        exe = fluid.Executor(place=fluid.CPUPlace())
        res_1, res_2, res_3, res_4, res_5, res_6 = exe.run(
            fluid.default_main_program(),
            feed={
                "shape_tensor_int32": np.array([1, 2]).astype("int32"),
                "shape_tensor_int64": np.array([1, 2]).astype("int64"),
            },
            fetch_list=[out_1, out_2, out_3, out_4, out_5, out_6])

        assert np.array_equal(res_1, np.full([1, 2], 1.1, dtype="float32"))
        assert np.array_equal(res_2, np.full([1, 2], 1.1, dtype="float32"))
        # res_3 equals 1.2 (not 1.1) because out_4 overwrote out_3 in place.
        assert np.array_equal(res_3, np.full([1, 2], 1.2, dtype="float32"))
        assert np.array_equal(res_4, np.full([1, 2], 1.2, dtype="float32"))
        assert np.array_equal(res_5, np.full([1, 2], 1.1, dtype="float32"))
        assert np.array_equal(res_6, np.full([1, 2], 1.1, dtype="float32"))
class TestFullOpError(unittest.TestCase):
    def test_errors(self):
        """Check that ``tensor.full`` rejects every class of invalid
        argument: unknown/unsupported dtypes, a mismatched ``out``
        variable, and malformed ``shape`` specifications."""
        with program_guard(Program(), Program()):
            # for ci coverage
            var_int16 = fluid.layers.data(name='x1', shape=[1], dtype="int16")

            # A dtype string that does not name any type at all.
            with self.assertRaises(ValueError):
                tensor.full(shape=[1], fill_value=5, dtype='uint4')

            # int16 is not an accepted output dtype, even with a
            # pre-created int16 ``out`` variable.
            with self.assertRaises(TypeError):
                tensor.full(
                    shape=[1], fill_value=5, dtype='int16', out=var_int16)

            # The argument dtype of full must be one of bool, float16,
            # float32, float64, int32 or int64.
            fluid.layers.data(name='x2', shape=[1], dtype="int32")
            with self.assertRaises(TypeError):
                tensor.full(shape=[1], fill_value=5, dtype='uint8')

            # shape must be a list, tuple or Variable — a bare int fails.
            with self.assertRaises(TypeError):
                tensor.full(shape=1, dtype="float32", fill_value=1)

            # shape must contain at least one dimension.
            with self.assertRaises(AssertionError):
                tensor.full(shape=[], dtype="float32", fill_value=1)

            # A shape tensor must have dtype int32 or int64.
            with self.assertRaises(TypeError):
                bad_shape = fluid.data(
                    name="shape_tensor", shape=[2], dtype="float32")
                tensor.full(shape=bad_shape, dtype="float32", fill_value=1)

            # A tensor element inside a shape list must also be int32/int64.
            with self.assertRaises(TypeError):
                bad_dim = fluid.data(
                    name="shape_tensor_list", shape=[1], dtype="bool")
                tensor.full(shape=[bad_dim, 2], dtype="float32", fill_value=1)
if __name__ == "__main__":
    # Run all test cases in this file when executed as a script.
    unittest.main()
......@@ -29,7 +29,7 @@
# from .creation import zeros_like #DEFINE_ALIAS
# from .creation import arrange #DEFINE_ALIAS
# from .creation import eye #DEFINE_ALIAS
# from .creation import full #DEFINE_ALIAS
from .creation import full #DEFINE_ALIAS
# from .creation import linspace #DEFINE_ALIAS
# from .creation import full_like #DEFINE_ALIAS
# from .creation import triu #DEFINE_ALIAS
......
......@@ -13,24 +13,103 @@
# limitations under the License.
# TODO: define functions to get create a tensor
# __all__ = ['create_tensor',
# 'create_lod_tensor',
# 'create_random_int_lodtensor',
# 'crop_tensor',
# 'diag', 'eye',
# 'fill_constant',
# 'get_tensor_from_selected_rows',
# 'linspace',
# 'ones',
# 'ones_like',
# 'range',
# 'zeros',
# 'zeros_like',
# 'arrange',
# 'eye',
# 'full',
# 'linspace',
# 'full_like',
# 'triu',
# 'tril',
# 'meshgrid']
from __future__ import print_function
from ..fluid.framework import Variable
from ..fluid.initializer import Constant
from ..fluid.layer_helper import LayerHelper
from ..fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype, convert_dtype
from ..fluid.framework import convert_np_dtype_to_dtype_, in_dygraph_mode, _varbase_creator, device_guard
from ..fluid.layers import fill_constant
__all__ = [
'create_tensor',
# 'create_lod_tensor',
# 'create_random_int_lodtensor',
# 'crop_tensor',
# 'diag', 'eye',
# 'fill_constant',
# 'get_tensor_from_selected_rows',
# 'linspace',
# 'ones',
# 'ones_like',
# 'range',
# 'zeros',
# 'zeros_like',
# 'arrange',
# 'eye',
'full',
# 'linspace',
# 'full_like',
# 'triu',
# 'tril',
# 'meshgrid'
]
def full(shape,
         fill_value,
         out=None,
         dtype=None,
         device=None,
         stop_gradient=True,
         name=None):
    """
    Return a Tensor of the given ``shape`` filled with ``fill_value``.

    Args:
        shape(list|tuple|Variable): Shape of the Tensor to be created.
            The data type is ``int32`` or ``int64`` . If ``shape`` is a list or tuple,
            the elements of it should be integers or Tensors with shape [1].
            If ``shape`` is a Variable, it should be an 1-D Tensor .
        fill_value(bool|float|int): The constant value used to initialize the
            Tensor to be created.
        out(Variable, optional): Optional output which can be any created
            Variable that meets the requirements to store the result of operation.
            If out is None, a new Variable will be created to store the result.
        dtype(np.dtype|core.VarDesc.VarType|str, optional): Data type of the output tensor
            which can be bool, float16, float32, float64, int32, int64. If ``dtype``
            is None, the data type of the created tensor is ``float32``.
        device(str, optional): Specifies whether the Tensor is created
            on the GPU or CPU.
        stop_gradient(bool, optional): Indicating if we stop gradient from the
            current (out) Variable. Default value is True.
        name(str, optional): The default value is None. Normally there is no need for user to set this
            property. For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Variable: Tensor of ``shape`` filled with ``fill_value``.

    Examples:
        .. code-block:: python

          import paddle.tensor as tensor
          import paddle.fluid as fluid

          data1 = tensor.full(shape=[2,1], fill_value=0, dtype='int64') # data1=[[0],[0]]
          data2 = tensor.full(shape=[2,1], fill_value=5, dtype='int64', device='gpu') # data2=[[5],[5]]

          # attr shape is a list which contains Variable Tensor.
          positive_2 = fluid.layers.fill_constant([1], "int32", 2)
          data3 = tensor.full(shape=[1, positive_2], dtype='float32', fill_value=1.5) # data3=[1.5, 1.5]

          # attr shape is a Variable Tensor.
          shape = fluid.layers.fill_constant([1,2], "int32", 2) # shape=[2,2]
          data4 = tensor.full(shape=shape, dtype='bool', fill_value=True) # data4=[[True,True],[True,True]]
    """
    helper = LayerHelper("full", **locals())

    # float32 is the documented default when the caller gives no dtype.
    if dtype is None:
        dtype = 'float32'

    # Validate dtype and shape early so the user gets a clear error before
    # any op is appended to the program.
    check_dtype(dtype, 'create data type',
                ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
                'full')
    check_type(shape, 'shape', (Variable, list, tuple), 'full')
    if out is None:
        out = helper.create_variable_for_type_inference(dtype=dtype)

    out.stop_gradient = stop_gradient

    # device_guard pins the fill_constant op to the requested device
    # ('cpu'/'gpu'); a None device leaves placement to the executor.
    with device_guard(device):
        out = fill_constant(shape=shape, dtype=dtype, value=fill_value, out=out)

    return out
......@@ -113,6 +113,7 @@ packages=['paddle',
'paddle.reader',
'paddle.distributed',
'paddle.fluid',
'paddle.tensor',
'paddle.fluid.dygraph',
'paddle.fluid.dygraph.dygraph_to_static',
'paddle.fluid.proto',
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册