Unverified commit bdc2c2db, authored by W wangchaochaohu, committed by GitHub

full Op: remove device, out and stop_gradient parameters for API 2.0 test=develop (#25257)

Parent 548cdbc5
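For orientation before the diff: this commit drops the `device`, `out` and `stop_gradient` arguments from the tensor-creation APIs and adds an optional `name`. A minimal sketch of the resulting call style, based only on the signatures changed below (`paddle.enable_imperative()` is the mode used in the updated docstring examples):

import paddle

paddle.enable_imperative()  # imperative mode, as in the updated docstring examples

# device, out and stop_gradient are no longer passed; name stays optional
data = paddle.full(shape=[2, 1], fill_value=0, dtype='int64')   # [[0], [0]]
zeros = paddle.zeros(shape=[3, 2], dtype='float32')             # [[0., 0.], [0., 0.], [0., 0.]]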
@@ -620,7 +620,7 @@ def assign(input, output=None):
     return output


-def fill_constant(shape, dtype, value, force_cpu=False, out=None):
+def fill_constant(shape, dtype, value, force_cpu=False, out=None, name=None):
     """
     :alias_main: paddle.fill_constant
     :alias: paddle.fill_constant,paddle.tensor.fill_constant,paddle.tensor.creation.fill_constant
@@ -638,12 +638,14 @@ def fill_constant(shape, dtype, value, force_cpu=False, out=None):
             If ``shape`` is an Variable, it should be an 1-D Tensor .
         dtype(np.dtype|core.VarDesc.VarType|str): Data type of the output tensor which can
             be float16, float32, float64, int32, int64.
-        value(float16|float32|float64|int32|int64|Variable): The constant value used to initialize
+        value(bool|float|int|Variable): The constant value used to initialize
             the Tensor to be created. If value is an Variable, it should be an 1-D Tensor.
         force_cpu(bool): data should be on CPU if it's true, default value is False.
         out(Variable, optional): Optional output which can be any created
             Variable that meets the requirements to store the result of operation.
             if out is None, a new Varibale will be create to store the result.
+        name(str, optional): The default value is None. Normally there is no need for user to set this
+            property. For more information, please refer to :ref:`api_guide_Name`.

     Returns:
         Variable: Tensor which is created according to shape and dtype.
@@ -666,19 +668,16 @@ def fill_constant(shape, dtype, value, force_cpu=False, out=None):
          data3 = fluid.layers.fill_constant(shape=[1, positive_2], dtype='float32', value=1.5) # data3=[1.5, 1.5]

          # attr shape is an Variable Tensor.
-         shape = fluid.layers.fill_constant([1,2], "int32", 2) # shape=[2,2]
+         shape = fluid.layers.fill_constant([2], "int32", 2) # shape=[2,2]
          data4 = fluid.layers.fill_constant(shape=shape, dtype='bool', value=True) # data4=[[True,True],[True,True]]

          # attr value is an Variable Tensor.
          val = fluid.layers.fill_constant([1], "float32", 2.0) # val=[2.0]
          data5 = fluid.layers.fill_constant(shape=[2,1], value=val, dtype='float32') #data5=[[2.0],[2.0]]
     """
-    inputs = {}
     attrs = {'force_cpu': force_cpu}
-    if isinstance(value, Variable):
-        inputs['ValueTensor'] = value
-    else:
-        attrs['value'] = float(value)
+    if not isinstance(value, Variable):
        if convert_dtype(dtype) in ['int64', 'int32']:
            attrs['str_value'] = str(int(value))
        else:
@@ -702,13 +701,19 @@ def fill_constant(shape, dtype, value, force_cpu=False, out=None):
        out.stop_gradient = True
        return out

+    helper = LayerHelper("fill_constant", **locals())
+    inputs = {}
+    if isinstance(value, Variable):
+        inputs['ValueTensor'] = value
+
     check_dtype(dtype, 'dtype',
                 ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
                 'fill_constant')
     check_type(shape, 'shape', (Variable, list, tuple), 'fill_constant')
     if isinstance(shape, Variable):
-        check_variable_and_dtype(shape, 'shape', ['int32', 'int64'],
-                                 'fill_constant')
+        check_dtype(shape.dtype, 'shape', ['int32', 'int64'], 'fill_constant')
     if out is not None:
         check_variable_and_dtype(out, 'out', [convert_dtype(dtype)],
                                  'fill_constant')
@@ -1048,7 +1053,7 @@ def ones(shape, dtype, force_cpu=False):
     return fill_constant(value=1.0, **locals())


-def zeros(shape, dtype, force_cpu=False):
+def zeros(shape, dtype, force_cpu=False, name=None):
     """
     The OP creates a tensor of specified :attr:`shape` and :attr:`dtype`, and fills it with 0.
     Its :attr:`stop_gradient` will be set to True to stop gradient computation.
@@ -1060,6 +1065,8 @@ def zeros(shape, dtype, force_cpu=False):
         force_cpu (bool, optional): Whether force to store the output tensor in CPU memory.
             If :attr:`force_cpu` is False, the output tensor will be stored in running device memory.
             Default: False.
+        name(str, optional): The default value is None. Normally there is no need for user to set this
+            property. For more information, please refer to :ref:`api_guide_Name`.

     Returns:
         Variable: A tensor of data type :attr:`dtype` with shape :attr:`shape` and all elements set to 0.
@@ -1070,10 +1077,6 @@ def zeros(shape, dtype, force_cpu=False):
          import paddle.fluid as fluid
          data = fluid.layers.zeros(shape=[3, 2], dtype='float32') # [[0., 0.], [0., 0.], [0., 0.]]
     """
-    check_type(shape, 'shape', (list, tuple), 'zeros')
-    check_dtype(dtype, 'create data type',
-                ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
-                'zeros')
     return fill_constant(value=0.0, **locals())
...
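A short usage sketch for the `fill_constant` change above, adapted from the docstring examples in this hunk; the optional `name` argument is the one this commit adds, and the string passed to it here is just an illustrative label:

import paddle.fluid as fluid

# value may be a Python scalar or a 1-D Variable
val = fluid.layers.fill_constant(shape=[1], dtype="float32", value=2.0)    # [2.0]
data = fluid.layers.fill_constant(
    shape=[2, 1], dtype="float32", value=val, name="const_2")              # [[2.0], [2.0]]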
@@ -22,8 +22,8 @@ import paddle
 import paddle.fluid.core as core
 from paddle.fluid.op import Operator
 import paddle.fluid as fluid
-from paddle.fluid import compiler, Program, program_guard
 import numpy as np
+from paddle.fluid import compiler, Program, program_guard


 # Situation 1: Attr(shape) is a list(without tensor)
@@ -85,16 +85,14 @@ class TestFillConstantOp4(OpTest):
 class TestFillConstantOp5(unittest.TestCase):
     def test_errors(self):
-        with fluid.program_guard(fluid.Program()):
-            data = fluid.data(name="X", shape=[1], dtype="float32")
-            out = paddle.zeros(shape=[1], out=data, dtype="float32")
+        with program_guard(Program()):
+            out_np = np.zeros(shape=(1), dtype='float32')
+            out = paddle.zeros(shape=[1], dtype="float32")
             place = fluid.CPUPlace()
             exe = fluid.Executor(place)
-            result = exe.run(feed={"X": np.array(
-                [0.1], dtype="float32")},
-                             fetch_list=[data, out])
-            self.assertEqual(result[0], result[1])
-        with fluid.program_guard(fluid.Program()):
+            result = exe.run(fetch_list=[out])
+            self.assertEqual((result == out_np).all(), True)
+        with program_guard(Program()):
             data = fluid.data(name="X", shape=[1], dtype="float32")
             out = paddle.ones(shape=[1], out=data, dtype="float32")
             place = fluid.CPUPlace()
@@ -389,98 +387,5 @@ class TestFillConstantOpError(unittest.TestCase):
         self.assertRaises(TypeError, test_shape_tensor_list_dtype)


-class ApiZerosTest(unittest.TestCase):
-    def test_out(self):
-        with fluid.program_guard(fluid.Program()):
-            zeros = paddle.zeros(shape=[10], dtype="float64")
-            place = fluid.CPUPlace()
-            exe = fluid.Executor(place)
-            result, = exe.run(fetch_list=[zeros])
-            expected_result = np.zeros(10, dtype="float64")
-            self.assertEqual((result == expected_result).all(), True)
-
-        with fluid.program_guard(fluid.Program()):
-            zeros = paddle.zeros(shape=[10], dtype="int64")
-            place = fluid.CPUPlace()
-            exe = fluid.Executor(place)
-            result, = exe.run(fetch_list=[zeros])
-            expected_result = np.zeros(10, dtype="int64")
-            self.assertEqual((result == expected_result).all(), True)
-
-        with fluid.program_guard(fluid.Program()):
-            zeros = paddle.zeros(shape=[10], dtype="int64", device="cpu")
-            place = fluid.CPUPlace()
-            exe = fluid.Executor(place)
-            result, = exe.run(fetch_list=[zeros])
-            expected_result = np.zeros(10, dtype="int64")
-            self.assertEqual((result == expected_result).all(), True)
-
-
-class ApiOnesTest(unittest.TestCase):
-    def test_out(self):
-        with fluid.program_guard(fluid.Program()):
-            ones = paddle.ones(shape=[10], dtype="float64")
-            place = fluid.CPUPlace()
-            exe = fluid.Executor(place)
-            result, = exe.run(fetch_list=[ones])
-            expected_result = np.ones(10, dtype="float64")
-            self.assertEqual((result == expected_result).all(), True)
-
-        with fluid.program_guard(fluid.Program()):
-            ones = paddle.ones(shape=[10], dtype="int64")
-            place = fluid.CPUPlace()
-            exe = fluid.Executor(place)
-            result, = exe.run(fetch_list=[ones])
-            expected_result = np.ones(10, dtype="int64")
-            self.assertEqual((result == expected_result).all(), True)
-
-        with fluid.program_guard(fluid.Program()):
-            ones = paddle.ones(shape=[10], dtype="int64", device="cpu")
-            place = fluid.CPUPlace()
-            exe = fluid.Executor(place)
-            result, = exe.run(fetch_list=[ones])
-            expected_result = np.ones(10, dtype="int64")
-            self.assertEqual((result == expected_result).all(), True)
-
-
-class ApiOnesZerosError(unittest.TestCase):
-    def test_errors(self):
-        def test_error1():
-            with fluid.program_guard(fluid.Program()):
-                ones = paddle.ones(shape=10, dtype="int64", device="opu")
-
-        self.assertRaises(ValueError, test_error1)
-
-        def test_error2():
-            with fluid.program_guard(fluid.Program()):
-                ones = paddle.ones(shape=10, dtype="int64", device="opu")
-
-        self.assertRaises(ValueError, test_error2)
-
-        def test_error3():
-            with fluid.program_guard(fluid.Program()):
-                ones = fluid.layers.ones(shape=10, dtype="int64")
-
-        self.assertRaises(TypeError, test_error3)
-
-        def test_error4():
-            with fluid.program_guard(fluid.Program()):
-                ones = fluid.layers.ones(shape=[10], dtype="int8")
-
-        self.assertRaises(TypeError, test_error4)
-
-        def test_error5():
-            with fluid.program_guard(fluid.Program()):
-                ones = fluid.layers.zeros(shape=10, dtype="int64")
-
-        self.assertRaises(TypeError, test_error5)
-
-        def test_error6():
-            with fluid.program_guard(fluid.Program()):
-                ones = fluid.layers.zeros(shape=[10], dtype="int8")
-
-        self.assertRaises(TypeError, test_error6)
-
-
 if __name__ == "__main__":
     unittest.main()
@@ -37,33 +37,19 @@ class TestFullAPI(unittest.TestCase):
            shape_tensor_int64 = fluid.data(
                name="shape_tensor_int64", shape=[2], dtype="int64")

-           out_1 = paddle.full(
-               shape=[1, 2], dtype="float32", fill_value=1.1, device='gpu')
+           out_1 = paddle.full(shape=[1, 2], dtype="float32", fill_value=1.1)

            out_2 = paddle.full(
-               shape=[1, positive_2_int32],
-               dtype="float32",
-               fill_value=1.1,
-               device='cpu')
+               shape=[1, positive_2_int32], dtype="float32", fill_value=1.1)

            out_3 = paddle.full(
-               shape=[1, positive_2_int64],
-               dtype="float32",
-               fill_value=1.1,
-               device='gpu')
+               shape=[1, positive_2_int64], dtype="float32", fill_value=1.1)

            out_4 = paddle.full(
-               shape=shape_tensor_int32,
-               dtype="float32",
-               fill_value=1.2,
-               out=out_3)
+               shape=shape_tensor_int32, dtype="float32", fill_value=1.2)

            out_5 = paddle.full(
-               shape=shape_tensor_int64,
-               dtype="float32",
-               fill_value=1.1,
-               device='gpu',
-               stop_gradient=False)
+               shape=shape_tensor_int64, dtype="float32", fill_value=1.1)

            out_6 = paddle.full(
                shape=shape_tensor_int64, dtype=np.float32, fill_value=1.1)
@@ -83,7 +69,7 @@ class TestFullAPI(unittest.TestCase):

        assert np.array_equal(res_1, np.full([1, 2], 1.1, dtype="float32"))
        assert np.array_equal(res_2, np.full([1, 2], 1.1, dtype="float32"))
-       assert np.array_equal(res_3, np.full([1, 2], 1.2, dtype="float32"))
+       assert np.array_equal(res_3, np.full([1, 2], 1.1, dtype="float32"))
        assert np.array_equal(res_4, np.full([1, 2], 1.2, dtype="float32"))
        assert np.array_equal(res_5, np.full([1, 2], 1.1, dtype="float32"))
        assert np.array_equal(res_6, np.full([1, 2], 1.1, dtype="float32"))
@@ -94,28 +80,11 @@ class TestFullOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):
            #for ci coverage
-           x1 = fluid.layers.data(name='x1', shape=[1], dtype="int16")
-           x2 = np.random.randn(1, 2).astype('int32')
            self.assertRaises(
                ValueError, paddle.full, shape=[1], fill_value=5, dtype='uint4')
-           self.assertRaises(
-               TypeError,
-               paddle.full,
-               shape=[1],
-               fill_value=5,
-               dtype='int32',
-               out=x2)
-           self.assertRaises(
-               TypeError,
-               paddle.full,
-               shape=[1],
-               fill_value=5,
-               dtype='int16',
-               out=x1)

            # The argument dtype of full must be one of bool, float16,
            #float32, float64, int32 or int64
-           x2 = fluid.layers.data(name='x2', shape=[1], dtype="int32")
            self.assertRaises(
                TypeError, paddle.full, shape=[1], fill_value=5, dtype='uint8')
...
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import unittest
+import numpy as np
+from op_test import OpTest
+
+import paddle
+import paddle.fluid.core as core
+from paddle.fluid.op import Operator
+import paddle.fluid as fluid
+import numpy as np
+
+
+class ApiOnesTest(unittest.TestCase):
+    def test_out(self):
+        with fluid.program_guard(fluid.Program()):
+            ones = paddle.ones(shape=[10], dtype="float64")
+            place = fluid.CPUPlace()
+            exe = fluid.Executor(place)
+            result, = exe.run(fetch_list=[ones])
+            expected_result = np.ones(10, dtype="float64")
+            self.assertEqual((result == expected_result).all(), True)
+
+        with fluid.program_guard(fluid.Program()):
+            ones = paddle.ones(shape=[10], dtype="int64")
+            place = fluid.CPUPlace()
+            exe = fluid.Executor(place)
+            result, = exe.run(fetch_list=[ones])
+            expected_result = np.ones(10, dtype="int64")
+            self.assertEqual((result == expected_result).all(), True)
+
+        with fluid.program_guard(fluid.Program()):
+            ones = paddle.ones(shape=[10], dtype="int64", device="cpu")
+            place = fluid.CPUPlace()
+            exe = fluid.Executor(place)
+            result, = exe.run(fetch_list=[ones])
+            expected_result = np.ones(10, dtype="int64")
+            self.assertEqual((result == expected_result).all(), True)
+
+
+class ApiOnesZerosError(unittest.TestCase):
+    def test_errors(self):
+        def test_error1():
+            with fluid.program_guard(fluid.Program()):
+                ones = paddle.ones(shape=10, dtype="int64", device="opu")
+
+        self.assertRaises(ValueError, test_error1)
+
+        def test_error2():
+            with fluid.program_guard(fluid.Program()):
+                ones = paddle.ones(shape=10, dtype="int64", device="opu")
+
+        self.assertRaises(ValueError, test_error2)
+
+        def test_error3():
+            with fluid.program_guard(fluid.Program()):
+                ones = fluid.layers.ones(shape=10, dtype="int64")
+
+        self.assertRaises(TypeError, test_error3)
+
+        def test_error4():
+            with fluid.program_guard(fluid.Program()):
+                ones = fluid.layers.ones(shape=[10], dtype="int8")
+
+        self.assertRaises(TypeError, test_error4)
+
+
+if __name__ == "__main__":
+    unittest.main()
@@ -18,6 +18,7 @@ import unittest
 import numpy as np
 from op_test import OpTest

+import paddle
 import paddle.fluid.core as core
 from paddle.fluid.op import Operator
 import paddle.fluid as fluid
@@ -33,5 +34,47 @@ class TestZerosOpError(unittest.TestCase):
            self.assertRaises(TypeError, fluid.layers.zeros, shape, dtype)


+class ApiZerosTest(unittest.TestCase):
+    def test_out(self):
+        with paddle.program_guard(fluid.Program()):
+            zeros = paddle.zeros(shape=[10], dtype="float64")
+            place = fluid.CPUPlace()
+            exe = fluid.Executor(place)
+            result, = exe.run(fetch_list=[zeros])
+            expected_result = np.zeros(10, dtype="float64")
+            self.assertEqual((result == expected_result).all(), True)
+
+        with paddle.program_guard(fluid.Program()):
+            zeros = paddle.zeros(shape=[10], dtype="int64")
+            place = fluid.CPUPlace()
+            exe = fluid.Executor(place)
+            result, = exe.run(fetch_list=[zeros])
+            expected_result = np.zeros(10, dtype="int64")
+            self.assertEqual((result == expected_result).all(), True)
+
+        with paddle.program_guard(fluid.Program()):
+            zeros = paddle.zeros(shape=[10], dtype="int64")
+            place = fluid.CPUPlace()
+            exe = fluid.Executor(place)
+            result, = exe.run(fetch_list=[zeros])
+            expected_result = np.zeros(10, dtype="int64")
+            self.assertEqual((result == expected_result).all(), True)
+
+
+class ApiZerosError(unittest.TestCase):
+    def test_errors(self):
+        def test_error1():
+            with paddle.program_guard(fluid.Program()):
+                ones = fluid.layers.zeros(shape=10, dtype="int64")
+
+        self.assertRaises(TypeError, test_error1)
+
+        def test_error2():
+            with paddle.program_guard(fluid.Program()):
+                ones = fluid.layers.zeros(shape=[10], dtype="int8")
+
+        self.assertRaises(TypeError, test_error2)
+
+
 if __name__ == "__main__":
     unittest.main()
@@ -223,7 +223,7 @@ def ones_like(input, dtype=None, device=None, name=None):
     return out


-def zeros(shape, dtype, out=None, device=None):
+def zeros(shape, dtype=None, name=None):
     """
     :alias_main: paddle.zeros
     :alias: paddle.zeros,paddle.tensor.zeros,paddle.tensor.creation.zeros
@@ -232,14 +232,10 @@ def zeros(shape, dtype, out=None, device=None):
     Args:
         shape(tuple|list): Shape of output tensor.
-        dtype(np.dtype|core.VarDesc.VarType|str): Data type of output tensor, it supports
-            bool, float16, float32, float64, int32 and int64.
-        out(Variable, optional): Optional output which can be any created
-            Variable that meets the requirements to store the result of operation.
-            if out is None, a new Varibale will be create to store the result.
-        device(str, optional): Which device to run the operator. The :attr:`device` must be
-            None,'cpu', 'gpu'. If :attr:`device` is None, it will be choose the device that the user set in
-            the paddle program. Default value is False.
+        dtype(np.dtype|core.VarDesc.VarType|str, optional): Data type of output tensor, it supports
+            bool, float16, float32, float64, int32 and int64. Default: if None, the date type is float32.
+        name(str, optional): The default value is None. Normally there is no need for user to set this
+            property. For more information, please refer to :ref:`api_guide_Name`.

     Returns:
         Variable: A tensor of data type :attr:`dtype` with shape :attr:`shape` and all elements set to 0.
@@ -248,21 +244,14 @@ def zeros(shape, dtype, out=None, device=None):
        .. code-block:: python

          import paddle
+         paddle.enable_imperative()  # Now we are in imperative mode
          data = paddle.zeros(shape=[3, 2], dtype='float32') # [[0., 0.], [0., 0.], [0., 0.]]
-         data = paddle.zeros(shape=[2, 2], dtype='float32', device='cpu') # [[0., 0.], [0., 0.]]
+         data = paddle.zeros(shape=[2, 2], dtype='int32', name='zeros') # [[0, 0], [0, 0]]
     """
-    check_dtype(dtype, 'create data type',
-                ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
-                'zeros')
-    if device is not None:
-        if device not in ['cpu', 'gpu']:
-            raise ValueError(
-                "The value of 'device' in zeros_op must be cpu or gpu, but received %s."
-                % (device))
-        with fluid.device_guard(device):
-            return fill_constant(value=0.0, shape=shape, dtype=dtype, out=out)
-    return fill_constant(value=0.0, shape=shape, dtype=dtype, out=out)
+    if dtype is None:
+        dtype = 'float32'
+    return fill_constant(value=0.0, shape=shape, dtype=dtype, name=name)


 def zeros_like(input, dtype=None, device=None, name=None):
@@ -398,13 +387,7 @@ def eye(num_rows,
     return out


-def full(shape,
-         fill_value,
-         out=None,
-         dtype=None,
-         device=None,
-         stop_gradient=True,
-         name=None):
+def full(shape, fill_value, dtype=None, name=None):
     """
     :alias_main: paddle.full
     :alias: paddle.full,paddle.tensor.full,paddle.tensor.creation.full
@@ -418,17 +401,9 @@ def full(shape,
            If ``shape`` is an Variable, it should be an 1-D Tensor .
        fill_value(bool|float16|float32|float64|int32|int64|Variable): The constant value
            used to initialize the Tensor to be created. If fill_value is an Variable, it must be an 1-D Tensor.
-       out(Variable, optional): Optional output which can be any created
-           Variable that meets the requirements to store the result of operation.
-           if out is None, a new Varibale will be create to store the result.
        dtype(np.dtype|core.VarDesc.VarType|str, optional): Data type of the output tensor
            which can be float16, float32, float64, int32, int64, if dytpe is `None`, the data
            type of created tensor is `float32`
-       device(str, optional): On which device to run this Op. The :attr:`device` must be
-           None, 'cpu' or 'gpu'. If :attr:`device` is None, the device that the user set in
-           the paddle program will be chosen. Default value is None.
-       stop_gradient(bool, optional): Indicating if we stop gradient from current(out) Variable,
-           default value is True.
        name(str, optional): The default value is None. Normally there is no need for user to set this
           property. For more information, please refer to :ref:`api_guide_Name`.
@@ -437,28 +412,26 @@ def full(shape,
    Raises:
        TypeError: The `dtype` must be one of None, bool, float16, float32, float64, int32 and int64.
-       TypeError: The `out` must be a Variable.
        TypeError: The `shape` must be one of Variable, list tuple.

    Examples:
        .. code-block:: python

          import paddle
-         import paddle.fluid as fluid
+         paddle.enable_imperative()  # Now we are in imperative mode
          data1 = paddle.full(shape=[2,1], fill_value=0, dtype='int64') # data1=[[0],[0]]
-         data2 = paddle.full(shape=[2,1], fill_value=5, dtype='int64', device='gpu') # data2=[[5],[5]]

          # attr shape is a list which contains Variable Tensor.
-         positive_2 = fluid.layers.fill_constant([1], "int32", 2)
+         positive_2 = paddle.fill_constant([1], "int32", 2)
          data3 = paddle.full(shape=[1, positive_2], dtype='float32', fill_value=1.5) # data3=[1.5, 1.5]

          # attr shape is an Variable Tensor.
-         shape = fluid.layers.fill_constant([1,2], "int32", 2) # shape=[2,2]
+         shape = paddle.fill_constant([2], "int32", 2) # shape=[2,2]
          data4 = paddle.full(shape=shape, dtype='bool', fill_value=True) # data4=[[True,True],[True,True]]

          # attr value is an Variable Tensor.
-         val = fluid.layers.fill_constant([1], "float32", 2.0) # val=[2.0]
+         val = paddle.fill_constant([1], "float32", 2.0) # val=[2.0]
          data5 = paddle.full(shape=[2,1], fill_value=val, dtype='float32') #data5=[[2.0],[2.0]]
    """
@@ -467,21 +440,7 @@ def full(shape,
    if dtype is None:
        dtype = 'float32'

-   check_dtype(dtype, 'create data type',
-               ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
-               'full')
-   check_type(shape, 'shape', (Variable, list, tuple), 'full')
-   if out is not None:
-       check_type(out, 'out', (Variable), 'full')
-
-   if out is None:
-       out = helper.create_variable_for_type_inference(dtype=dtype)
-   out.stop_gradient = stop_gradient
-
-   with device_guard(device):
-       out = fill_constant(shape=shape, dtype=dtype, value=fill_value, out=out)
-
-   return out
+   return fill_constant(shape=shape, dtype=dtype, value=fill_value, name=name)


 def arange(start, end, step=1, dtype=None, name=None):
...
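For callers migrating off the removed keyword arguments, one possible mapping under the fluid static-graph API; this sketch is not part of the commit, and `fluid.device_guard` is simply the helper the removed `zeros` implementation used internally:

import paddle
import paddle.fluid as fluid

with fluid.program_guard(fluid.Program()):
    # old: paddle.full(shape=[2, 1], fill_value=5, dtype='int64',
    #                  device='cpu', stop_gradient=False)
    with fluid.device_guard('cpu'):      # replaces device='cpu'
        out = paddle.full(shape=[2, 1], fill_value=5, dtype='int64')
    out.stop_gradient = False            # replaces stop_gradient=False
    # there is no out= any more; use the returned Variable directly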