Unverified commit d8c9f19a, authored by cyber-pioneer, committed by GitHub

move paddle.fluid.layers.tensor.create_tensor to paddle.tensor.creation.create_tensor (#48662)

Parent commit 6cdaa371
......@@ -203,7 +203,7 @@ class Distribution:
dtype = tmp.dtype
for arg in numpy_args:
arg_broadcasted, _ = np.broadcast_arrays(arg, tmp)
arg_variable = tensor.create_tensor(dtype=dtype)
arg_variable = paddle.tensor.create_tensor(dtype=dtype)
tensor.assign(arg_broadcasted, arg_variable)
variable_args.append(arg_variable)
......
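A minimal standalone sketch of the broadcast-then-assign pattern this hunk migrates (the numpy inputs and the use of paddle.assign in place of the fluid-layer assign are illustrative assumptions, not values from the diff):
import numpy as np
import paddle

paddle.enable_static()

# `tmp` stands in for the reference tensor that fixes the target shape and dtype;
# `arg` is a raw numpy argument that must be broadcast to match it.
tmp = np.zeros((2, 3), dtype="float32")
arg = np.array([1.0, 2.0, 3.0], dtype="float32")

arg_broadcasted, _ = np.broadcast_arrays(arg, tmp)

# Relocated API: create an empty variable first, then fill it via assign.
arg_variable = paddle.tensor.create_tensor(dtype="float32")
paddle.assign(arg_broadcasted, arg_variable)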
......@@ -47,7 +47,6 @@ from .utils import check_shape
from paddle import _C_ops, _legacy_C_ops
__all__ = [
'create_tensor',
'create_global_var',
'cast',
'tensor_array_to_tensor',
......@@ -62,48 +61,6 @@ __all__ = [
]
def create_tensor(dtype, name=None, persistable=False):
"""
Create a variable, which will hold a Tensor with data type dtype.
Args:
dtype(string|numpy.dtype): the data type of Tensor to be created, the
data type is bool, float16, float32, float64, int8, int16, int32 and int64.
name(string, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`
persistable(bool): Set the persistable flag of the create tensor.
default value is False.
Returns:
Variable: The tensor to be created according to dtype.
Examples:
.. code-block:: python
import paddle.fluid as fluid
tensor = fluid.layers.create_tensor(dtype='float32')
"""
check_dtype(
dtype,
'dtype',
[
'bool',
'float16',
'float32',
'float64',
'int8',
'int32',
'int32',
'int64',
],
'create_tensor',
)
helper = LayerHelper("create_tensor", **locals())
return helper.create_variable(
name=helper.name, dtype=dtype, persistable=persistable
)
def create_global_var(
shape, value, dtype, persistable=False, force_cpu=False, name=None
):
......
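For call sites outside this diff, the migration is mechanical; a hedged before/after sketch (static mode and the variable name are illustrative):
import paddle

paddle.enable_static()

# before this change: t = paddle.fluid.layers.create_tensor(dtype='float32')
# after this change:
t = paddle.tensor.create_tensor(dtype='float32')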
......@@ -46,7 +46,7 @@ class TestMNISTIfElseOp(unittest.TestCase):
cond = paddle.less_than(x=label, y=limit)
true_image, false_image = split_lod_tensor(input=image, mask=cond)
true_out = layers.create_tensor(dtype='float32')
true_out = paddle.tensor.create_tensor(dtype='float32')
true_cond = ConditionalBlock([cond])
with true_cond.block():
......@@ -54,7 +54,7 @@ class TestMNISTIfElseOp(unittest.TestCase):
prob = layers.fc(input=hidden, size=10, act='softmax')
layers.assign(input=prob, output=true_out)
false_out = layers.create_tensor(dtype='float32')
false_out = paddle.tensor.create_tensor(dtype='float32')
false_cond = ConditionalBlock([cond])
with false_cond.block():
......
......@@ -38,7 +38,7 @@ class TestDistMnist2x2(TestDistRunnerBase):
avg_cost = paddle.mean(x=cost)
# Evaluator
batch_size_tensor = fluid.layers.create_tensor(dtype='int64')
batch_size_tensor = paddle.tensor.create_tensor(dtype='int64')
batch_acc = paddle.static.accuracy(
input=predict, label=label, total=batch_size_tensor
)
......
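The same evaluator pattern recurs in the dist-MNIST style tests below; a hedged, self-contained sketch of it (layer names, shapes and sizes are placeholders):
import paddle

paddle.enable_static()

image = paddle.static.data(name="img", shape=[-1, 784], dtype="float32")
label = paddle.static.data(name="label", shape=[-1, 1], dtype="int64")
predict = paddle.static.nn.fc(image, size=10, activation="softmax")

# `total` is an output slot: accuracy writes the per-batch sample count into it,
# so an empty int64 variable created up front is all that is needed.
batch_size_tensor = paddle.tensor.create_tensor(dtype="int64")
batch_acc = paddle.static.accuracy(
    input=predict, label=label, total=batch_size_tensor
)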
......@@ -105,7 +105,7 @@ class TestDistMnist2x2(TestDistRunnerBase):
# Evaluator
with fluid.device_guard("gpu:1"):
batch_size_tensor = fluid.layers.create_tensor(dtype='int64')
batch_size_tensor = paddle.tensor.create_tensor(dtype='int64')
batch_acc = paddle.static.accuracy(
input=predict, label=label, total=batch_size_tensor
)
......
......@@ -105,7 +105,7 @@ class TestDistMnist2x2(TestDistRunnerBase):
# Evaluator
with fluid.device_guard("gpu:1"):
batch_size_tensor = fluid.layers.create_tensor(dtype='int64')
batch_size_tensor = paddle.tensor.create_tensor(dtype='int64')
batch_acc = paddle.static.accuracy(
input=predict, label=label, total=batch_size_tensor
)
......
......@@ -97,7 +97,7 @@ class TestDistMnist2x2(TestDistRunnerBase):
# Evaluator
with fluid.device_guard("gpu:0"):
batch_size_tensor = fluid.layers.create_tensor(dtype='int64')
batch_size_tensor = paddle.tensor.create_tensor(dtype='int64')
batch_acc = paddle.static.accuracy(
input=predict, label=label, total=batch_size_tensor
)
......
......@@ -81,7 +81,7 @@ class TestDistMnist2x2(TestDistRunnerBase):
avg_cost = paddle.mean(x=cost)
# Evaluator
batch_size_tensor = fluid.layers.create_tensor(dtype='int64')
batch_size_tensor = paddle.tensor.create_tensor(dtype='int64')
batch_acc = paddle.static.accuracy(
input=predict, label=label, total=batch_size_tensor
)
......
......@@ -83,7 +83,7 @@ class TestFleetMetaOptimizerPrecision(TestDistRunnerBase):
avg_cost = paddle.mean(x=cost)
# Evaluator
batch_size_tensor = fluid.layers.create_tensor(dtype='int64')
batch_size_tensor = paddle.tensor.create_tensor(dtype='int64')
batch_acc = paddle.static.accuracy(
input=predict, label=label, total=batch_size_tensor
)
......
......@@ -83,7 +83,7 @@ class TestFleetMetaOptimizerFuseAllReducePrecision(TestDistRunnerBase):
avg_cost = paddle.mean(x=cost)
# Evaluator
batch_size_tensor = fluid.layers.create_tensor(dtype='int64')
batch_size_tensor = paddle.tensor.create_tensor(dtype='int64')
batch_acc = paddle.static.accuracy(
input=predict, label=label, total=batch_size_tensor
)
......
......@@ -82,7 +82,7 @@ class TestDistMnist2x2(TestDistRunnerBase):
avg_cost = paddle.mean(x=cost)
# Evaluator
batch_size_tensor = fluid.layers.create_tensor(dtype='int64')
batch_size_tensor = paddle.tensor.create_tensor(dtype='int64')
batch_acc = paddle.static.accuracy(
input=predict, label=label, total=batch_size_tensor
)
......
......@@ -47,7 +47,7 @@ class TestDistMnist2x2(TestDistRunnerBase):
avg_cost = paddle.mean(x=cost)
# Evaluator
batch_size_tensor = fluid.layers.create_tensor(dtype='int64')
batch_size_tensor = paddle.tensor.create_tensor(dtype='int64')
batch_acc = paddle.static.accuracy(
input=predict, label=label, total=batch_size_tensor
)
......
......@@ -41,7 +41,7 @@ class TestDistMnist2x2(TestDistRunnerBase):
avg_cost = paddle.mean(x=cost)
# Evaluator
batch_size_tensor = fluid.layers.create_tensor(dtype='int64')
batch_size_tensor = paddle.tensor.create_tensor(dtype='int64')
batch_acc = paddle.static.accuracy(
input=predict, label=label, total=batch_size_tensor
)
......
......@@ -38,7 +38,7 @@ class TestDistMnist2x2(TestDistRunnerBase):
avg_cost = paddle.mean(x=cost)
# Evaluator
batch_size_tensor = fluid.layers.create_tensor(dtype='int64')
batch_size_tensor = paddle.tensor.create_tensor(dtype='int64')
batch_acc = paddle.static.accuracy(
input=predict, label=label, total=batch_size_tensor
)
......
......@@ -20,7 +20,6 @@ from test_distribution import DistributionNumpy
import paddle
from paddle import fluid
from paddle.distribution import Categorical, Distribution, Normal, Uniform
from paddle.fluid import layers
np.random.seed(2022)
......@@ -380,7 +379,7 @@ class DistributionTestError(unittest.TestCase):
)
value_npdata = np.array([0.8], dtype="float32")
value_tensor = layers.create_tensor(dtype="float32")
value_tensor = paddle.tensor.create_tensor(dtype="float32")
self.assertRaises(
NotImplementedError, distribution.log_prob, value_tensor
)
......
......@@ -93,7 +93,7 @@ class TestAssignApi(unittest.TestCase):
def test_assign(self):
main_program = fluid.Program()
with fluid.program_guard(main_program):
x = layers.create_tensor(dtype=self.dtype)
x = paddle.tensor.create_tensor(dtype=self.dtype)
layers.assign(input=self.value, output=x)
exe = fluid.Executor(self.place)
......
......@@ -83,7 +83,7 @@ class TestAssignApi(unittest.TestCase):
def test_assign(self):
main_program = fluid.Program()
with fluid.program_guard(main_program):
x = layers.create_tensor(dtype=self.dtype)
x = paddle.tensor.create_tensor(dtype=self.dtype)
layers.assign(input=self.value, output=x)
exe = fluid.Executor(self.place)
......
......@@ -33,7 +33,7 @@ class ConditionalBlockTest(unittest.TestCase):
data = layers.data(name='X', shape=[1], dtype='float32')
data.stop_gradient = False
cond = ConditionalBlock(inputs=[data])
out = layers.create_tensor(dtype='float32')
out = paddle.tensor.create_tensor(dtype='float32')
with cond.block():
hidden = layers.fc(input=data, size=10)
layers.assign(hidden, out)
......
......@@ -77,7 +77,7 @@ def get_model(batch_size):
avg_cost = paddle.mean(x=cost)
# Evaluator
batch_size_tensor = fluid.layers.create_tensor(dtype='int64')
batch_size_tensor = paddle.tensor.create_tensor(dtype='int64')
batch_acc = paddle.static.accuracy(
input=predict, label=label, total=batch_size_tensor
)
......
......@@ -16,6 +16,7 @@ import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
......@@ -26,7 +27,9 @@ class TestFetchVar(unittest.TestCase):
def test_fetch_var(self):
self.set_input()
x = layers.create_tensor(dtype="int32", persistable=True, name="x")
x = paddle.tensor.create_tensor(
dtype="int32", persistable=True, name="x"
)
layers.assign(input=self.val, output=x)
exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_main_program(), feed={}, fetch_list=[])
......
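A hedged sketch of why persistable=True matters in this test: a persistable variable survives in the global scope, so it can be looked up by name after the program runs (the global-scope lookup is an assumed stand-in for the test's fetch helper):
import numpy as np
import paddle

paddle.enable_static()

x = paddle.tensor.create_tensor(dtype="int32", persistable=True, name="x")
paddle.assign(np.array([[1, 2], [3, 4]], dtype="int32"), x)

exe = paddle.static.Executor(paddle.CPUPlace())
exe.run(paddle.static.default_main_program())

# Because `x` is persistable, its value is still reachable by name afterwards.
fetched = np.array(paddle.static.global_scope().find_var("x").get_tensor())
print(fetched)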
......@@ -59,7 +59,7 @@ class TestProfiler(unittest.TestCase):
label = fluid.layers.data(name='y', shape=[1], dtype='int64')
cost = fluid.layers.cross_entropy(input=predict, label=label)
avg_cost = paddle.mean(cost)
batch_size = fluid.layers.create_tensor(dtype='int64')
batch_size = paddle.tensor.create_tensor(dtype='int64')
batch_acc = paddle.static.accuracy(
input=predict, label=label, total=batch_size
)
......
......@@ -19,7 +19,6 @@ import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.fluid.layers as layers
from paddle.fluid.executor import Executor
......@@ -31,8 +30,8 @@ class TestSquareErrorCost(unittest.TestCase):
sub = input_val - label_val
np_result = sub * sub
input_var = layers.create_tensor(dtype="float32", name="input")
label_var = layers.create_tensor(dtype="float32", name="label")
input_var = paddle.tensor.create_tensor(dtype="float32", name="input")
label_var = paddle.tensor.create_tensor(dtype="float32", name="label")
output = paddle.nn.functional.square_error_cost(
input=input_var, label=label_var
)
......
......@@ -94,7 +94,7 @@ class TestAssignApi(unittest.TestCase):
def test_assign(self):
main_program = fluid.Program()
with fluid.program_guard(main_program):
x = layers.create_tensor(dtype=self.dtype)
x = paddle.tensor.create_tensor(dtype=self.dtype)
layers.assign(input=self.value, output=x)
exe = fluid.Executor(self.place)
......
......@@ -19,6 +19,8 @@ from .attribute import shape # noqa: F401
from .attribute import real # noqa: F401
from .attribute import imag # noqa: F401
from .attribute import is_floating_point # noqa: F401
from .creation import create_parameter # noqa: F401
from .creation import create_tensor # noqa: F401
from .creation import to_tensor # noqa: F401
from .creation import diag # noqa: F401
from .creation import diagflat # noqa: F401
......@@ -289,6 +291,8 @@ from .einsum import einsum # noqa: F401
# this list used in math_op_patch.py for _binary_creator_
tensor_method_func = [ # noqa
'create_parameter',
'create_tensor',
'matmul',
'dot',
'cov',
......
......@@ -100,7 +100,7 @@ def create_parameter(
import paddle
paddle.enable_static()
W = paddle.static.create_parameter(shape=[784, 200], dtype='float32')
W = paddle.create_parameter(shape=[784, 200], dtype='float32')
"""
check_type(shape, 'shape', (list, tuple, np.ndarray), 'create_parameter')
for item in shape:
......@@ -150,6 +150,48 @@ def create_parameter(
)
def create_tensor(dtype, name=None, persistable=False):
"""
Create a variable, which will hold a Tensor with the given data type dtype.
Args:
dtype(string|numpy.dtype): the data type of the Tensor to be created. Supported
data types are bool, float16, float32, float64, int8, int16, int32 and int64.
name(string, optional): The default value is None. Normally there is no need for
the user to set this property. For more information, please refer to :ref:`api_guide_Name`.
persistable(bool): Set the persistable flag of the created tensor.
The default value is False.
Returns:
Variable: The created tensor variable with the given data type.
Examples:
.. code-block:: python
import paddle
tensor = paddle.tensor.create_tensor(dtype='float32')
"""
check_dtype(
dtype,
'dtype',
[
'bool',
'float16',
'float32',
'float64',
'int8',
'int16',
'int32',
'int64',
],
'create_tensor',
)
helper = LayerHelper("create_tensor", **locals())
return helper.create_variable(
name=helper.name, dtype=dtype, persistable=persistable
)
def linspace(start, stop, num, dtype=None, name=None):
r"""
Return fixed number of evenly spaced values within a given interval.
......
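The relocated function keeps its check_dtype guard, so an unsupported dtype is rejected before any variable is created; a hedged sketch of that failure mode (assuming check_dtype raises TypeError, as it does elsewhere in fluid):
import paddle

paddle.enable_static()

try:
    # 'uint8' is not in the accepted dtype list above.
    paddle.tensor.create_tensor(dtype="uint8")
except TypeError as err:
    print("rejected:", err)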