diff --git a/python/paddle/distribution/distribution.py b/python/paddle/distribution/distribution.py
index 15ee5d8e011e4ec09c14e4d5087ba9445d2f7d7e..ae4cb2f9b16ef69d4babbffab2058026d3a62f4e 100644
--- a/python/paddle/distribution/distribution.py
+++ b/python/paddle/distribution/distribution.py
@@ -203,7 +203,7 @@ class Distribution:
         dtype = tmp.dtype
         for arg in numpy_args:
             arg_broadcasted, _ = np.broadcast_arrays(arg, tmp)
-            arg_variable = tensor.create_tensor(dtype=dtype)
+            arg_variable = paddle.tensor.create_tensor(dtype=dtype)
             tensor.assign(arg_broadcasted, arg_variable)
             variable_args.append(arg_variable)
 
diff --git a/python/paddle/fluid/layers/tensor.py b/python/paddle/fluid/layers/tensor.py
index e131744cd8685ed63d3acd91588a4dd4838aaf2d..6a88b6828fb8511315adc06e9ded2baa861520ec 100644
--- a/python/paddle/fluid/layers/tensor.py
+++ b/python/paddle/fluid/layers/tensor.py
@@ -47,7 +47,6 @@ from .utils import check_shape
 from paddle import _C_ops, _legacy_C_ops
 
 __all__ = [
-    'create_tensor',
     'create_global_var',
     'cast',
     'tensor_array_to_tensor',
@@ -62,48 +61,6 @@ __all__ = [
 ]
 
 
-def create_tensor(dtype, name=None, persistable=False):
-    """
-    Create a variable, which will hold a Tensor with data type dtype.
-
-    Args:
-        dtype(string|numpy.dtype): the data type of Tensor to be created, the
-            data type is bool, float16, float32, float64, int8, int16, int32 and int64.
-        name(string, optional): The default value is None. Normally there is no need for
-            user to set this property. For more information, please refer to :ref:`api_guide_Name`
-        persistable(bool): Set the persistable flag of the create tensor.
-            default value is False.
-
-    Returns:
-        Variable: The tensor to be created according to dtype.
-
-    Examples:
-        .. code-block:: python
-
-          import paddle.fluid as fluid
-          tensor = fluid.layers.create_tensor(dtype='float32')
-    """
-    check_dtype(
-        dtype,
-        'dtype',
-        [
-            'bool',
-            'float16',
-            'float32',
-            'float64',
-            'int8',
-            'int32',
-            'int32',
-            'int64',
-        ],
-        'create_tensor',
-    )
-    helper = LayerHelper("create_tensor", **locals())
-    return helper.create_variable(
-        name=helper.name, dtype=dtype, persistable=persistable
-    )
-
-
 def create_global_var(
     shape, value, dtype, persistable=False, force_cpu=False, name=None
 ):
diff --git a/python/paddle/fluid/tests/test_if_else_op.py b/python/paddle/fluid/tests/test_if_else_op.py
index 1eba6cbb60ee190ea5fad2791b17cdbd86cb7665..24857164dc30b84ca19c5aa983ec3ff8923b1d4f 100644
--- a/python/paddle/fluid/tests/test_if_else_op.py
+++ b/python/paddle/fluid/tests/test_if_else_op.py
@@ -46,7 +46,7 @@ class TestMNISTIfElseOp(unittest.TestCase):
             cond = paddle.less_than(x=label, y=limit)
             true_image, false_image = split_lod_tensor(input=image, mask=cond)
 
-            true_out = layers.create_tensor(dtype='float32')
+            true_out = paddle.tensor.create_tensor(dtype='float32')
             true_cond = ConditionalBlock([cond])
 
             with true_cond.block():
@@ -54,7 +54,7 @@ class TestMNISTIfElseOp(unittest.TestCase):
                 prob = layers.fc(input=hidden, size=10, act='softmax')
                 layers.assign(input=prob, output=true_out)
 
-            false_out = layers.create_tensor(dtype='float32')
+            false_out = paddle.tensor.create_tensor(dtype='float32')
             false_cond = ConditionalBlock([cond])
 
             with false_cond.block():
diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/dist_mnist_gradient_merge.py b/python/paddle/fluid/tests/unittests/collective/fleet/dist_mnist_gradient_merge.py
index 1e67d722040c4048b3ff86d7e629c6b039df72f9..85394ea89da4e77b799fe52dfa40ebf6bbfce1aa 100644
--- a/python/paddle/fluid/tests/unittests/collective/fleet/dist_mnist_gradient_merge.py
+++ b/python/paddle/fluid/tests/unittests/collective/fleet/dist_mnist_gradient_merge.py
@@ -38,7 +38,7 @@ class TestDistMnist2x2(TestDistRunnerBase):
         avg_cost = paddle.mean(x=cost)
 
         # Evaluator
-        batch_size_tensor = fluid.layers.create_tensor(dtype='int64')
+        batch_size_tensor = paddle.tensor.create_tensor(dtype='int64')
         batch_acc = paddle.static.accuracy(
             input=predict, label=label, total=batch_size_tensor
         )
diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/pipeline_mnist.py b/python/paddle/fluid/tests/unittests/collective/fleet/pipeline_mnist.py
index 59572a5e7deacb552ffd071aaee011c6c8c8277e..4530d8e24216ae11ef3d54a830edfc5f739ee88f 100644
--- a/python/paddle/fluid/tests/unittests/collective/fleet/pipeline_mnist.py
+++ b/python/paddle/fluid/tests/unittests/collective/fleet/pipeline_mnist.py
@@ -105,7 +105,7 @@ class TestDistMnist2x2(TestDistRunnerBase):
 
         # Evaluator
         with fluid.device_guard("gpu:1"):
-            batch_size_tensor = fluid.layers.create_tensor(dtype='int64')
+            batch_size_tensor = paddle.tensor.create_tensor(dtype='int64')
             batch_acc = paddle.static.accuracy(
                 input=predict, label=label, total=batch_size_tensor
             )
diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/pipeline_mnist_multi_device.py b/python/paddle/fluid/tests/unittests/collective/fleet/pipeline_mnist_multi_device.py
index e60b6bbbcd42803d52bf79e982150f237e3db4e6..0b75b034ce46e13311afe798ef097555b396ade0 100644
--- a/python/paddle/fluid/tests/unittests/collective/fleet/pipeline_mnist_multi_device.py
+++ b/python/paddle/fluid/tests/unittests/collective/fleet/pipeline_mnist_multi_device.py
@@ -105,7 +105,7 @@ class TestDistMnist2x2(TestDistRunnerBase):
 
         # Evaluator
         with fluid.device_guard("gpu:1"):
-            batch_size_tensor = fluid.layers.create_tensor(dtype='int64')
+            batch_size_tensor = paddle.tensor.create_tensor(dtype='int64')
             batch_acc = paddle.static.accuracy(
                 input=predict, label=label, total=batch_size_tensor
             )
diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/pipeline_mnist_one_device.py b/python/paddle/fluid/tests/unittests/collective/fleet/pipeline_mnist_one_device.py
index 8ecea66aaa6bcb132da7dad018c5aed0139053c7..5b1e590fc005856977ba13776eb6f030c0cca40d 100644
--- a/python/paddle/fluid/tests/unittests/collective/fleet/pipeline_mnist_one_device.py
+++ b/python/paddle/fluid/tests/unittests/collective/fleet/pipeline_mnist_one_device.py
@@ -97,7 +97,7 @@ class TestDistMnist2x2(TestDistRunnerBase):
 
         # Evaluator
         with fluid.device_guard("gpu:0"):
-            batch_size_tensor = fluid.layers.create_tensor(dtype='int64')
+            batch_size_tensor = paddle.tensor.create_tensor(dtype='int64')
             batch_acc = paddle.static.accuracy(
                 input=predict, label=label, total=batch_size_tensor
             )
diff --git a/python/paddle/fluid/tests/unittests/dist_allreduce_op.py b/python/paddle/fluid/tests/unittests/dist_allreduce_op.py
index bd3a6d659a39eb10f905028ae397ea6545542344..c956f287d7b14978e15c26c5e534b6f55776a53f 100644
--- a/python/paddle/fluid/tests/unittests/dist_allreduce_op.py
+++ b/python/paddle/fluid/tests/unittests/dist_allreduce_op.py
@@ -81,7 +81,7 @@ class TestDistMnist2x2(TestDistRunnerBase):
         avg_cost = paddle.mean(x=cost)
 
         # Evaluator
-        batch_size_tensor = fluid.layers.create_tensor(dtype='int64')
+        batch_size_tensor = paddle.tensor.create_tensor(dtype='int64')
         batch_acc = paddle.static.accuracy(
             input=predict, label=label, total=batch_size_tensor
         )
diff --git a/python/paddle/fluid/tests/unittests/dist_fleet_raw_program_optimizer.py b/python/paddle/fluid/tests/unittests/dist_fleet_raw_program_optimizer.py
index 90c1ea16a82c53dee2d577396896103722781690..7c98169433b0b89c2dbeb1fa6bdefe1aad6d7725 100644
--- a/python/paddle/fluid/tests/unittests/dist_fleet_raw_program_optimizer.py
+++ b/python/paddle/fluid/tests/unittests/dist_fleet_raw_program_optimizer.py
@@ -83,7 +83,7 @@ class TestFleetMetaOptimizerPrecision(TestDistRunnerBase):
         avg_cost = paddle.mean(x=cost)
 
         # Evaluator
-        batch_size_tensor = fluid.layers.create_tensor(dtype='int64')
+        batch_size_tensor = paddle.tensor.create_tensor(dtype='int64')
         batch_acc = paddle.static.accuracy(
             input=predict, label=label, total=batch_size_tensor
         )
diff --git a/python/paddle/fluid/tests/unittests/dist_fleet_raw_program_optimizer_fuse_allreduce.py b/python/paddle/fluid/tests/unittests/dist_fleet_raw_program_optimizer_fuse_allreduce.py
index 98d7ef1d1569d4a62fd2e3ce95739f56f7b11987..e46173735a96796e3598a45c4a67b5a61881b599 100644
--- a/python/paddle/fluid/tests/unittests/dist_fleet_raw_program_optimizer_fuse_allreduce.py
+++ b/python/paddle/fluid/tests/unittests/dist_fleet_raw_program_optimizer_fuse_allreduce.py
@@ -83,7 +83,7 @@ class TestFleetMetaOptimizerFuseAllReducePrecision(TestDistRunnerBase):
         avg_cost = paddle.mean(x=cost)
 
         # Evaluator
-        batch_size_tensor = fluid.layers.create_tensor(dtype='int64')
+        batch_size_tensor = paddle.tensor.create_tensor(dtype='int64')
         batch_acc = paddle.static.accuracy(
             input=predict, label=label, total=batch_size_tensor
         )
diff --git a/python/paddle/fluid/tests/unittests/dist_mnist.py b/python/paddle/fluid/tests/unittests/dist_mnist.py
index 3cecc8b32c0b021a780b3f0d1089aafc563bdaae..819b959a1fa8d32b72d98a1d40f34b8c31be2757 100644
--- a/python/paddle/fluid/tests/unittests/dist_mnist.py
+++ b/python/paddle/fluid/tests/unittests/dist_mnist.py
@@ -82,7 +82,7 @@ class TestDistMnist2x2(TestDistRunnerBase):
         avg_cost = paddle.mean(x=cost)
 
         # Evaluator
-        batch_size_tensor = fluid.layers.create_tensor(dtype='int64')
+        batch_size_tensor = paddle.tensor.create_tensor(dtype='int64')
         batch_acc = paddle.static.accuracy(
             input=predict, label=label, total=batch_size_tensor
         )
diff --git a/python/paddle/fluid/tests/unittests/dist_mnist_batch_merge.py b/python/paddle/fluid/tests/unittests/dist_mnist_batch_merge.py
index 4cda9dd53a7233c01deb8dabdd9ecf354bc26a01..aa963ab012bc05f4bdb3fc2ab58e853afb4833ea 100644
--- a/python/paddle/fluid/tests/unittests/dist_mnist_batch_merge.py
+++ b/python/paddle/fluid/tests/unittests/dist_mnist_batch_merge.py
@@ -47,7 +47,7 @@ class TestDistMnist2x2(TestDistRunnerBase):
         avg_cost = paddle.mean(x=cost)
 
         # Evaluator
-        batch_size_tensor = fluid.layers.create_tensor(dtype='int64')
+        batch_size_tensor = paddle.tensor.create_tensor(dtype='int64')
         batch_acc = paddle.static.accuracy(
             input=predict, label=label, total=batch_size_tensor
         )
diff --git a/python/paddle/fluid/tests/unittests/dist_mnist_fp16_allreduce.py b/python/paddle/fluid/tests/unittests/dist_mnist_fp16_allreduce.py
index 53819ca5491d4c8363d2e7ef841a75785f109143..ad0b25e8ea15acf6868059f37d73faa47aae79a2 100644
--- a/python/paddle/fluid/tests/unittests/dist_mnist_fp16_allreduce.py
+++ b/python/paddle/fluid/tests/unittests/dist_mnist_fp16_allreduce.py
@@ -41,7 +41,7 @@ class TestDistMnist2x2(TestDistRunnerBase):
         avg_cost = paddle.mean(x=cost)
 
         # Evaluator
-        batch_size_tensor = fluid.layers.create_tensor(dtype='int64')
+        batch_size_tensor = paddle.tensor.create_tensor(dtype='int64')
         batch_acc = paddle.static.accuracy(
             input=predict, label=label, total=batch_size_tensor
         )
diff --git a/python/paddle/fluid/tests/unittests/dist_mnist_lars.py b/python/paddle/fluid/tests/unittests/dist_mnist_lars.py
index 347692afdd0b12328eb361dd23602d821af3a891..b886ad8953461b9984cf2b8e017aa6dac76f408e 100644
--- a/python/paddle/fluid/tests/unittests/dist_mnist_lars.py
+++ b/python/paddle/fluid/tests/unittests/dist_mnist_lars.py
@@ -38,7 +38,7 @@ class TestDistMnist2x2(TestDistRunnerBase):
         avg_cost = paddle.mean(x=cost)
 
         # Evaluator
-        batch_size_tensor = fluid.layers.create_tensor(dtype='int64')
+        batch_size_tensor = paddle.tensor.create_tensor(dtype='int64')
         batch_acc = paddle.static.accuracy(
             input=predict, label=label, total=batch_size_tensor
         )
diff --git a/python/paddle/fluid/tests/unittests/distribution/test_distribution_categorical.py b/python/paddle/fluid/tests/unittests/distribution/test_distribution_categorical.py
index 5dfcedcc0c5a63edfb94cc8c76c70480976c2416..91e5b2257674330abf2b2fc07dc8534fa54b20ae 100644
--- a/python/paddle/fluid/tests/unittests/distribution/test_distribution_categorical.py
+++ b/python/paddle/fluid/tests/unittests/distribution/test_distribution_categorical.py
@@ -20,7 +20,6 @@ from test_distribution import DistributionNumpy
 import paddle
 from paddle import fluid
 from paddle.distribution import Categorical, Distribution, Normal, Uniform
-from paddle.fluid import layers
 
 np.random.seed(2022)
 
@@ -380,7 +379,7 @@ class DistributionTestError(unittest.TestCase):
         )
 
         value_npdata = np.array([0.8], dtype="float32")
-        value_tensor = layers.create_tensor(dtype="float32")
+        value_tensor = paddle.tensor.create_tensor(dtype="float32")
         self.assertRaises(
             NotImplementedError, distribution.log_prob, value_tensor
         )
diff --git a/python/paddle/fluid/tests/unittests/npu/test_assign_value_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_assign_value_op_npu.py
index 402b90bc49bbdf736e2f9ebdde8906569d8181c0..1df24e54a16b32a962c969f00f1f31006a4a4381 100644
--- a/python/paddle/fluid/tests/unittests/npu/test_assign_value_op_npu.py
+++ b/python/paddle/fluid/tests/unittests/npu/test_assign_value_op_npu.py
@@ -93,7 +93,7 @@ class TestAssignApi(unittest.TestCase):
     def test_assign(self):
        main_program = fluid.Program()
         with fluid.program_guard(main_program):
-            x = layers.create_tensor(dtype=self.dtype)
+            x = paddle.tensor.create_tensor(dtype=self.dtype)
             layers.assign(input=self.value, output=x)
 
         exe = fluid.Executor(self.place)
diff --git a/python/paddle/fluid/tests/unittests/test_assign_value_op.py b/python/paddle/fluid/tests/unittests/test_assign_value_op.py
index 7a5128ed2ff15968a3256ad89fc1cfe5750e3191..c0a5554d39b97884117c508d93823f7250cb1934 100644
--- a/python/paddle/fluid/tests/unittests/test_assign_value_op.py
+++ b/python/paddle/fluid/tests/unittests/test_assign_value_op.py
@@ -83,7 +83,7 @@ class TestAssignApi(unittest.TestCase):
     def test_assign(self):
         main_program = fluid.Program()
         with fluid.program_guard(main_program):
-            x = layers.create_tensor(dtype=self.dtype)
+            x = paddle.tensor.create_tensor(dtype=self.dtype)
             layers.assign(input=self.value, output=x)
 
         exe = fluid.Executor(self.place)
diff --git a/python/paddle/fluid/tests/unittests/test_conditional_block.py b/python/paddle/fluid/tests/unittests/test_conditional_block.py
index 418ae3875998e7acc0f883e660b5e426da71c253..1eaf25dc3487746939ee8520d3348640d543b9fc 100644
--- a/python/paddle/fluid/tests/unittests/test_conditional_block.py
+++ b/python/paddle/fluid/tests/unittests/test_conditional_block.py
@@ -33,7 +33,7 @@ class ConditionalBlockTest(unittest.TestCase):
             data = layers.data(name='X', shape=[1], dtype='float32')
             data.stop_gradient = False
             cond = ConditionalBlock(inputs=[data])
-            out = layers.create_tensor(dtype='float32')
+            out = paddle.tensor.create_tensor(dtype='float32')
             with cond.block():
                 hidden = layers.fc(input=data, size=10)
                 layers.assign(hidden, out)
diff --git a/python/paddle/fluid/tests/unittests/test_desc_clone.py b/python/paddle/fluid/tests/unittests/test_desc_clone.py
index 7d4f1f0975fc46712343cb475b5a1fe522b5cf2c..477910f53d59d414deb3ee779287ad3dca4cb58f 100644
--- a/python/paddle/fluid/tests/unittests/test_desc_clone.py
+++ b/python/paddle/fluid/tests/unittests/test_desc_clone.py
@@ -77,7 +77,7 @@ def get_model(batch_size):
     avg_cost = paddle.mean(x=cost)
 
     # Evaluator
-    batch_size_tensor = fluid.layers.create_tensor(dtype='int64')
+    batch_size_tensor = paddle.tensor.create_tensor(dtype='int64')
     batch_acc = paddle.static.accuracy(
         input=predict, label=label, total=batch_size_tensor
     )
diff --git a/python/paddle/fluid/tests/unittests/test_fetch_var.py b/python/paddle/fluid/tests/unittests/test_fetch_var.py
index 4339813584a909d628a3c83725b96fa14c50f407..3303e30a4f3ad6b18e49d3d4ce9a6913a6fb4717 100644
--- a/python/paddle/fluid/tests/unittests/test_fetch_var.py
+++ b/python/paddle/fluid/tests/unittests/test_fetch_var.py
@@ -16,6 +16,7 @@
 import unittest
 
 import numpy as np
 
+import paddle
 import paddle.fluid as fluid
 import paddle.fluid.layers as layers
@@ -26,7 +27,9 @@ class TestFetchVar(unittest.TestCase):
 
     def test_fetch_var(self):
         self.set_input()
-        x = layers.create_tensor(dtype="int32", persistable=True, name="x")
+        x = paddle.tensor.create_tensor(
+            dtype="int32", persistable=True, name="x"
+        )
         layers.assign(input=self.val, output=x)
         exe = fluid.Executor(fluid.CPUPlace())
         exe.run(fluid.default_main_program(), feed={}, fetch_list=[])
diff --git a/python/paddle/fluid/tests/unittests/test_profiler.py b/python/paddle/fluid/tests/unittests/test_profiler.py
index 6b414afbe4a7fb3bed76a1911f1da465aab21944..62d46d4cadc48d46e9fac0d44e6ce1d4ee49cd7a 100644
--- a/python/paddle/fluid/tests/unittests/test_profiler.py
+++ b/python/paddle/fluid/tests/unittests/test_profiler.py
@@ -59,7 +59,7 @@ class TestProfiler(unittest.TestCase):
            label = fluid.layers.data(name='y', shape=[1], dtype='int64')
             cost = fluid.layers.cross_entropy(input=predict, label=label)
             avg_cost = paddle.mean(cost)
-            batch_size = fluid.layers.create_tensor(dtype='int64')
+            batch_size = paddle.tensor.create_tensor(dtype='int64')
             batch_acc = paddle.static.accuracy(
                 input=predict, label=label, total=batch_size
             )
diff --git a/python/paddle/fluid/tests/unittests/test_square_error_cost.py b/python/paddle/fluid/tests/unittests/test_square_error_cost.py
index 7828f01b02fe61773c63476d605421d7860012ae..afd16a3095738ed97f99f0cf4a2c9bede7a0dec1 100644
--- a/python/paddle/fluid/tests/unittests/test_square_error_cost.py
+++ b/python/paddle/fluid/tests/unittests/test_square_error_cost.py
@@ -19,7 +19,6 @@ import numpy as np
 import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
-import paddle.fluid.layers as layers
 from paddle.fluid.executor import Executor
 
 
@@ -31,8 +30,8 @@ class TestSquareErrorCost(unittest.TestCase):
         sub = input_val - label_val
         np_result = sub * sub
 
-        input_var = layers.create_tensor(dtype="float32", name="input")
-        label_var = layers.create_tensor(dtype="float32", name="label")
+        input_var = paddle.tensor.create_tensor(dtype="float32", name="input")
+        label_var = paddle.tensor.create_tensor(dtype="float32", name="label")
         output = paddle.nn.functional.square_error_cost(
             input=input_var, label=label_var
         )
diff --git a/python/paddle/fluid/tests/unittests/xpu/test_assign_value_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_assign_value_op_xpu.py
index 7de6af1b45c3c01878edc706cb47e7f8bdb2aa92..560815cb56bee2eb8481dc3da6196a8e9e57b075 100644
--- a/python/paddle/fluid/tests/unittests/xpu/test_assign_value_op_xpu.py
+++ b/python/paddle/fluid/tests/unittests/xpu/test_assign_value_op_xpu.py
@@ -94,7 +94,7 @@ class TestAssignApi(unittest.TestCase):
     def test_assign(self):
         main_program = fluid.Program()
         with fluid.program_guard(main_program):
-            x = layers.create_tensor(dtype=self.dtype)
+            x = paddle.tensor.create_tensor(dtype=self.dtype)
             layers.assign(input=self.value, output=x)
 
         exe = fluid.Executor(self.place)
diff --git a/python/paddle/tensor/__init__.py b/python/paddle/tensor/__init__.py
index 4c1ec078380506c399fe3866cad752a5dcb9a624..89df1b6ac3b477b2963717b53aa7bd7983fe3b8a 100755
--- a/python/paddle/tensor/__init__.py
+++ b/python/paddle/tensor/__init__.py
@@ -19,6 +19,8 @@ from .attribute import shape  # noqa: F401
 from .attribute import real  # noqa: F401
 from .attribute import imag  # noqa: F401
 from .attribute import is_floating_point  # noqa: F401
+from .creation import create_parameter  # noqa: F401
+from .creation import create_tensor  # noqa: F401
 from .creation import to_tensor  # noqa: F401
 from .creation import diag  # noqa: F401
 from .creation import diagflat  # noqa: F401
@@ -289,6 +291,8 @@ from .einsum import einsum  # noqa: F401
 
 # this list used in math_op_patch.py for _binary_creator_
 tensor_method_func = [  # noqa
+    'create_parameter',
+    'create_tensor',
     'matmul',
     'dot',
     'cov',
diff --git a/python/paddle/tensor/creation.py b/python/paddle/tensor/creation.py
index c969ee3639bf93d62ae8bba8a9b6b6a7c1cbb147..134e27eef9df6e82ced105cbb0d443885ee7a301 100644
--- a/python/paddle/tensor/creation.py
+++ b/python/paddle/tensor/creation.py
@@ -100,7 +100,7 @@ def create_parameter(
 
             import paddle
             paddle.enable_static()
-            W = paddle.static.create_parameter(shape=[784, 200], dtype='float32')
+            W = paddle.create_parameter(shape=[784, 200], dtype='float32')
     """
     check_type(shape, 'shape', (list, tuple, np.ndarray), 'create_parameter')
     for item in shape:
@@ -150,6 +150,48 @@ def create_parameter(
     )
 
 
+def create_tensor(dtype, name=None, persistable=False):
+    """
+    Create a variable, which will hold a Tensor with data type dtype.
+
+    Args:
+        dtype(string|numpy.dtype): the data type of Tensor to be created, the
+            data type is bool, float16, float32, float64, int8, int16, int32 and int64.
+        name(string, optional): The default value is None. Normally there is no need for
+            user to set this property. For more information, please refer to :ref:`api_guide_Name`
+        persistable(bool): Set the persistable flag of the create tensor.
+            default value is False.
+
+    Returns:
+        Variable: The tensor to be created according to dtype.
+
+    Examples:
+        .. code-block:: python
+
+          import paddle
+          tensor = paddle.tensor.create_tensor(dtype='float32')
+    """
+    check_dtype(
+        dtype,
+        'dtype',
+        [
+            'bool',
+            'float16',
+            'float32',
+            'float64',
+            'int8',
+            'int32',
+            'int32',
+            'int64',
+        ],
+        'create_tensor',
+    )
+    helper = LayerHelper("create_tensor", **locals())
+    return helper.create_variable(
+        name=helper.name, dtype=dtype, persistable=persistable
+    )
+
+
 def linspace(start, stop, num, dtype=None, name=None):
     r"""
     Return fixed number of evenly spaced values within a given interval.