From 4952f344dd573f854e33ef2f067eb437a3565972 Mon Sep 17 00:00:00 2001 From: 201716010711 <87008376+201716010711@users.noreply.github.com> Date: Thu, 24 Nov 2022 18:34:00 +0800 Subject: [PATCH] clean fluid task: transfer uniform_random_batch_size_like api (#48270) --- python/paddle/distribution/uniform.py | 3 +- python/paddle/fluid/layers/distributions.py | 6 +- python/paddle/fluid/layers/nn.py | 110 ------------------ ...perative_star_gan_with_gradient_penalty.py | 3 +- .../fluid/tests/unittests/test_layers.py | 3 +- .../unittests/test_uniform_random_bf16_op.py | 3 +- .../tests/unittests/test_uniform_random_op.py | 7 +- python/paddle/tensor/random.py | 95 +++++++++++++++ 8 files changed, 111 insertions(+), 119 deletions(-) diff --git a/python/paddle/distribution/uniform.py b/python/paddle/distribution/uniform.py index c8f8c40a75..a5013ab988 100644 --- a/python/paddle/distribution/uniform.py +++ b/python/paddle/distribution/uniform.py @@ -15,6 +15,7 @@ import numpy as np from paddle import _C_ops, _legacy_C_ops from paddle.distribution import distribution +from paddle.tensor import random from paddle.fluid.data_feeder import check_type, convert_dtype from paddle.fluid.framework import ( _non_static_mode, @@ -167,7 +168,7 @@ class Uniform(distribution.Distribution): zero_tmp = tensor.fill_constant_batch_size_like( self.low + self.high, batch_shape + shape, self.dtype, 0.0 ) - uniform_random_tmp = nn.uniform_random_batch_size_like( + uniform_random_tmp = random.uniform_random_batch_size_like( zero_tmp, zero_tmp.shape, dtype=self.dtype, diff --git a/python/paddle/fluid/layers/distributions.py b/python/paddle/fluid/layers/distributions.py index 18b7f26713..4d13260c61 100644 --- a/python/paddle/fluid/layers/distributions.py +++ b/python/paddle/fluid/layers/distributions.py @@ -221,8 +221,10 @@ class Uniform(Distribution): zero_tmp = tensor.fill_constant_batch_size_like( self.low + self.high, batch_shape + shape, self.low.dtype, 0.0 ) - uniform_random_tmp = nn.uniform_random_batch_size_like( - zero_tmp, zero_tmp.shape, min=0.0, max=1.0, seed=seed + uniform_random_tmp = ( + paddle.tensor.random.uniform_random_batch_size_like( + zero_tmp, zero_tmp.shape, min=0.0, max=1.0, seed=seed + ) ) output = ( uniform_random_tmp * (zero_tmp + self.high - self.low) diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index d2ac562dfd..215dd0845e 100644 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -121,7 +121,6 @@ __all__ = [ 'elementwise_div', 'elementwise_sub', 'elementwise_mul', - 'uniform_random_batch_size_like', 'gaussian_random', 'sampling_id', 'gaussian_random_batch_size_like', @@ -7182,115 +7181,6 @@ def flatten(x, axis=1, name=None): from paddle.fluid.framework import convert_np_dtype_to_dtype_ -@deprecated(since='1.8.0', update_to="paddle.uniform") -@templatedoc() -def uniform_random_batch_size_like( - input, - shape, - dtype='float32', - input_dim_idx=0, - output_dim_idx=0, - min=-1.0, - max=1.0, - seed=0, -): - """ - This OP initializes a variable with random values sampled from a - uniform distribution in the range [min, max). The input_dim_idx used to get the input dimension value which will be used to resize the output dimension. - - .. 
code-block:: text - - *Case 1: - - Given: - input =[[0.946741 , 0.1357001 , 0.38086128]] # input.shape=[1,3] - shape=[2,4] - - result.shape[output_dim_idx] = input.shape[input_dim_idx], - output_dim_idx = 0, - input_dim_idx = 0, - result.shape[0] = input.shape[0], - then: - result=[[ 0.3443427 , -0.23056602, 0.3477049 , 0.06139076]] # result.shape=[1,4] - - *Case 2: - - Given: - input =[[0.946741 , 0.1357001 , 0.38086128]] # input.shape=[1,3] - shape=[2,4] - input_dim_idx=1 - output_dim_idx=1 - - result.shape[output_dim_idx] = input.shape[input_dim_idx], - output_dim_idx = 1, - input_dim_idx = 1, - result.shape[1] = input.shape[1], - then: - result=[[-0.23133647, -0.84195036, 0.21441269], - [-0.08774924, 0.25605237, -0.09403259]] # result.shape=[2,3] - Args: - input (Variable): A Tensor. Supported data types: float32, float64. - shape (tuple|list): A python list or python tuple. The shape of the output Tensor, the data type is int. - input_dim_idx (int, optional): An index used to get the input dimension value which will be used to resize the output dimension. Default 0. - output_dim_idx (int, optional): An index used to indicate the specific dimension that will be replaced by corresponding input dimension value. Default 0. - min (float, optional): The lower bound on the range of random values to generate, the min is included in the range. Default -1.0. - max (float, optional): The upper bound on the range of random values to generate, the max is excluded in the range. Default 1.0. - seed (int, optional): Random seed used for generating samples. 0 means use a seed generated by the system.Note that if seed is not 0, this operator will always generate the same random numbers every time. - dtype(np.dtype|core.VarDesc.VarType|str, optional): The data type of output Tensor. Supported data types: float32, float64. Default float32. - Returns: - Variable: A Tensor of the specified shape filled with uniform_random values. The shape of the Tensor is determined by the shape parameter and the specified dimension of the input Tensor. - - Examples: - .. 
code-block:: python - - import paddle - import paddle.fluid as fluid - paddle.enable_static() - - # example 1: - input = fluid.data(name="input", shape=[1, 3], dtype='float32') - out_1 = fluid.layers.uniform_random_batch_size_like(input, [2, 4]) # out_1.shape=[1, 4] - - # example 2: - out_2 = fluid.layers.uniform_random_batch_size_like(input, [2, 4], input_dim_idx=1, output_dim_idx=1) # out_2.shape=[2, 3] - - - """ - check_variable_and_dtype( - input, - 'Input', - ("float32", 'float64', "uint16"), - 'uniform_random_batch_size_like', - ) - check_type(shape, 'shape', (list, tuple), 'uniform_random_batch_size_like') - check_dtype( - dtype, - 'dtype', - ('float32', 'float64', "uint16"), - 'uniform_random_batch_size_like', - ) - - helper = LayerHelper('uniform_random_batch_size_like', **locals()) - out = helper.create_variable_for_type_inference(dtype) - c_dtype = convert_np_dtype_to_dtype_(dtype) - helper.append_op( - type='uniform_random_batch_size_like', - inputs={'Input': input}, - outputs={'Out': out}, - attrs={ - 'shape': shape, - 'input_dim_idx': input_dim_idx, - 'output_dim_idx': output_dim_idx, - 'min': min, - 'max': max, - 'seed': seed, - 'dtype': c_dtype, - }, - ) - - return out - - @deprecated(since="2.0.0", update_to="paddle.normal") @templatedoc() def gaussian_random( diff --git a/python/paddle/fluid/tests/unittests/test_imperative_star_gan_with_gradient_penalty.py b/python/paddle/fluid/tests/unittests/test_imperative_star_gan_with_gradient_penalty.py index d623a277cf..5d969804a8 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_star_gan_with_gradient_penalty.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_star_gan_with_gradient_penalty.py @@ -14,6 +14,7 @@ import paddle import paddle.fluid as fluid +from paddle.tensor import random import numpy as np import unittest from paddle import _legacy_C_ops @@ -402,7 +403,7 @@ def calc_gradients(outputs, inputs, no_grad_set): def gradient_penalty(f, real, fake, no_grad_set, cfg): def _interpolate(a, b): shape = [a.shape[0]] - alpha = fluid.layers.uniform_random_batch_size_like( + alpha = random.uniform_random_batch_size_like( input=a, shape=shape, min=0.1, max=1.0, seed=cfg.seed ) diff --git a/python/paddle/fluid/tests/unittests/test_layers.py b/python/paddle/fluid/tests/unittests/test_layers.py index 8301a02a2e..00733426b6 100644 --- a/python/paddle/fluid/tests/unittests/test_layers.py +++ b/python/paddle/fluid/tests/unittests/test_layers.py @@ -33,6 +33,7 @@ from paddle.fluid.dygraph import nn from paddle.fluid.dygraph import base from paddle.fluid.dygraph import to_variable from paddle.fluid.framework import _test_eager_guard +from paddle.tensor import random import paddle.nn.functional as F @@ -3555,7 +3556,7 @@ class TestBook(LayerTest): input = self._get_data( name="input", shape=[13, 11], dtype='float32' ) - out = layers.uniform_random_batch_size_like(input, [-1, 11]) + out = random.uniform_random_batch_size_like(input, [-1, 11]) return out def make_gaussian_random(self): diff --git a/python/paddle/fluid/tests/unittests/test_uniform_random_bf16_op.py b/python/paddle/fluid/tests/unittests/test_uniform_random_bf16_op.py index 0977ec69ac..b417789ec0 100644 --- a/python/paddle/fluid/tests/unittests/test_uniform_random_bf16_op.py +++ b/python/paddle/fluid/tests/unittests/test_uniform_random_bf16_op.py @@ -23,6 +23,7 @@ from paddle.fluid.tests.unittests.test_uniform_random_op import ( output_hist, output_hist_diag, ) +from paddle.tensor import random class TestUniformRandomOpBF16(OpTest): @@ -262,7 +263,7 @@ 
class TestUniformRandomBatchSizeLikeOpBF16API(unittest.TestCase): train_program = fluid.Program() with fluid.program_guard(train_program, startup_program): input = fluid.data(name="input", shape=[1, 3], dtype='uint16') - out_1 = fluid.layers.uniform_random_batch_size_like( + out_1 = random.uniform_random_batch_size_like( input, [2, 4], dtype=np.uint16 ) # out_1.shape=[1, 4] diff --git a/python/paddle/fluid/tests/unittests/test_uniform_random_op.py b/python/paddle/fluid/tests/unittests/test_uniform_random_op.py index efcbf075bf..dbc036cb7e 100644 --- a/python/paddle/fluid/tests/unittests/test_uniform_random_op.py +++ b/python/paddle/fluid/tests/unittests/test_uniform_random_op.py @@ -25,6 +25,7 @@ from paddle.fluid import Program, program_guard from paddle.fluid.framework import _test_eager_guard from test_attribute_var import UnittestBase +from paddle.tensor import random def output_hist(out): @@ -481,7 +482,7 @@ class TestUniformRandomBatchSizeLikeOpError(unittest.TestCase): x1 = fluid.create_lod_tensor( np.zeros((100, 784)), [[10, 10, 10, 70]], fluid.CPUPlace() ) - fluid.layers.uniform_random_batch_size_like(x1) + random.uniform_random_batch_size_like(x1) self.assertRaises(TypeError, test_Variable) @@ -489,7 +490,7 @@ class TestUniformRandomBatchSizeLikeOpError(unittest.TestCase): x1 = fluid.layers.data( name='x2', shape=[100, 784], dtype='float32' ) - fluid.layers.uniform_random_batch_size_like(x1, shape="shape") + random.uniform_random_batch_size_like(x1, shape="shape") self.assertRaises(TypeError, test_shape) @@ -497,7 +498,7 @@ class TestUniformRandomBatchSizeLikeOpError(unittest.TestCase): x2 = fluid.layers.data( name='x2', shape=[100, 784], dtype='float32' ) - fluid.layers.uniform_random_batch_size_like(x2, 'int32') + random.uniform_random_batch_size_like(x2, 'int32') self.assertRaises(TypeError, test_dtype) diff --git a/python/paddle/tensor/random.py b/python/paddle/tensor/random.py index 54e8459661..da7f8b6a28 100644 --- a/python/paddle/tensor/random.py +++ b/python/paddle/tensor/random.py @@ -215,6 +215,101 @@ def multinomial(x, num_samples=1, replacement=False, name=None): return out +def uniform_random_batch_size_like( + input, + shape, + dtype='float32', + input_dim_idx=0, + output_dim_idx=0, + min=-1.0, + max=1.0, + seed=0, +): + """ + This OP initializes a variable with random values sampled from a + uniform distribution in the range [min, max). The input_dim_idx used to get the input dimension value which will be used to resize the output dimension. + .. code-block:: text + *Case 1: + Given: + input =[[0.946741 , 0.1357001 , 0.38086128]] # input.shape=[1,3] + shape=[2,4] + result.shape[output_dim_idx] = input.shape[input_dim_idx], + output_dim_idx = 0, + input_dim_idx = 0, + result.shape[0] = input.shape[0], + then: + result=[[ 0.3443427 , -0.23056602, 0.3477049 , 0.06139076]] # result.shape=[1,4] + *Case 2: + Given: + input =[[0.946741 , 0.1357001 , 0.38086128]] # input.shape=[1,3] + shape=[2,4] + input_dim_idx=1 + output_dim_idx=1 + result.shape[output_dim_idx] = input.shape[input_dim_idx], + output_dim_idx = 1, + input_dim_idx = 1, + result.shape[1] = input.shape[1], + then: + result=[[-0.23133647, -0.84195036, 0.21441269], + [-0.08774924, 0.25605237, -0.09403259]] # result.shape=[2,3] + Args: + input (Variable): A Tensor. Supported data types: float32, float64. + shape (tuple|list): A python list or python tuple. The shape of the output Tensor, the data type is int. 
+        input_dim_idx (int, optional): The index of the input dimension whose value is used to resize the output dimension. Default 0.
+        output_dim_idx (int, optional): The index of the output dimension that is replaced by the corresponding input dimension value. Default 0.
+        min (float, optional): The lower bound of the range of random values to generate; min is included in the range. Default -1.0.
+        max (float, optional): The upper bound of the range of random values to generate; max is excluded from the range. Default 1.0.
+        seed (int, optional): Random seed used for generating samples. 0 means use a seed generated by the system. Note that if seed is not 0, this operator will always generate the same random numbers every time.
+        dtype (np.dtype|core.VarDesc.VarType|str, optional): The data type of the output Tensor. Supported data types: float32, float64. Default float32.
+    Returns:
+        Variable: A Tensor of the specified shape filled with uniform random values. The shape of the Tensor is determined by the shape parameter and the specified dimension of the input Tensor.
+    Examples:
+        .. code-block:: python
+            import paddle
+            import paddle.fluid as fluid
+            from paddle.tensor import random
+            paddle.enable_static()
+            # example 1:
+            input = fluid.data(name="input", shape=[1, 3], dtype='float32')
+            out_1 = random.uniform_random_batch_size_like(input, [2, 4]) # out_1.shape=[1, 4]
+            # example 2:
+            out_2 = random.uniform_random_batch_size_like(input, [2, 4], input_dim_idx=1, output_dim_idx=1) # out_2.shape=[2, 3]
+    """
+    check_variable_and_dtype(
+        input,
+        'Input',
+        ("float32", 'float64', "uint16"),
+        'uniform_random_batch_size_like',
+    )
+    check_type(shape, 'shape', (list, tuple), 'uniform_random_batch_size_like')
+    check_dtype(
+        dtype,
+        'dtype',
+        ('float32', 'float64', "uint16"),
+        'uniform_random_batch_size_like',
+    )
+
+    helper = LayerHelper('uniform_random_batch_size_like', **locals())
+    out = helper.create_variable_for_type_inference(dtype)
+    c_dtype = convert_np_dtype_to_dtype_(dtype)
+    helper.append_op(
+        type='uniform_random_batch_size_like',
+        inputs={'Input': input},
+        outputs={'Out': out},
+        attrs={
+            'shape': shape,
+            'input_dim_idx': input_dim_idx,
+            'output_dim_idx': output_dim_idx,
+            'min': min,
+            'max': max,
+            'seed': seed,
+            'dtype': c_dtype,
+        },
+    )
+
+    return out
+
+
 def gaussian(shape, mean=0.0, std=1.0, dtype=None, name=None):
     """
     Returns a Tensor filled with random values sampled from a Gaussian
-- 
GitLab
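For quick reference, below is a minimal usage sketch of the API at its new location, mirroring the docstring example carried over in this patch. It assumes static graph mode and a Paddle build that includes the `paddle.tensor.random.uniform_random_batch_size_like` function added above; variable names and shapes are illustrative only.

.. code-block:: python

    # Minimal sketch (assumption: a Paddle build with this patch applied; static graph mode).
    import paddle
    import paddle.fluid as fluid
    from paddle.tensor import random

    paddle.enable_static()

    # 'input' only supplies the dimension value to copy; its contents are never read.
    input = fluid.data(name="input", shape=[1, 3], dtype='float32')

    # Default input_dim_idx=0, output_dim_idx=0: dim 0 of the output is taken
    # from dim 0 of 'input', so out_a.shape == [1, 4].
    out_a = random.uniform_random_batch_size_like(input, [2, 4], min=0.0, max=1.0)

    # Copy dim 1 instead: out_b.shape == [2, 3].
    out_b = random.uniform_random_batch_size_like(
        input, [2, 4], input_dim_idx=1, output_dim_idx=1
    )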