Unverified commit 4952f344, authored by 201716010711, committed by GitHub

clean fluid task: transfer uniform_random_batch_size_like api (#48270)

Parent 43b92b63
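The change itself is mechanical: the API moves from `paddle.fluid.layers` to `paddle.tensor.random`, so call sites only need a new import and prefix. A minimal before/after sketch (variable names here are illustrative, not taken from the diff):

```python
import paddle
from paddle.tensor import random

paddle.enable_static()

# Hypothetical call site: uniform noise whose batch dimension follows `feat`.
feat = paddle.static.data(name="feat", shape=[1, 3], dtype="float32")

# Before this commit (deprecated fluid layer):
#   noise = fluid.layers.uniform_random_batch_size_like(feat, [2, 4])
# After this commit, the same call lives under paddle.tensor.random:
noise = random.uniform_random_batch_size_like(feat, [2, 4])
print(noise.shape)  # [1, 4]: dim 0 is copied from feat, dim 1 comes from shape
```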
@@ -15,6 +15,7 @@
 import numpy as np
 from paddle import _C_ops, _legacy_C_ops
 from paddle.distribution import distribution
+from paddle.tensor import random
 from paddle.fluid.data_feeder import check_type, convert_dtype
 from paddle.fluid.framework import (
     _non_static_mode,
@@ -167,7 +168,7 @@ class Uniform(distribution.Distribution):
             zero_tmp = tensor.fill_constant_batch_size_like(
                 self.low + self.high, batch_shape + shape, self.dtype, 0.0
             )
-            uniform_random_tmp = nn.uniform_random_batch_size_like(
+            uniform_random_tmp = random.uniform_random_batch_size_like(
                 zero_tmp,
                 zero_tmp.shape,
                 dtype=self.dtype,
...
@@ -221,9 +221,11 @@ class Uniform(Distribution):
             zero_tmp = tensor.fill_constant_batch_size_like(
                 self.low + self.high, batch_shape + shape, self.low.dtype, 0.0
             )
-            uniform_random_tmp = nn.uniform_random_batch_size_like(
-                zero_tmp, zero_tmp.shape, min=0.0, max=1.0, seed=seed
-            )
+            uniform_random_tmp = (
+                paddle.tensor.random.uniform_random_batch_size_like(
+                    zero_tmp, zero_tmp.shape, min=0.0, max=1.0, seed=seed
+                )
+            )
             output = (
                 uniform_random_tmp * (zero_tmp + self.high - self.low)
                 + self.low
...
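For context, both `Uniform.sample` variants above draw a batch-shaped tensor from U[0, 1) and then rescale it affinely into [low, high). A minimal standalone sketch of that same rescaling with the public `paddle.uniform` API (illustrative only, not the distribution's actual code path):

```python
import paddle

# Illustrative only: the affine rescaling applied to the [0, 1) draws above.
low, high = paddle.to_tensor(2.0), paddle.to_tensor(5.0)
u = paddle.uniform(shape=[4], min=0.0, max=1.0)  # u ~ U[0, 1)
sample = u * (high - low) + low                  # sample ~ U[low, high)
```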
@@ -121,7 +121,6 @@ __all__ = [
     'elementwise_div',
     'elementwise_sub',
     'elementwise_mul',
-    'uniform_random_batch_size_like',
     'gaussian_random',
     'sampling_id',
     'gaussian_random_batch_size_like',
@@ -7182,115 +7181,6 @@ def flatten(x, axis=1, name=None):
     from paddle.fluid.framework import convert_np_dtype_to_dtype_
 
 
-@deprecated(since='1.8.0', update_to="paddle.uniform")
-@templatedoc()
-def uniform_random_batch_size_like(
-    input,
-    shape,
-    dtype='float32',
-    input_dim_idx=0,
-    output_dim_idx=0,
-    min=-1.0,
-    max=1.0,
-    seed=0,
-):
-    """
-    This OP initializes a variable with random values sampled from a
-    uniform distribution in the range [min, max). The input_dim_idx used to get the input dimension value which will be used to resize the output dimension.
-
-    .. code-block:: text
-
-        *Case 1:
-
-            Given:
-                input =[[0.946741 , 0.1357001 , 0.38086128]]    # input.shape=[1,3]
-                shape=[2,4]
-
-            result.shape[output_dim_idx] = input.shape[input_dim_idx],
-            output_dim_idx = 0,
-            input_dim_idx = 0,
-            result.shape[0] = input.shape[0],
-
-            then:
-                result=[[ 0.3443427 , -0.23056602, 0.3477049 , 0.06139076]]    # result.shape=[1,4]
-
-        *Case 2:
-
-            Given:
-                input =[[0.946741 , 0.1357001 , 0.38086128]]    # input.shape=[1,3]
-                shape=[2,4]
-                input_dim_idx=1
-                output_dim_idx=1
-
-            result.shape[output_dim_idx] = input.shape[input_dim_idx],
-            output_dim_idx = 1,
-            input_dim_idx = 1,
-            result.shape[1] = input.shape[1],
-
-            then:
-                result=[[-0.23133647, -0.84195036, 0.21441269],
-                        [-0.08774924, 0.25605237, -0.09403259]]    # result.shape=[2,3]
-
-    Args:
-        input (Variable): A Tensor. Supported data types: float32, float64.
-        shape (tuple|list): A python list or python tuple. The shape of the output Tensor, the data type is int.
-        input_dim_idx (int, optional): An index used to get the input dimension value which will be used to resize the output dimension. Default 0.
-        output_dim_idx (int, optional): An index used to indicate the specific dimension that will be replaced by corresponding input dimension value. Default 0.
-        min (float, optional): The lower bound on the range of random values to generate, the min is included in the range. Default -1.0.
-        max (float, optional): The upper bound on the range of random values to generate, the max is excluded in the range. Default 1.0.
-        seed (int, optional): Random seed used for generating samples. 0 means use a seed generated by the system.Note that if seed is not 0, this operator will always generate the same random numbers every time.
-        dtype(np.dtype|core.VarDesc.VarType|str, optional): The data type of output Tensor. Supported data types: float32, float64. Default float32.
-
-    Returns:
-        Variable: A Tensor of the specified shape filled with uniform_random values. The shape of the Tensor is determined by the shape parameter and the specified dimension of the input Tensor.
-
-    Examples:
-        .. code-block:: python
-
-            import paddle
-            import paddle.fluid as fluid
-            paddle.enable_static()
-
-            # example 1:
-            input = fluid.data(name="input", shape=[1, 3], dtype='float32')
-            out_1 = fluid.layers.uniform_random_batch_size_like(input, [2, 4]) # out_1.shape=[1, 4]
-
-            # example 2:
-            out_2 = fluid.layers.uniform_random_batch_size_like(input, [2, 4], input_dim_idx=1, output_dim_idx=1) # out_2.shape=[2, 3]
-    """
-    check_variable_and_dtype(
-        input,
-        'Input',
-        ("float32", 'float64', "uint16"),
-        'uniform_random_batch_size_like',
-    )
-    check_type(shape, 'shape', (list, tuple), 'uniform_random_batch_size_like')
-    check_dtype(
-        dtype,
-        'dtype',
-        ('float32', 'float64', "uint16"),
-        'uniform_random_batch_size_like',
-    )
-
-    helper = LayerHelper('uniform_random_batch_size_like', **locals())
-    out = helper.create_variable_for_type_inference(dtype)
-    c_dtype = convert_np_dtype_to_dtype_(dtype)
-    helper.append_op(
-        type='uniform_random_batch_size_like',
-        inputs={'Input': input},
-        outputs={'Out': out},
-        attrs={
-            'shape': shape,
-            'input_dim_idx': input_dim_idx,
-            'output_dim_idx': output_dim_idx,
-            'min': min,
-            'max': max,
-            'seed': seed,
-            'dtype': c_dtype,
-        },
-    )
-
-    return out
-
-
 @deprecated(since="2.0.0", update_to="paddle.normal")
 @templatedoc()
 def gaussian_random(
...
@@ -14,6 +14,7 @@
 import paddle
 import paddle.fluid as fluid
+from paddle.tensor import random
 import numpy as np
 import unittest
 from paddle import _legacy_C_ops
@@ -402,7 +403,7 @@ def calc_gradients(outputs, inputs, no_grad_set):
 def gradient_penalty(f, real, fake, no_grad_set, cfg):
     def _interpolate(a, b):
         shape = [a.shape[0]]
-        alpha = fluid.layers.uniform_random_batch_size_like(
+        alpha = random.uniform_random_batch_size_like(
             input=a, shape=shape, min=0.1, max=1.0, seed=cfg.seed
         )
...
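This call site sits inside a WGAN-GP-style gradient penalty, where one coefficient is drawn per example in the batch and used to mix real and fake samples. A hedged sketch of that interpolation; `real`, `fake`, and the 2-D shapes are assumptions for illustration, not the test's actual variables:

```python
import paddle
from paddle.tensor import random

paddle.enable_static()
real = paddle.static.data(name="real", shape=[8, 784], dtype="float32")
fake = paddle.static.data(name="fake", shape=[8, 784], dtype="float32")

# One mixing coefficient per example: dim 0 of the result is copied from real.
alpha = random.uniform_random_batch_size_like(
    input=real, shape=[real.shape[0]], min=0.1, max=1.0
)
alpha = paddle.reshape(alpha, [-1, 1])       # make it broadcast over features
interpolated = fake + alpha * (real - fake)  # point on the line between pairs
```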
@@ -33,6 +33,7 @@ from paddle.fluid.dygraph import nn
 from paddle.fluid.dygraph import base
 from paddle.fluid.dygraph import to_variable
 from paddle.fluid.framework import _test_eager_guard
+from paddle.tensor import random
 import paddle.nn.functional as F
@@ -3555,7 +3556,7 @@ class TestBook(LayerTest):
             input = self._get_data(
                 name="input", shape=[13, 11], dtype='float32'
             )
-            out = layers.uniform_random_batch_size_like(input, [-1, 11])
+            out = random.uniform_random_batch_size_like(input, [-1, 11])
             return out
 
     def make_gaussian_random(self):
...
@@ -23,6 +23,7 @@ from paddle.fluid.tests.unittests.test_uniform_random_op import (
     output_hist,
     output_hist_diag,
 )
+from paddle.tensor import random
 
 
 class TestUniformRandomOpBF16(OpTest):
@@ -262,7 +263,7 @@ class TestUniformRandomBatchSizeLikeOpBF16API(unittest.TestCase):
         train_program = fluid.Program()
         with fluid.program_guard(train_program, startup_program):
             input = fluid.data(name="input", shape=[1, 3], dtype='uint16')
-            out_1 = fluid.layers.uniform_random_batch_size_like(
+            out_1 = random.uniform_random_batch_size_like(
                 input, [2, 4], dtype=np.uint16
             )  # out_1.shape=[1, 4]
...
@@ -25,6 +25,7 @@ from paddle.fluid import Program, program_guard
 from paddle.fluid.framework import _test_eager_guard
 from test_attribute_var import UnittestBase
+from paddle.tensor import random
 
 
 def output_hist(out):
@@ -481,7 +482,7 @@ class TestUniformRandomBatchSizeLikeOpError(unittest.TestCase):
                 x1 = fluid.create_lod_tensor(
                     np.zeros((100, 784)), [[10, 10, 10, 70]], fluid.CPUPlace()
                 )
-                fluid.layers.uniform_random_batch_size_like(x1)
+                random.uniform_random_batch_size_like(x1)
 
             self.assertRaises(TypeError, test_Variable)
@@ -489,7 +490,7 @@ class TestUniformRandomBatchSizeLikeOpError(unittest.TestCase):
                 x1 = fluid.layers.data(
                     name='x2', shape=[100, 784], dtype='float32'
                 )
-                fluid.layers.uniform_random_batch_size_like(x1, shape="shape")
+                random.uniform_random_batch_size_like(x1, shape="shape")
 
             self.assertRaises(TypeError, test_shape)
@@ -497,7 +498,7 @@ class TestUniformRandomBatchSizeLikeOpError(unittest.TestCase):
                 x2 = fluid.layers.data(
                     name='x2', shape=[100, 784], dtype='float32'
                 )
-                fluid.layers.uniform_random_batch_size_like(x2, 'int32')
+                random.uniform_random_batch_size_like(x2, 'int32')
 
             self.assertRaises(TypeError, test_dtype)
...
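The three error tests above all rely on the argument checks (`check_variable_and_dtype`, `check_type`, `check_dtype`) raising `TypeError` before any op is appended. A self-contained sketch of one such check, with an assumed test class name:

```python
import unittest

import numpy as np
import paddle
import paddle.fluid as fluid
from paddle.tensor import random


class TestBadInputRaises(unittest.TestCase):  # assumed name, for illustration only
    def test_non_variable_input(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program(), fluid.Program()):

            def call_with_lod_tensor():
                # A LoDTensor is not a static Variable, so the
                # check_variable_and_dtype guard rejects it.
                x = fluid.create_lod_tensor(
                    np.zeros((100, 784)), [[10, 10, 10, 70]], fluid.CPUPlace()
                )
                random.uniform_random_batch_size_like(x)

            self.assertRaises(TypeError, call_with_lod_tensor)


if __name__ == "__main__":
    unittest.main()
```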
@@ -215,6 +215,101 @@ def multinomial(x, num_samples=1, replacement=False, name=None):
     return out
 
 
+def uniform_random_batch_size_like(
+    input,
+    shape,
+    dtype='float32',
+    input_dim_idx=0,
+    output_dim_idx=0,
+    min=-1.0,
+    max=1.0,
+    seed=0,
+):
+    """
+    This OP initializes a variable with random values sampled from a
+    uniform distribution in the range [min, max). The input_dim_idx used to get the input dimension value which will be used to resize the output dimension.
+
+    .. code-block:: text
+
+        *Case 1:
+
+            Given:
+                input =[[0.946741 , 0.1357001 , 0.38086128]]    # input.shape=[1,3]
+                shape=[2,4]
+
+            result.shape[output_dim_idx] = input.shape[input_dim_idx],
+            output_dim_idx = 0,
+            input_dim_idx = 0,
+            result.shape[0] = input.shape[0],
+
+            then:
+                result=[[ 0.3443427 , -0.23056602, 0.3477049 , 0.06139076]]    # result.shape=[1,4]
+
+        *Case 2:
+
+            Given:
+                input =[[0.946741 , 0.1357001 , 0.38086128]]    # input.shape=[1,3]
+                shape=[2,4]
+                input_dim_idx=1
+                output_dim_idx=1
+
+            result.shape[output_dim_idx] = input.shape[input_dim_idx],
+            output_dim_idx = 1,
+            input_dim_idx = 1,
+            result.shape[1] = input.shape[1],
+
+            then:
+                result=[[-0.23133647, -0.84195036, 0.21441269],
+                        [-0.08774924, 0.25605237, -0.09403259]]    # result.shape=[2,3]
+
+    Args:
+        input (Variable): A Tensor. Supported data types: float32, float64.
+        shape (tuple|list): A python list or python tuple. The shape of the output Tensor, the data type is int.
+        input_dim_idx (int, optional): An index used to get the input dimension value which will be used to resize the output dimension. Default 0.
+        output_dim_idx (int, optional): An index used to indicate the specific dimension that will be replaced by corresponding input dimension value. Default 0.
+        min (float, optional): The lower bound on the range of random values to generate, the min is included in the range. Default -1.0.
+        max (float, optional): The upper bound on the range of random values to generate, the max is excluded in the range. Default 1.0.
+        seed (int, optional): Random seed used for generating samples. 0 means use a seed generated by the system.Note that if seed is not 0, this operator will always generate the same random numbers every time.
+        dtype(np.dtype|core.VarDesc.VarType|str, optional): The data type of output Tensor. Supported data types: float32, float64. Default float32.
+
+    Returns:
+        Variable: A Tensor of the specified shape filled with uniform_random values. The shape of the Tensor is determined by the shape parameter and the specified dimension of the input Tensor.
+
+    Examples:
+        .. code-block:: python
+
+            import paddle
+            import paddle.fluid as fluid
+            from paddle.tensor import random
+            paddle.enable_static()
+
+            # example 1:
+            input = fluid.data(name="input", shape=[1, 3], dtype='float32')
+            out_1 = random.uniform_random_batch_size_like(input, [2, 4]) # out_1.shape=[1, 4]
+
+            # example 2:
+            out_2 = random.uniform_random_batch_size_like(input, [2, 4], input_dim_idx=1, output_dim_idx=1) # out_2.shape=[2, 3]
+    """
+    check_variable_and_dtype(
+        input,
+        'Input',
+        ("float32", 'float64', "uint16"),
+        'uniform_random_batch_size_like',
+    )
+    check_type(shape, 'shape', (list, tuple), 'uniform_random_batch_size_like')
+    check_dtype(
+        dtype,
+        'dtype',
+        ('float32', 'float64', "uint16"),
+        'uniform_random_batch_size_like',
+    )
+
+    helper = LayerHelper('uniform_random_batch_size_like', **locals())
+    out = helper.create_variable_for_type_inference(dtype)
+    c_dtype = convert_np_dtype_to_dtype_(dtype)
+    helper.append_op(
+        type='uniform_random_batch_size_like',
+        inputs={'Input': input},
+        outputs={'Out': out},
+        attrs={
+            'shape': shape,
+            'input_dim_idx': input_dim_idx,
+            'output_dim_idx': output_dim_idx,
+            'min': min,
+            'max': max,
+            'seed': seed,
+            'dtype': c_dtype,
+        },
+    )
+
+    return out
+
+
 def gaussian(shape, mean=0.0, std=1.0, dtype=None, name=None):
     """
     Returns a Tensor filled with random values sampled from a Gaussian
...
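A hedged end-to-end sketch of the relocated API under the static graph (names and shapes are illustrative): feeding a `[1, 3]` input and asking for shape `[2, 4]` should yield a `[1, 4]` result, because dimension 0 is copied from the input.

```python
import numpy as np
import paddle
import paddle.fluid as fluid
from paddle.tensor import random

paddle.enable_static()
main, startup = fluid.Program(), fluid.Program()
with fluid.program_guard(main, startup):
    x = fluid.data(name="x", shape=[1, 3], dtype="float32")
    out = random.uniform_random_batch_size_like(x, [2, 4], min=0.0, max=1.0)

exe = fluid.Executor(fluid.CPUPlace())
exe.run(startup)
(result,) = exe.run(
    main, feed={"x": np.zeros((1, 3), dtype="float32")}, fetch_list=[out]
)
print(result.shape)  # (1, 4): dim 0 copied from x, dim 1 taken from shape
```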