未验证 提交 c8955d0d 编写于 作者: A Aurelius84 提交者: GitHub

[OpAttr]min/max of uniform_random support Tensor type (#45417)

* [OpAttr]min/max of Uniform_rand support Tensor type

* fix typo
上级 4d78390e
......@@ -182,9 +182,11 @@ uniform distribution. The random result is in set [min, max).
AddAttr<std::vector<int64_t>>("shape", "The shape of the output tensor")
.SetDefault({});
AddAttr<float>("min", "Minimum value of uniform random. [default -1.0].")
.SetDefault(-1.0f);
.SetDefault(-1.0f)
.SupportTensor();
AddAttr<float>("max", "Maximun value of uniform random. [default 1.0].")
.SetDefault(1.0f);
.SetDefault(1.0f)
.SupportTensor();
AddAttr<int>("seed",
"Random seed used for generating samples. "
"0 means use a seed generated by the system."
......
......@@ -2754,11 +2754,11 @@
backward : unfold_grad
- api : uniform_random
args : (IntArray shape, DataType dtype, float min, float max, int seed, Place place={})
args : (IntArray shape, DataType dtype, Scalar min, Scalar max, int seed, Place place={})
output : Tensor(out)
infer_meta :
func : UniformRandomInferMeta
param: [shape, dtype, min, max, seed]
param: [shape, dtype]
kernel :
func : uniform_random
param: [shape, dtype, min, max, seed]
......
......@@ -79,9 +79,6 @@ void RandpermInferMeta(int n, DataType dtype, MetaTensor* out) {
void UniformRandomInferMeta(const IntArray& shape,
DataType dtype,
float min,
float max,
int seed,
MetaTensor* out) {
auto out_dims = phi::make_ddim(shape.GetData());
out->set_dims(out_dims);
......
......@@ -67,9 +67,6 @@ void TruncatedGaussianRandomInferMeta(const std::vector<int>& shape,
void UniformRandomInferMeta(const IntArray& shape,
DataType dtype,
float min,
float max,
int seed,
MetaTensor* out);
void TrilIndicesInferMeta(
......
......@@ -47,8 +47,8 @@ template <typename T, typename Context>
void UniformRandomRawKernel(const Context &dev_ctx,
const IntArray &shape,
DataType dtype,
float min,
float max,
const Scalar &min,
const Scalar &max,
int seed,
int diag_num,
int diag_step,
......@@ -64,7 +64,8 @@ void UniformRandomRawKernel(const Context &dev_ctx,
} else {
engine = dev_ctx.GetGenerator()->GetCPUEngine();
}
UniformRealDistribution<T>(data, size, min, max, engine);
UniformRealDistribution<T>(
data, size, min.to<float>(), max.to<float>(), engine);
if (diag_num > 0) {
PADDLE_ENFORCE_GT(
size,
......@@ -88,8 +89,8 @@ template <typename T, typename Context>
void UniformRandomKernel(const Context &dev_ctx,
const IntArray &shape,
DataType dtype,
float min,
float max,
const Scalar &min,
const Scalar &max,
int seed,
DenseTensor *out) {
UniformRandomRawKernel<T>(
......
......@@ -57,8 +57,8 @@ template <typename T, typename Context>
void UniformRandomRawKernel(const Context& dev_ctx,
const IntArray& shape,
DataType dtype,
float min,
float max,
const Scalar& min,
const Scalar& max,
int seed,
int diag_num,
int diag_step,
......@@ -70,12 +70,12 @@ void UniformRandomRawKernel(const Context& dev_ctx,
// Use global Generator seed
using MT = typename kps::details::MPTypeTrait<T>::Type;
funcs::uniform_distribution<MT> dist;
funcs::uniform_real_transform<MT> trans(min, max);
funcs::uniform_real_transform<MT> trans(min.to<float>(), max.to<float>());
funcs::distribution_and_transform<T>(dev_ctx, out, dist, trans);
} else {
// Use OP seed
auto func =
UniformGenerator<T>(min, max, seed, diag_num, diag_step, diag_val);
auto func = UniformGenerator<T>(
min.to<float>(), max.to<float>(), seed, diag_num, diag_step, diag_val);
IndexKernel<T, UniformGenerator<T>>(dev_ctx, out, func);
}
}
......@@ -84,8 +84,8 @@ template <typename T, typename Context>
void UniformRandomKernel(const Context& dev_ctx,
const IntArray& shape,
DataType dtype,
float min,
float max,
const Scalar& min,
const Scalar& max,
int seed,
DenseTensor* out) {
UniformRandomRawKernel<T>(
......
......@@ -26,8 +26,8 @@ template <typename T, typename Context>
void UniformRandomRawKernel(const Context& dev_ctx,
const IntArray& shape,
DataType dtype,
float min,
float max,
const Scalar& min,
const Scalar& max,
int seed,
int diag_num,
int diag_step,
......@@ -49,8 +49,8 @@ template <typename T, typename Context>
void UniformRandomKernel(const Context& dev_ctx,
const IntArray& shape,
DataType dtype,
float min,
float max,
const Scalar& min,
const Scalar& max,
int seed,
SelectedRows* out) {
phi::UniformRandomKernel<T>(
......
......@@ -15,6 +15,7 @@
#pragma once
#include "paddle/phi/common/int_array.h"
#include "paddle/phi/common/scalar.h"
#include "paddle/phi/core/selected_rows.h"
namespace phi {
......@@ -24,8 +25,8 @@ template <typename T, typename Context>
void UniformRandomRawKernel(const Context& dev_ctx,
const IntArray& shape,
DataType dtype,
float min,
float max,
const Scalar& min,
const Scalar& max,
int seed,
int diag_num,
int diag_step,
......@@ -36,8 +37,8 @@ template <typename T, typename Context>
void UniformRandomKernel(const Context& dev_ctx,
const IntArray& shape,
DataType dtype,
float min,
float max,
const Scalar& min,
const Scalar& max,
int seed,
SelectedRows* out);
......
......@@ -15,6 +15,7 @@
#pragma once
#include "paddle/phi/common/int_array.h"
#include "paddle/phi/common/scalar.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/device_context.h"
......@@ -24,8 +25,8 @@ template <typename T, typename Context>
void UniformRandomRawKernel(const Context& dev_ctx,
const IntArray& shape,
DataType dtype,
float min,
float max,
const Scalar& min,
const Scalar& max,
int seed,
int diag_num,
int diag_step,
......@@ -36,8 +37,8 @@ template <typename T, typename Context>
void UniformRandomKernel(const Context& dev_ctx,
const IntArray& shape,
DataType dtype,
float min,
float max,
const Scalar& min,
const Scalar& max,
int seed,
DenseTensor* out);
......
......@@ -15767,6 +15767,8 @@ def uniform_random(shape,
check_type(shape, 'shape', (list, tuple, Variable), 'uniform_random/rand')
check_dtype(dtype, 'dtype', ('float32', 'float64', 'uint16'),
'uniform_random/rand')
check_type(min, 'min', (float, int, Variable), 'uniform_random/rand')
check_type(max, 'max', (float, int, Variable), 'uniform_random/rand')
inputs = dict()
attrs = {'seed': seed, 'min': min, 'max': max, 'dtype': dtype}
......
......@@ -28,6 +28,8 @@ import paddle.fluid as fluid
from paddle.fluid import Program, program_guard
from paddle.fluid.framework import _test_eager_guard
from test_attribute_var import UnittestBase
def output_hist(out):
hist, _ = np.histogram(out, range=(-5, 10))
......@@ -648,5 +650,49 @@ class TestRandomValue(unittest.TestCase):
paddle.enable_static()
class TestUniformMinMaxTensor(UnittestBase):
    """Checks that `min`/`max` of uniform_random accept Tensor (Variable) values
    in static graph mode, and that the resulting program survives a
    save/load round trip through the inference predictor.
    """

    def init_info(self):
        # One static-graph input of shape [2, 3, 4]; save path is rooted in
        # the temp dir provided by UnittestBase.
        self.shapes = [[2, 3, 4]]
        self.save_path = os.path.join(self.temp_dir.name, self.path_prefix())

    def test_static(self):
        main_prog = Program()
        startup_prog = Program()  # fixed typo: was `starup_prog`
        with program_guard(main_prog, startup_prog):
            fc = paddle.nn.Linear(4, 10)
            x = paddle.randn([2, 3, 4])
            x.stop_gradient = False
            feat = fc(x)  # [2, 3, 10]
            # min/max supplied as Tensors — the feature under test.
            min_v = paddle.to_tensor([0.1])
            max_v = paddle.to_tensor([0.9])
            y = paddle.uniform([2, 3, 10], min=min_v, max=max_v)
            z = paddle.fluid.layers.uniform_random([2, 3, 10],
                                                   min=min_v,
                                                   max=max_v)
            out = feat + y + z

            sgd = paddle.optimizer.SGD()
            sgd.minimize(paddle.mean(out))
            # Tensor-typed attrs must appear as Var[...] in the program desc.
            self.assertTrue(self.var_prefix() in str(main_prog))

            exe = paddle.static.Executor()
            exe.run(startup_prog)
            res = exe.run(fetch_list=[out])
            np.testing.assert_array_equal(res[0].shape, [2, 3, 10])
            paddle.static.save_inference_model(self.save_path, [x], [out], exe)

            # Test for Inference Predictor.
            # NOTE(review): the original re-asserted `res[0].shape` here and
            # never used `infer_out`; asserting the inference output shape is
            # presumably what was intended — confirm infer_prog()'s return
            # format against UnittestBase.
            infer_out = self.infer_prog()
            np.testing.assert_array_equal(infer_out[0].shape, [2, 3, 10])

    def path_prefix(self):
        # Directory/file prefix for the saved inference model.
        return 'uniform_random'

    def var_prefix(self):
        # Marker proving a Tensor-typed attribute landed in the program desc.
        return "Var["
if __name__ == "__main__":
    # Discover and run every TestCase in this module when executed directly.
    unittest.main()
......@@ -568,6 +568,8 @@ def uniform(shape, dtype=None, min=-1.0, max=1.0, seed=0, name=None):
check_type(shape, 'shape', (list, tuple, Variable), 'uniform/rand')
check_dtype(dtype, 'dtype', ('float32', 'float64'), 'uniform/rand')
check_type(min, 'min', (float, int, Variable), 'uniform/rand')
check_type(max, 'max', (float, int, Variable), 'uniform/rand')
inputs = dict()
attrs = {'seed': seed, 'min': min, 'max': max, 'dtype': dtype}
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册