Unverified · Commit fd1ecfc5 · authored by zyfncg, committed by GitHub

Add randperm and range yaml (#41265)

* add randperm and range yaml

* add eager test for randperm
Parent 7315fb2d
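For orientation, a minimal usage sketch of the two Python entry points this commit wires up to the new final-state (eager) ops; the calls mirror the tests added in this diff:

import paddle

out = paddle.arange(0, 5, 1, dtype='int32')    # lowers to the phi kernel renamed to `arange` below
perm = paddle.randperm(10, dtype='int64')      # lowers to the phi `randperm` kernel; a permutation of 0..9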
@@ -61,6 +61,6 @@ class RangeOpMaker : public framework::OpProtoAndCheckerMaker {
namespace ops = paddle::operators;
DECLARE_INFER_SHAPE_FUNCTOR(range, RangeInferMetaFunctor,
PD_INFER_META(phi::RangeInferMeta));
PD_INFER_META(phi::ArangeInferMeta));
REGISTER_OP_WITHOUT_GRADIENT(range, ops::RangeOp, ops::RangeOpMaker,
RangeInferMetaFunctor);
@@ -58,6 +58,11 @@ void GaussianRandomInferMeta(const IntArray& shape,
out->set_layout(DataLayout::NCHW);
}
void RandpermInferMeta(int n, DataType dtype, MetaTensor* out) {
out->set_dims(phi::make_ddim({n}));
out->set_dtype(dtype);
}
void TruncatedGaussianRandomInferMeta(const std::vector<int>& shape,
float mean,
float std,
......
@@ -53,6 +53,8 @@ void GaussianRandomInferMeta(const IntArray& shape,
DataType dtype,
MetaTensor* out);
void RandpermInferMeta(int n, DataType dtype, MetaTensor* out);
void TruncatedGaussianRandomInferMeta(const std::vector<int>& shape,
float mean,
float std,
......
@@ -141,6 +141,56 @@ void AddmmInferMeta(const MetaTensor& input,
out->set_dtype(input.dtype());
}
void ArangeInferMeta(const MetaTensor& start,
const MetaTensor& end,
const MetaTensor& step,
MetaTensor* out) {
auto start_dims = start.dims();
auto end_dims = end.dims();
auto step_dims = step.dims();
PADDLE_ENFORCE_EQ(
start_dims.size(),
1,
phi::errors::InvalidArgument(
"The dim of the shape of Input(Start) should be 1, but got %d",
start_dims.size()));
PADDLE_ENFORCE_EQ(start_dims[0],
1,
phi::errors::InvalidArgument(
"The first dim of the shape of Input(Start) should "
"be 1, but got %d",
start_dims[0]));
PADDLE_ENFORCE_EQ(
end_dims.size(),
1,
phi::errors::InvalidArgument(
"The dim of the shape of Input(End) should be 1, but got %d",
end_dims.size()));
PADDLE_ENFORCE_EQ(
end_dims[0],
1,
phi::errors::InvalidArgument("The first dim of the shape of "
"Input(End) should be 1, but got %d",
end_dims[0]));
PADDLE_ENFORCE_EQ(
step_dims.size(),
1,
phi::errors::InvalidArgument(
"The dim of the shape of Input(Step) should be 1, but got %d",
step_dims.size()));
PADDLE_ENFORCE_EQ(step_dims[0],
1,
phi::errors::InvalidArgument(
"The first dim of the shape of Input(Step) should "
"be 1, but got %d",
step_dims[0]));
out->set_dims({-1});
out->set_dtype(start.dtype());
}
void GraphSendRecvInferMeta(const MetaTensor& x,
const MetaTensor& src_index,
const MetaTensor& dst_index,
@@ -345,56 +395,6 @@ void PutAlongAxisInferMeta(const MetaTensor& x,
out->set_dtype(x.dtype());
}
void RangeInferMeta(const MetaTensor& start,
const MetaTensor& end,
const MetaTensor& step,
MetaTensor* out) {
auto start_dims = start.dims();
auto end_dims = end.dims();
auto step_dims = step.dims();
PADDLE_ENFORCE_EQ(
start_dims.size(),
1,
phi::errors::InvalidArgument(
"The dim of the shape of Input(Start) should be 1, but got %d",
start_dims.size()));
PADDLE_ENFORCE_EQ(start_dims[0],
1,
phi::errors::InvalidArgument(
"The first dim of the shape of Input(Start) should "
"be 1, but got %d",
start_dims[0]));
PADDLE_ENFORCE_EQ(
end_dims.size(),
1,
phi::errors::InvalidArgument(
"The dim of the shape of Input(End) should be 1, but got %d",
end_dims.size()));
PADDLE_ENFORCE_EQ(
end_dims[0],
1,
phi::errors::InvalidArgument("The first dim of the shape of "
"Input(End) should be 1, but got %d",
end_dims[0]));
PADDLE_ENFORCE_EQ(
step_dims.size(),
1,
phi::errors::InvalidArgument(
"The dim of the shape of Input(Step) should be 1, but got %d",
step_dims.size()));
PADDLE_ENFORCE_EQ(step_dims[0],
1,
phi::errors::InvalidArgument(
"The first dim of the shape of Input(Step) should "
"be 1, but got %d",
step_dims[0]));
out->set_dims({-1});
out->set_dtype(start.dtype());
}
void RoiAlignInferMeta(const MetaTensor& x,
const MetaTensor& boxes,
paddle::optional<const MetaTensor&> boxes_num,
......
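Note that ArangeInferMeta, like the RangeInferMeta it replaces, registers the output with dims {-1}: the element count depends on the runtime values of start/end/step, so it stays dynamic at infer-meta time. A sketch of the size the kernels are assumed to compute at run time (helper name hypothetical, assumed to mirror funcs/range_function.h):

import math

def arange_size(start, end, step):
    # number of elements arange(start, end, step) produces; assumed formula
    return max(0, math.ceil((end - start) / step))

assert arange_size(0, 5, 2) == 3     # 0, 2, 4
assert arange_size(10, 1, -2) == 5   # 10, 8, 6, 4, 2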
@@ -47,6 +47,11 @@ void AddmmInferMeta(const MetaTensor& input,
float beta,
MetaTensor* out);
void ArangeInferMeta(const MetaTensor& start,
const MetaTensor& end,
const MetaTensor& step,
MetaTensor* out);
void GraphSendRecvInferMeta(const MetaTensor& x,
const MetaTensor& src_index,
const MetaTensor& dst_index,
@@ -81,11 +86,6 @@ void PutAlongAxisInferMeta(const MetaTensor& x,
const std::string& reduce,
MetaTensor* out);
void RangeInferMeta(const MetaTensor& start,
const MetaTensor& end,
const MetaTensor& step,
MetaTensor* out);
void RoiAlignInferMeta(const MetaTensor& x,
const MetaTensor& boxes,
paddle::optional<const MetaTensor&> boxes_num,
......
@@ -19,7 +19,7 @@
namespace phi {
template <typename T, typename Context>
void RangeKernel(const Context& dev_ctx,
void ArangeKernel(const Context& dev_ctx,
const DenseTensor& start,
const DenseTensor& end,
const DenseTensor& step,
......
@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/phi/kernels/range_kernel.h"
#include "paddle/phi/kernels/arange_kernel.h"
#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/range_function.h"
@@ -20,7 +20,7 @@ limitations under the License. */
namespace phi {
template <typename T, typename Context>
void RangeKernel(const Context& dev_ctx,
void ArangeKernel(const Context& dev_ctx,
const DenseTensor& start,
const DenseTensor& end,
const DenseTensor& step,
@@ -42,4 +42,4 @@ void RangeKernel(const Context& dev_ctx,
} // namespace phi
PD_REGISTER_KERNEL(
range, CPU, ALL_LAYOUT, phi::RangeKernel, float, double, int, int64_t) {}
arange, CPU, ALL_LAYOUT, phi::ArangeKernel, float, double, int, int64_t) {}
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/range_kernel.h"
#include "paddle/phi/kernels/arange_kernel.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
@@ -40,7 +40,7 @@ __global__ void Range(T start, T step, int64_t size, T* out) {
}
template <typename T, typename Context>
void RangeKernel(const Context& dev_ctx,
void ArangeKernel(const Context& dev_ctx,
const DenseTensor& start,
const DenseTensor& end,
const DenseTensor& step,
@@ -63,7 +63,7 @@ void RangeKernel(const Context& dev_ctx,
} // namespace phi
PD_REGISTER_KERNEL(
range, GPU, ALL_LAYOUT, phi::RangeKernel, float, double, int64_t, int) {
arange, GPU, ALL_LAYOUT, phi::ArangeKernel, float, double, int64_t, int) {
kernel->InputAt(0).SetBackend(phi::Backend::CPU);
kernel->InputAt(1).SetBackend(phi::Backend::CPU);
kernel->InputAt(2).SetBackend(phi::Backend::CPU);
......
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/phi/core/compat/op_utils.h"
PD_REGISTER_BASE_KERNEL_NAME(range, arange);
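This new compat file covers the rename for legacy programs: PD_REGISTER_BASE_KERNEL_NAME(range, arange) maps the old `range` op name onto the phi kernel now registered as `arange`, so kernel lookup for existing static-graph models continues to resolve without changes.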
@@ -21,7 +21,7 @@ import warnings
from ..layer_helper import LayerHelper
from ..param_attr import ParamAttr
from ..initializer import Initializer
from ..framework import convert_np_dtype_to_dtype_, _non_static_mode, _varbase_creator, device_guard, _in_legacy_dygraph, in_dygraph_mode
from ..framework import _current_expected_place, convert_np_dtype_to_dtype_, _non_static_mode, _varbase_creator, device_guard, _in_legacy_dygraph, in_dygraph_mode
from ..framework import Variable
from ..initializer import Constant
from ..core import VarDesc
@@ -1433,6 +1433,10 @@ def range(start, end, step, dtype, name=None):
if not isinstance(dtype, core.VarDesc.VarType):
dtype = convert_np_dtype_to_dtype_(dtype)
if in_dygraph_mode():
return _C_ops.final_state_arange(start, end, step, dtype,
_current_expected_place())
if not isinstance(start, Variable):
with device_guard("cpu"):
start = fill_constant([1], dtype, start, force_cpu=True)
@@ -1451,7 +1455,7 @@
elif step.dtype != dtype:
step = cast(step, dtype)
if _non_static_mode():
if _in_legacy_dygraph():
out = _C_ops.range(start, end, step)
out.stop_gradient = True
return out
......
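Taken together, range() now dispatches along three paths; a condensed sketch of the control flow introduced above (simplified, names as imported in this module):

def _range_dispatch(start, end, step, dtype):
    if in_dygraph_mode():        # new eager mode: call the generated final-state op
        return _C_ops.final_state_arange(start, end, step, dtype,
                                         _current_expected_place())
    if _in_legacy_dygraph():     # old dygraph: call the legacy C++ op
        out = _C_ops.range(start, end, step)
        out.stop_gradient = True
        return out
    ...                          # otherwise fall through to the static-graph append_op path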
@@ -18,6 +18,7 @@ from op_test import OpTest
import paddle
import paddle.fluid.core as core
from paddle.static import program_guard, Program
from paddle.fluid.framework import _test_eager_guard
import os
@@ -50,6 +51,7 @@ class TestRandpermOp(OpTest):
def setUp(self):
self.op_type = "randperm"
self.python_api = paddle.randperm
self.n = 200
self.dtype = "int64"
@@ -72,6 +74,10 @@
self.assertTrue(
check_randperm_out(self.n, out_np), msg=error_msg(out_np))
def test_eager(self):
with _test_eager_guard():
self.test_check_output()
class TestRandpermOpN(TestRandpermOp):
def init_attrs(self):
@@ -130,6 +136,19 @@ class TestRandpermImperative(unittest.TestCase):
paddle.enable_static()
class TestRandpermEager(unittest.TestCase):
def test_out(self):
paddle.disable_static()
n = 10
with _test_eager_guard():
for dtype in ['int32', np.int64, 'float32', 'float64']:
data_p = paddle.randperm(n, dtype)
data_np = data_p.numpy()
self.assertTrue(
check_randperm_out(n, data_np), msg=error_msg(data_np))
paddle.enable_static()
class TestRandomValue(unittest.TestCase):
def test_fixed_random_number(self):
# Test GPU Fixed random number, which is generated by 'curandStatePhilox4_32_10_t'
......
@@ -14,9 +14,15 @@
from __future__ import print_function
import paddle
import unittest
import numpy as np
from op_test import OpTest
from functools import partial
def arange_wrapper(start, end, step, dtype=None):
return paddle.arange(start, end, step, dtype)
class TestRangeOp(OpTest):
@@ -36,33 +42,38 @@ class TestRangeOp(OpTest):
def init_config(self):
self.dtype = np.float32
self.python_api = partial(arange_wrapper, dtype=self.dtype)
self.case = (0, 1, 0.2)
def test_check_output(self):
self.check_output()
self.check_output(check_eager=True)
class TestFloatRangeOpCase0(TestRangeOp):
def init_config(self):
self.dtype = np.float32
self.python_api = partial(arange_wrapper, dtype=self.dtype)
self.case = (0, 5, 1)
class TestInt32RangeOpCase0(TestRangeOp):
def init_config(self):
self.dtype = np.int32
self.python_api = partial(arange_wrapper, dtype=self.dtype)
self.case = (0, 5, 2)
class TestInt32RangeOpCase1(TestRangeOp):
def init_config(self):
self.dtype = np.int32
self.python_api = partial(arange_wrapper, dtype=self.dtype)
self.case = (10, 1, -2)
class TestInt32RangeOpCase2(TestRangeOp):
def init_config(self):
self.dtype = np.int32
self.python_api = partial(arange_wrapper, dtype=self.dtype)
self.case = (-1, -10, -2)
......
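In each case the dtype is bound into python_api with functools.partial so that check_eager=True can invoke the final-state op with the dtype that case uses; in effect:

from functools import partial

import numpy as np
import paddle

def arange_wrapper(start, end, step, dtype=None):  # as added in the diff above
    return paddle.arange(start, end, step, dtype)

python_api = partial(arange_wrapper, dtype=np.float32)
out = python_api(0, 1, 0.2)   # same as paddle.arange(0, 1, 0.2, np.float32)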
@@ -22,7 +22,7 @@ from ..fluid.layers import utils
import paddle
from paddle import _C_ops
from paddle.static import Variable
from paddle.fluid.framework import in_dygraph_mode, _in_legacy_dygraph
from ..fluid.framework import _in_legacy_dygraph, in_dygraph_mode
__all__ = []
@@ -919,7 +919,10 @@ def randperm(n, dtype="int64", name=None):
if not isinstance(dtype, core.VarDesc.VarType):
dtype = convert_np_dtype_to_dtype_(dtype)
if paddle.in_dynamic_mode():
if in_dygraph_mode():
return _C_ops.final_state_randperm(
n, dtype, paddle.fluid.framework._current_expected_place())
if _in_legacy_dygraph():
return _C_ops.randperm('n', n, 'seed', 0, 'dtype', dtype)
if n < 1:
......
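randperm() now follows the same split as range(): the final-state op under eager mode, the attribute-based legacy op under old dygraph. A quick sanity check mirroring the new TestRandpermEager test:

import paddle
from paddle.fluid.framework import _test_eager_guard

paddle.disable_static()
with _test_eager_guard():
    perm = paddle.randperm(10, dtype='int64')   # dispatches to final_state_randperm
    assert sorted(perm.numpy().tolist()) == list(range(10))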
@@ -97,6 +97,20 @@
kernel :
func : any
- api : arange
args : (Tensor start, Tensor end, Tensor step, DataType dtype, Place place={})
output : Tensor
infer_meta :
func : ArangeInferMeta
param : [start, end, step]
kernel :
func : arange
param : [start, end, step]
data_type : dtype
backend : place
data_transform :
support_trans_dtype : start, end, step
# arg_max
- api : argmax
args : (Tensor x, int64_t axis, bool keepdims, bool flatten, int dtype)
@@ -1227,6 +1241,18 @@
data_type : x
backward : put_along_axis_grad
- api : randperm
args : (int n, DataType dtype, Place place={})
output : Tensor
infer_meta :
func : RandpermInferMeta
param : [n, dtype]
kernel :
func : randperm
param : [n, dtype]
data_type : dtype
backend : place
- api : reciprocal
args : (Tensor x)
output : Tensor
......
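Both new YAML entries share the same structure: infer_meta/kernel function names with explicit param lists, data_type selecting the kernel dtype from the dtype argument, and backend selecting the device from place; the data_transform/support_trans_dtype block on arange additionally marks start/end/step as castable to the requested dtype. The generated eager entry points are the ones invoked earlier in this diff:

# signatures as called from the Python changes above
_C_ops.final_state_arange(start, end, step, dtype, place)
_C_ops.final_state_randperm(n, dtype, place)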