Unverified · Commit 6198ff24 authored by wanghuancoder, committed by GitHub

add reverse yaml (#44518)

* add reverse yaml
Parent e0dd7f32
@@ -1802,6 +1802,24 @@
  intermediate : xshape
  backward: reshape_grad

- api : reverse
  args : (Tensor x, int[] axis)
  output : Tensor
  infer_meta :
    func : ReverseInferMeta
  kernel :
    func : reverse
  backward : reverse_grad

- api : reverse_array
  args : (Tensor[] x, int[] axis)
  output : Tensor[]{x.size()}
  infer_meta :
    func : ReverseArrayInferMeta
  kernel :
    func : reverse_array
  backward : reverse_array_grad

- api : roi_align
  args : (Tensor x, Tensor boxes, Tensor boxes_num, int pooled_height, int pooled_width, float spatial_scale, int sampling_ratio, bool aligned)
  output : Tensor
......
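For orientation, the two `- api` entries added above feed Paddle's op code generator, which emits the final-state ops that eager mode calls as `_C_ops.final_state_reverse` and `_C_ops.final_state_reverse_array` (used by the Python change further down in this commit). A minimal dense-tensor sketch of the user-facing call, assuming an eager-mode build that includes this commit:

```python
import paddle
import paddle.fluid as fluid

# Eager (dygraph) mode is the default; with this commit, fluid.layers.reverse
# dispatches the call to the generated final_state_reverse op.
x = paddle.to_tensor([[1, 2, 3], [4, 5, 6]])
y = fluid.layers.reverse(x, axis=[1])
print(y.numpy())  # [[3 2 1]
                  #  [6 5 4]]
```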
@@ -1708,6 +1708,23 @@
  backward : reshape_double_grad
  inplace : (out_grad -> x_grad)

- backward_api : reverse_array_grad
  forward : reverse_array (Tensor[] x, int[] axis) -> Tensor[](out)
  args : (Tensor[] out_grad, int[] axis)
  output : Tensor[](x_grad){out_grad.size()}
  infer_meta :
    func : ReverseArrayInferMeta
  kernel :
    func : reverse

- backward_api : reverse_grad
  forward : reverse (Tensor x, int[] axis) -> Tensor(out)
  args : (Tensor out_grad, int[] axis)
  output : Tensor(x_grad)
  infer_meta :
    func : ReverseInferMeta
  invoke : reverse(out_grad, axis)

- backward_api : roi_align_grad
  forward : roi_align (Tensor x, Tensor boxes, Tensor boxes_num, int pooled_height, int pooled_width, float spatial_scale, int sampling_ratio, bool aligned) -> Tensor(out)
  args : (Tensor x, Tensor boxes, Tensor boxes_num, Tensor out_grad, int pooled_height, int pooled_width, float spatial_scale, int sampling_ratio, bool aligned)
......
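The `invoke : reverse(out_grad, axis)` line above relies on reversing being its own inverse: each output element copies exactly one input element, so the gradient of `out = reverse(x, axis)` with respect to `x` is simply the upstream gradient reversed along the same axes. A small NumPy check of that identity (a standalone sketch, not part of the commit):

```python
import numpy as np

x = np.arange(6.0).reshape(2, 3)
axis = (1,)

# Forward: out[i, j] = x[i, W - 1 - j] for a reverse along axis 1.
out = np.flip(x, axis)

# Backward: for an upstream gradient g, grad(x) = flip(g, axis).
g = np.ones_like(out)
g[0, 0] = 5.0            # non-uniform gradient so the index mapping is visible
x_grad = np.flip(g, axis)

# out[0, 0] was fed by x[0, 2], so x[0, 2] must receive the gradient 5.
assert x_grad[0, 2] == 5.0
```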
@@ -2165,6 +2165,25 @@ void ReverseInferMeta(const MetaTensor& x,
  out->share_meta(x);
}

void ReverseArrayInferMeta(const std::vector<const phi::MetaTensor*>& x,
                           const std::vector<int>& axis,
                           std::vector<phi::MetaTensor*> out) {
  PADDLE_ENFORCE_EQ(
      axis.size(),
      1,
      phi::errors::InvalidArgument(
          "The size of axis must be 1 when the Input(X) is LoDTensorArray, "
          "but received %d.",
          axis.size()));
  PADDLE_ENFORCE_EQ(
      axis[0],
      0,
      phi::errors::InvalidArgument("The value of axis should be 0 when "
                                   "the Input(X) is LoDTensorArray, "
                                   "but received %d.",
                                   axis[0]));
}

void RollInferMeta(const MetaTensor& x,
                   const IntArray& shifts,
                   const std::vector<int64_t>& axis,
......
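The `ReverseArrayInferMeta` added above only validates the axis: a LoDTensorArray can be reversed solely along its array dimension, i.e. `axis` must be `[0]`. A static-graph sketch of that usage, assuming the `fluid.layers` array helpers (`create_array`, `array_write`), which are not part of this diff:

```python
import paddle
import paddle.fluid as fluid

paddle.enable_static()

# Build a two-element LoDTensorArray and reverse the order of its elements.
i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
x0 = fluid.layers.fill_constant(shape=[2], dtype='float32', value=1.0)
x1 = fluid.layers.fill_constant(shape=[2], dtype='float32', value=2.0)
arr = fluid.layers.create_array(dtype='float32')
fluid.layers.array_write(x0, i, array=arr)
fluid.layers.array_write(x1, i + 1, array=arr)

rev = fluid.layers.reverse(arr, axis=0)  # arrays may only be reversed along axis 0
# fluid.layers.reverse(arr, axis=1)      # would trip the two checks above
```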
@@ -298,6 +298,10 @@ void ReverseInferMeta(const MetaTensor& x,
                      const std::vector<int>& axis,
                      MetaTensor* out);

void ReverseArrayInferMeta(const std::vector<const phi::MetaTensor*>& x,
                           const std::vector<int>& axis,
                           std::vector<phi::MetaTensor*> out);

void RollInferMeta(const MetaTensor& x,
                   const IntArray& shifts,
                   const std::vector<int64_t>& axis,
......
@@ -1271,6 +1271,11 @@ def reverse(x, axis):
    check_type(axis, 'axis', (int, tuple, list), 'reverse')
    if isinstance(axis, int):
        axis = [axis]
    if in_dygraph_mode():
        if x.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY:
            return _C_ops.final_state_reverse_array(x, axis)
        else:
            return _C_ops.final_state_reverse(x, axis)
    helper = LayerHelper("reverse", **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(type='reverse',
......
@@ -31,6 +31,7 @@ class TestReverseOp(OpTest):
    def setUp(self):
        self.initTestCase()
        self.op_type = "reverse"
        self.python_api = fluid.layers.reverse
        self.inputs = {"X": self.x}
        self.attrs = {'axis': self.axis}
        out = self.x
@@ -39,10 +40,10 @@ class TestReverseOp(OpTest):
        self.outputs = {'Out': out}

    def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)

    def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)

class TestCase0(TestReverseOp):
......