未验证 提交 9e9b02d3 编写于 作者: Y Yuang Liu 提交者: GitHub

[operator migration] Migrate unique consecutive infer shape and yaml (#44248)

上级 60bad464
...@@ -12,8 +12,11 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ...@@ -12,8 +12,11 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/op_version_registry.h" #include "paddle/fluid/framework/op_version_registry.h"
#include "paddle/phi/core/infermeta_utils.h"
#include "paddle/phi/infermeta/unary.h"
namespace paddle { namespace paddle {
namespace operators { namespace operators {
...@@ -22,53 +25,6 @@ class UniqueConsecutiveOp : public framework::OperatorWithKernel { ...@@ -22,53 +25,6 @@ class UniqueConsecutiveOp : public framework::OperatorWithKernel {
public: public:
using framework::OperatorWithKernel::OperatorWithKernel; using framework::OperatorWithKernel::OperatorWithKernel;
// Static shape inference for unique_consecutive.
// Out gets a -1 (unknown) extent along the reduced axis; Index/Counts are
// only shaped when return_inverse / return_counts are enabled.
void InferShape(framework::InferShapeContext* ctx) const override {
  OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "unique_consecutive");
  OP_INOUT_CHECK(
      ctx->HasOutput("Out"), "Output", "Out", "unique_consecutive");

  const auto x_dims = ctx->GetInputDim("X");
  const bool has_inverse = ctx->Attrs().Get<bool>("return_inverse");
  const bool has_counts = ctx->Attrs().Get<bool>("return_counts");
  const auto axes = ctx->Attrs().Get<std::vector<int>>("axis");

  // The optional outputs must exist whenever their flags are on.
  if (has_inverse) {
    OP_INOUT_CHECK(
        ctx->HasOutput("Index"), "Output", "Index", "unique_consecutive");
  }
  if (has_counts) {
    OP_INOUT_CHECK(
        ctx->HasOutput("Counts"), "Output", "Counts", "unique_consecutive");
  }

  if (!axes.empty()) {
    // Only the first axis entry is honored; negatives count from the back.
    int axis = axes[0];
    if (axis < 0) {
      axis += x_dims.size();
    }
    PADDLE_ENFORCE_LT(
        axis,
        x_dims.size(),
        platform::errors::InvalidArgument("The axis(%d) should be less than "
                                          "the dimension size(%d) of x.",
                                          axis,
                                          x_dims.size()));
    auto out_dims = x_dims;
    out_dims[axis] = -1;  // number of unique runs is unknown until runtime
    ctx->SetOutputDim("Out", out_dims);
    if (has_inverse) {
      // One inverse entry per slice along the chosen axis.
      ctx->SetOutputDim("Index", {x_dims[axis]});
    }
  } else {
    // Flattened mode: Out is 1-D with a runtime-determined length.
    ctx->SetOutputDim("Out", {-1});
    if (has_inverse) {
      // One inverse entry per input element.
      ctx->SetOutputDim("Index", {phi::product(x_dims)});
    }
  }
  if (has_counts) {
    ctx->SetOutputDim("Counts", {-1});
  }
}
protected: protected:
framework::OpKernelType GetExpectedKernelType( framework::OpKernelType GetExpectedKernelType(
const framework::ExecutionContext& ctx) const override { const framework::ExecutionContext& ctx) const override {
...@@ -114,9 +70,13 @@ class UniqueConsecutiveOpMaker : public framework::OpProtoAndCheckerMaker { ...@@ -114,9 +70,13 @@ class UniqueConsecutiveOpMaker : public framework::OpProtoAndCheckerMaker {
} // namespace paddle } // namespace paddle
namespace ops = paddle::operators; namespace ops = paddle::operators;
// Bind unique_consecutive's shape inference to the shared phi
// UniqueConsecutiveInferMeta, so fluid and the phi API use one implementation.
DECLARE_INFER_SHAPE_FUNCTOR(unique_consecutive,
                            UniqueConsecutiveInferShapeFunctor,
                            PD_INFER_META(phi::UniqueConsecutiveInferMeta));
REGISTER_OP_WITHOUT_GRADIENT(unique_consecutive, REGISTER_OP_WITHOUT_GRADIENT(unique_consecutive,
ops::UniqueConsecutiveOp, ops::UniqueConsecutiveOp,
ops::UniqueConsecutiveOpMaker); ops::UniqueConsecutiveOpMaker,
UniqueConsecutiveInferShapeFunctor);
REGISTER_OP_VERSION(unique_consecutive) REGISTER_OP_VERSION(unique_consecutive)
.AddCheckpoint( .AddCheckpoint(
R"ROC( R"ROC(
......
...@@ -2205,6 +2205,15 @@ ...@@ -2205,6 +2205,15 @@
func : unique func : unique
data_type : x data_type : x
# unique_consecutive: shapes come from UniqueConsecutiveInferMeta; the kernel
# is selected by the dtype of x (data_type : x). index/counts are extra
# outputs controlled by return_inverse / return_counts.
- api : unique_consecutive
  args : (Tensor x, bool return_inverse, bool return_counts, int[] axis, int dtype)
  output : Tensor(out), Tensor(index), Tensor(counts)
  infer_meta :
    func : UniqueConsecutiveInferMeta
  kernel :
    func : unique_consecutive
    data_type : x
- api : unsqueeze - api : unsqueeze
args : (Tensor x, IntArray axis) args : (Tensor x, IntArray axis)
output : Tensor(out), Tensor(xshape) output : Tensor(out), Tensor(xshape)
......
...@@ -2999,6 +2999,66 @@ void UnfoldInferMeta(const MetaTensor& x, ...@@ -2999,6 +2999,66 @@ void UnfoldInferMeta(const MetaTensor& x,
out->set_dims(phi::make_ddim(out_dims)); out->set_dims(phi::make_ddim(out_dims));
} }
// Meta (shape/dtype) inference for unique_consecutive, shared by the fluid op
// and the phi API. Mirrors the old fluid InferShape: Out carries a -1 extent
// where the unique-run count is only known at runtime.
// NOTE(review): the `dtype` attr (index/counts dtype selector) is not consumed
// here, and index/counts dtypes are never set — presumably handled by the
// kernel; confirm against the kernel implementation.
void UniqueConsecutiveInferMeta(const MetaTensor& x,
                                bool return_inverse,
                                bool return_counts,
                                const std::vector<int>& axis,
                                int dtype,
                                MetaTensor* out,
                                MetaTensor* index,
                                MetaTensor* counts) {
  // Out is mandatory; Index/Counts are required only when their flag is on.
  PADDLE_ENFORCE_NE(out,
                    nullptr,
                    phi::errors::InvalidArgument(
                        "unique_consecutive should have output tensor out."));
  if (return_inverse) {
    PADDLE_ENFORCE_NE(
        index,
        nullptr,
        phi::errors::InvalidArgument("Tensor index should not be null if "
                                     "return_inverse is set to True."));
  }
  if (return_counts) {
    PADDLE_ENFORCE_NE(
        counts,
        nullptr,
        phi::errors::InvalidArgument("Tensor counts should not be null if "
                                     "return_counts is set to True."));
  }

  const auto x_dims = x.dims();
  if (!axis.empty()) {
    // Only the first entry of `axis` is honored; negatives wrap around.
    int real_axis = axis[0];
    if (real_axis < 0) {
      real_axis += x_dims.size();
    }
    PADDLE_ENFORCE_LT(
        real_axis,
        x_dims.size(),
        phi::errors::InvalidArgument("The axis(%d) should be less than "
                                     "the dimension size(%d) of x.",
                                     real_axis,
                                     x_dims.size()));
    auto out_dims = x_dims;
    out_dims[real_axis] = -1;  // run count along the axis is data-dependent
    out->set_dims(out_dims);
    out->set_dtype(x.dtype());
    if (return_inverse) {
      // One inverse index per slice along the chosen axis.
      index->set_dims({x_dims[real_axis]});
    }
  } else {
    // Flattened mode: Out is 1-D with runtime-determined length.
    out->set_dims({-1});
    out->set_dtype(x.dtype());
    if (return_inverse) {
      // One inverse index per input element.
      index->set_dims({phi::product(x_dims)});
    }
  }
  if (return_counts) {
    counts->set_dims({-1});
  }
}
void UniqueInferMeta(const MetaTensor& x, void UniqueInferMeta(const MetaTensor& x,
bool return_index, bool return_index,
bool return_inverse, bool return_inverse,
......
...@@ -420,6 +420,15 @@ void UnfoldInferMeta(const MetaTensor& x, ...@@ -420,6 +420,15 @@ void UnfoldInferMeta(const MetaTensor& x,
MetaTensor* out, MetaTensor* out,
MetaConfig config = MetaConfig()); MetaConfig config = MetaConfig());
// Shape/dtype inference for unique_consecutive. `out` is required; `index`
// and `counts` must be non-null only when return_inverse / return_counts are
// true. `dtype` is the attr selecting the index/counts dtype (as an int
// VarType code); see the .cc definition for how dims are derived from `axis`.
void UniqueConsecutiveInferMeta(const MetaTensor& x,
                                bool return_inverse,
                                bool return_counts,
                                const std::vector<int>& axis,
                                int dtype,
                                MetaTensor* out,
                                MetaTensor* index,
                                MetaTensor* counts);
void UniqueInferMeta(const MetaTensor& x, void UniqueInferMeta(const MetaTensor& x,
bool return_index, bool return_index,
bool return_inverse, bool return_inverse,
......
...@@ -72,6 +72,7 @@ class TestUniqueConsecutiveOp(OpTest): ...@@ -72,6 +72,7 @@ class TestUniqueConsecutiveOp(OpTest):
self.x_range = 20 self.x_range = 20
self.return_inverse = False self.return_inverse = False
self.return_counts = False self.return_counts = False
self.python_api = paddle.unique_consecutive
def init_kernel_type(self): def init_kernel_type(self):
self.dtype = "float32" if core.is_compiled_with_rocm() else "float64" self.dtype = "float32" if core.is_compiled_with_rocm() else "float64"
...@@ -88,13 +89,14 @@ class TestUniqueConsecutiveOp(OpTest): ...@@ -88,13 +89,14 @@ class TestUniqueConsecutiveOp(OpTest):
self.inputs = { self.inputs = {
'X': x, 'X': x,
} }
self.python_out_sig = ["Out"]
self.attrs = {'dtype': int(core.VarDesc.VarType.INT32)} self.attrs = {'dtype': int(core.VarDesc.VarType.INT32)}
self.outputs = { self.outputs = {
'Out': out, 'Out': out,
} }
def test_check_output(self): def test_check_output(self):
self.check_output() self.check_output(check_eager=True)
class TestUniqueConsecutiveOp2(TestUniqueConsecutiveOp): class TestUniqueConsecutiveOp2(TestUniqueConsecutiveOp):
...@@ -105,6 +107,7 @@ class TestUniqueConsecutiveOp2(TestUniqueConsecutiveOp): ...@@ -105,6 +107,7 @@ class TestUniqueConsecutiveOp2(TestUniqueConsecutiveOp):
self.x_range = 20 self.x_range = 20
self.return_inverse = True self.return_inverse = True
self.return_counts = False self.return_counts = False
self.python_api = paddle.unique_consecutive
def setUp(self): def setUp(self):
self.init_kernel_type() self.init_kernel_type()
...@@ -122,6 +125,7 @@ class TestUniqueConsecutiveOp2(TestUniqueConsecutiveOp): ...@@ -122,6 +125,7 @@ class TestUniqueConsecutiveOp2(TestUniqueConsecutiveOp):
'return_inverse': self.return_inverse, 'return_inverse': self.return_inverse,
'dtype': int(core.VarDesc.VarType.INT32) 'dtype': int(core.VarDesc.VarType.INT32)
} }
self.python_out_sig = ["Out"]
self.outputs = {'Out': result, 'Index': inverse} self.outputs = {'Out': result, 'Index': inverse}
...@@ -133,6 +137,7 @@ class TestUniqueConsecutiveOp3(TestUniqueConsecutiveOp): ...@@ -133,6 +137,7 @@ class TestUniqueConsecutiveOp3(TestUniqueConsecutiveOp):
self.x_range = 20 self.x_range = 20
self.return_inverse = False self.return_inverse = False
self.return_counts = True self.return_counts = True
self.python_api = paddle.unique_consecutive
def setUp(self): def setUp(self):
self.init_kernel_type() self.init_kernel_type()
...@@ -150,6 +155,7 @@ class TestUniqueConsecutiveOp3(TestUniqueConsecutiveOp): ...@@ -150,6 +155,7 @@ class TestUniqueConsecutiveOp3(TestUniqueConsecutiveOp):
'return_counts': self.return_counts, 'return_counts': self.return_counts,
'dtype': int(core.VarDesc.VarType.INT32) 'dtype': int(core.VarDesc.VarType.INT32)
} }
self.python_out_sig = ["Out"]
self.outputs = {'Out': result, 'Counts': counts} self.outputs = {'Out': result, 'Counts': counts}
...@@ -161,6 +167,7 @@ class TestUniqueConsecutiveOp4(TestUniqueConsecutiveOp): ...@@ -161,6 +167,7 @@ class TestUniqueConsecutiveOp4(TestUniqueConsecutiveOp):
self.x_range = 20 self.x_range = 20
self.return_inverse = True self.return_inverse = True
self.return_counts = True self.return_counts = True
self.python_api = paddle.unique_consecutive
def setUp(self): def setUp(self):
self.init_kernel_type() self.init_kernel_type()
...@@ -180,6 +187,7 @@ class TestUniqueConsecutiveOp4(TestUniqueConsecutiveOp): ...@@ -180,6 +187,7 @@ class TestUniqueConsecutiveOp4(TestUniqueConsecutiveOp):
'return_counts': self.return_counts, 'return_counts': self.return_counts,
'dtype': int(core.VarDesc.VarType.INT32) 'dtype': int(core.VarDesc.VarType.INT32)
} }
self.python_out_sig = ["Out"]
self.outputs = {'Out': result, 'Index': inverse, 'Counts': counts} self.outputs = {'Out': result, 'Index': inverse, 'Counts': counts}
......
...@@ -2066,7 +2066,18 @@ def unique_consecutive(x, ...@@ -2066,7 +2066,18 @@ def unique_consecutive(x,
else: else:
axis = [axis] axis = [axis]
attr_dtype = convert_np_dtype_to_dtype_(dtype) attr_dtype = convert_np_dtype_to_dtype_(dtype)
if paddle.in_dynamic_mode(): if in_dygraph_mode():
out, inverse, counts = _C_ops.final_state_unique_consecutive(
x, return_inverse, return_counts, axis, attr_dtype)
outs = [out]
if return_inverse:
outs.append(inverse)
if return_counts:
outs.append(counts)
if len(outs) == 1:
return outs[0]
return tuple(outs)
elif paddle.in_dynamic_mode():
out, inverse, counts = _C_ops.unique_consecutive( out, inverse, counts = _C_ops.unique_consecutive(
x, 'dtype', attr_dtype, 'return_inverse', return_inverse, x, 'dtype', attr_dtype, 'return_inverse', return_inverse,
'return_counts', return_counts, 'axis', axis) 'return_counts', return_counts, 'axis', axis)
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册