diff --git a/paddle/fluid/operators/generator/get_expected_kernel_func.cc b/paddle/fluid/operators/generator/get_expected_kernel_func.cc
index a4b0e637e12dc76f8edb6f621b5d50f36965458e..ce2cbb43deed0e9b4f3fe1b56be9693edf019a0a 100644
--- a/paddle/fluid/operators/generator/get_expected_kernel_func.cc
+++ b/paddle/fluid/operators/generator/get_expected_kernel_func.cc
@@ -158,5 +158,23 @@ phi::KernelKey GetMatrixNmsExpectedKernelType(
       platform::CPUPlace());
 }
 
+phi::KernelKey GetUniqueExpectedKernelType(
+    const framework::ExecutionContext& ctx,
+    const framework::OperatorWithKernel* op_ptr) {
+  (void)ctx;
+  // Return CPUPlace when Attr("is_sorted") is false, because that means the
+  // legacy fluid.layers.unique API was called, and it has no CUDA kernel.
+  if (!ctx.Attr<bool>("is_sorted")) {
+    return phi::KernelKey(
+        op_ptr->OperatorWithKernel::IndicateVarDataType(ctx, "X"),
+        platform::CPUPlace());
+  } else {
+    // Otherwise the new paddle.unique API was called.
+    return phi::KernelKey(
+        op_ptr->OperatorWithKernel::IndicateVarDataType(ctx, "X"),
+        ctx.GetPlace());
+  }
+}
+
 }  // namespace operators
 }  // namespace paddle
diff --git a/paddle/fluid/operators/generator/get_expected_kernel_func.h b/paddle/fluid/operators/generator/get_expected_kernel_func.h
index a83f5865e349956493f4fb121068688442bc5c3d..cbbb74e2312ed3d916bf276d62bc31e635743694 100644
--- a/paddle/fluid/operators/generator/get_expected_kernel_func.h
+++ b/paddle/fluid/operators/generator/get_expected_kernel_func.h
@@ -44,5 +44,9 @@ phi::KernelKey GetMatrixNmsExpectedKernelType(
     const framework::ExecutionContext& ctx,
     const framework::OperatorWithKernel* op_ptr);
 
+phi::KernelKey GetUniqueExpectedKernelType(
+    const framework::ExecutionContext& ctx,
+    const framework::OperatorWithKernel* op_ptr);
+
 }  // namespace operators
 }  // namespace paddle
diff --git a/paddle/fluid/operators/unique_op.cc b/paddle/fluid/operators/unique_op.cc
deleted file mode 100644
index 5484a16ca6bd4d1111b82254e3f588f809917d0e..0000000000000000000000000000000000000000
--- a/paddle/fluid/operators/unique_op.cc
+++ /dev/null
@@ -1,168 +0,0 @@
-/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License. */
-
-#include "paddle/fluid/operators/unique_op.h"
-
-#include <memory>
-
-#include "paddle/fluid/framework/infershape_utils.h"
-#include "paddle/fluid/framework/op_registry.h"
-#include "paddle/phi/core/infermeta_utils.h"
-#include "paddle/phi/infermeta/unary.h"
-
-namespace paddle {
-namespace operators {
-
-class UniqueOp : public framework::OperatorWithKernel {
- public:
-  using framework::OperatorWithKernel::OperatorWithKernel;
-
-  void InferShape(framework::InferShapeContext* ctx) const override {
-    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "unique");
-    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "unique");
-
-    bool return_index = ctx->Attrs().Get<bool>("return_index");
-    bool return_inverse = ctx->Attrs().Get<bool>("return_inverse");
-    bool return_counts = ctx->Attrs().Get<bool>("return_counts");
-    auto axis_vec = ctx->Attrs().Get<std::vector<int>>("axis");
-    auto data_type =
-        static_cast<phi::DataType>(static_cast<framework::proto::VarType::Type>(
-            ctx->Attrs().Get<int>("dtype")));
-
-    // Construct MetaTensor for InferMeta Func
-    using CompatMetaTensor = framework::CompatMetaTensor;
-    CompatMetaTensor x(ctx->GetInputVarPtrs("X")[0], ctx->IsRuntime());
-    CompatMetaTensor out(ctx->GetOutputVarPtrs("Out")[0], ctx->IsRuntime());
-    std::unique_ptr<CompatMetaTensor> indices(nullptr);
-    std::unique_ptr<CompatMetaTensor> index(nullptr);
-    std::unique_ptr<CompatMetaTensor> counts(nullptr);
-
-    if (return_index) {
-      OP_INOUT_CHECK(ctx->HasOutput("Indices"), "Output", "Indices", "unique");
-      indices =
-          std::move(std::unique_ptr<CompatMetaTensor>(new CompatMetaTensor(
-              ctx->GetOutputVarPtrs("Indices")[0], ctx->IsRuntime())));
-    }
-    if (return_inverse) {
-      OP_INOUT_CHECK(ctx->HasOutput("Index"), "Output", "Index", "unique");
-      index = std::move(std::unique_ptr<CompatMetaTensor>(new CompatMetaTensor(
-          ctx->GetOutputVarPtrs("Index")[0], ctx->IsRuntime())));
-    }
-    if (return_counts) {
-      OP_INOUT_CHECK(ctx->HasOutput("Counts"), "Output", "Counts", "unique");
-      counts = std::move(std::unique_ptr<CompatMetaTensor>(new CompatMetaTensor(
-          ctx->GetOutputVarPtrs("Counts")[0], ctx->IsRuntime())));
-    }
-    bool is_sorted = ctx->Attrs().Get<bool>("is_sorted");
-    if (is_sorted) {
-      phi::UniqueInferMeta(x,
-                           return_index,
-                           return_inverse,
-                           return_counts,
-                           axis_vec,
-                           data_type,
-                           &out,
-                           indices.get(),
-                           index.get(),
-                           counts.get());
-    } else {
-      OP_INOUT_CHECK(ctx->HasOutput("Index"), "Output", "Index", "unique");
-      if (index == nullptr) {
-        index =
-            std::move(std::unique_ptr<CompatMetaTensor>(new CompatMetaTensor(
-                ctx->GetOutputVarPtrs("Index")[0], ctx->IsRuntime())));
-      }
-      phi::UniqueRawInferMeta(x,
-                              return_index,
-                              return_inverse,
-                              return_counts,
-                              axis_vec,
-                              data_type,
-                              is_sorted,
-                              &out,
-                              indices.get(),
-                              index.get(),
-                              counts.get());
-    }
-  }
-
- protected:
-  phi::KernelKey GetExpectedKernelType(
-      const framework::ExecutionContext& ctx) const override {
-    // Return CPUPlace when Attr("is_sorted") is false. Because it means
-    // that fluid.layers.unique is called, but there is no cuda kernel.
-    if (!ctx.Attr<bool>("is_sorted")) {
-      return phi::KernelKey(OperatorWithKernel::IndicateVarDataType(ctx, "X"),
-                            platform::CPUPlace());
-    } else {
-      // new version paddle.unique is called.
-      return phi::KernelKey(OperatorWithKernel::IndicateVarDataType(ctx, "X"),
-                            ctx.GetPlace());
-    }
-  }
-};
-
-class UniqueOpMaker : public framework::OpProtoAndCheckerMaker {
- public:
-  void Make() override {
-    AddInput("X",
-             "Input tensor. It should be a 1-D tensor when Attr(is_sorted)"
-             " is false or a N-D tensor when Attr(is_sorted) is true.");
-    AddAttr<int>("dtype", "data type for output index");
-    AddOutput("Out", "A unique subsequence for input tensor.");
-    AddOutput("Index",
-              "Equivalent to inverse in numpy.unique, "
-              "the indices for where elements in the original input ended up "
-              "in the returned unique tensor.");
-    AddOutput(
-        "Indices",
-        "The indices of the input tensor that result in the unique tensor.")
-        .AsDispensable();
-    AddOutput("Counts", "The counts for each unique element.").AsDispensable();
-    AddAttr<bool>("return_index",
-                  "If True, also return the indices of the input"
-                  " tensor that result in the unique Tensor.")
-        .SetDefault(false);
-    AddAttr<bool>(
-        "return_inverse",
-        "If True, also return the indices for where elements"
-        " in the original input ended up in the returned unique tensor.")
-        .SetDefault(false);
-    AddAttr<bool>("return_counts",
-                  "If True, also return the counts for each unique element.")
-        .SetDefault(false);
-    AddAttr<std::vector<int>>(
-        "axis",
-        "The axis to apply unique. If None, the input will be flattened.")
-        .SetDefault({});
-    AddAttr<bool>("is_sorted",
-                  "If True, the unique elements of X are in ascending order."
-                  "Otherwise, the unique elements are not sorted.")
-        .SetDefault(false);
-    AddComment(R"DOC(
-    1. Return a unique subsequence for 1-D input tensor, and an index tensor
-    pointing to this unique subsequence when Attr(is_sorted) is false. This
-    means paddle.unique is called.
-
-    2. Returns the unique elements of X in ascending order when Attr(is_sorted)
-    is true. This means fluid.layers.unique is called.
-)DOC");
-  }
-};
-}  // namespace operators
-}  // namespace paddle
-
-namespace ops = paddle::operators;
-
-REGISTER_OP_WITHOUT_GRADIENT(unique, ops::UniqueOp, ops::UniqueOpMaker);
diff --git a/paddle/phi/api/yaml/op_compat.yaml b/paddle/phi/api/yaml/op_compat.yaml
index 90c75a8dcc6cd73b0a871daa33fa7357c61ccebe..e53909aa3fdee9f556365bd4472300f237bc91ee 100644
--- a/paddle/phi/api/yaml/op_compat.yaml
+++ b/paddle/phi/api/yaml/op_compat.yaml
@@ -2241,6 +2241,15 @@
       support_tensor : true
   manual_signature : [uniform]
 
+- op : unique
+  inputs :
+    {x : X}
+  outputs :
+    {out : Out, indices : Indices, inverse : Index, counts : Counts}
+  get_expected_kernel_type :
+    unique : GetUniqueExpectedKernelType
+  manual_signature : [unique]
+
 - op : unique_consecutive
   inputs :
     x : X
diff --git a/paddle/phi/api/yaml/static_ops.yaml b/paddle/phi/api/yaml/static_ops.yaml
index f0f26e27c1f2c4017df8a7f78cfc26b1bf0b99b1..802c6b1d46df5415d01942d17efd89dd92c46bec 100644
--- a/paddle/phi/api/yaml/static_ops.yaml
+++ b/paddle/phi/api/yaml/static_ops.yaml
@@ -342,3 +342,13 @@
     func : uniform
     param: [shape, dtype, min, max, seed]
     data_type : dtype
+
+- op : unique
+  args : (Tensor x, bool return_index=false, bool return_inverse=false, bool return_counts=false, int[] axis={}, DataType dtype=DataType::INT64, bool is_sorted=false)
+  output : Tensor(out), Tensor(indices), Tensor(inverse), Tensor(counts)
+  optional : indices, counts
+  infer_meta :
+    func : UniqueRawInferMeta
+  kernel :
+    func : unique
+    data_type : x
diff --git a/paddle/phi/ops/compat/unique_sig.cc b/paddle/phi/ops/compat/unique_sig.cc
index 2a7ba543012f3edfa3dba5df02680d52def338b8..8a38775bc608027cce0f5119c5ed20738a2a3e8c 100644
--- a/paddle/phi/ops/compat/unique_sig.cc
+++ b/paddle/phi/ops/compat/unique_sig.cc
@@ -17,6 +17,17 @@ limitations under the License. */
 
 namespace phi {
 
 KernelSignature UniqueOpArgumentMapping(const ArgumentMappingContext& ctx) {
+  if (ctx.IsForInferShape()) {
+    return KernelSignature("unique_raw",
+                           {"X"},
+                           {"return_index",
+                            "return_inverse",
+                            "return_counts",
+                            "axis",
+                            "dtype",
+                            "is_sorted"},
+                           {"Out", "Indices", "Index", "Counts"});
+  }
   bool is_sorted = paddle::any_cast<bool>(ctx.Attr("is_sorted"));
   if (is_sorted) {
     return KernelSignature(
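
The sketch below restates, outside of Paddle, the place-selection rule that the
relocated hook encodes (the hook body is essentially the deleted
UniqueOp::GetExpectedKernelType, rewired to the generated static op through the
get_expected_kernel_type entry in op_compat.yaml). All names in it are
illustrative only; Place, SelectUniquePlace, and requested are not Paddle APIs.

#include <iostream>

enum class Place { kCPU, kGPU };

// Mirrors the rule in GetUniqueExpectedKernelType: the unsorted legacy path
// (fluid.layers.unique) ships only a CPU kernel, so the kernel key must pin
// CPUPlace; the sorted path (paddle.unique) keeps whatever place was
// requested, i.e. ctx.GetPlace() in the real hook.
Place SelectUniquePlace(bool is_sorted, Place requested) {
  if (!is_sorted) {
    return Place::kCPU;  // no CUDA kernel exists for this code path
  }
  return requested;
}

int main() {
  // A GPU request is overridden on the legacy (unsorted) path...
  std::cout << (SelectUniquePlace(false, Place::kGPU) == Place::kCPU) << "\n";  // 1
  // ...but honored on the new paddle.unique (sorted) path.
  std::cout << (SelectUniquePlace(true, Place::kGPU) == Place::kGPU) << "\n";  // 1
}

Under that rule, a GPU placement only survives when is_sorted is true, which
matches the comments carried over from the deleted operator into the new hook.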