Unverified commit d947b20a, authored by engineer1109, committed by GitHub

add argmax to ops (#52562)

Parent 5c19bfc8
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/fluid/framework/op_version_registry.h"
#include "paddle/fluid/operators/arg_min_max_op_base.h"
#include "paddle/phi/core/infermeta_utils.h"
#include "paddle/phi/infermeta/unary.h"
DECLARE_INFER_SHAPE_FUNCTOR(arg_max,
ArgMaxInferShapeFunctor,
PD_INFER_META(phi::ArgMinMaxInferMeta));
REGISTER_OPERATOR(
arg_max,
paddle::operators::ArgMinMaxOp,
paddle::operators::ArgMaxOpMaker,
paddle::framework::EmptyGradOpMaker<paddle::framework::OpDesc>,
paddle::framework::EmptyGradOpMaker<paddle::imperative::OpBase>,
ArgMaxInferShapeFunctor);
REGISTER_OP_VERSION(arg_max).AddCheckpoint(
R"ROC(
Upgrade argmax: add a new attribute [flatten] and modify the attribute of dtype)ROC",
paddle::framework::compatible::OpVersionDesc()
.NewAttr("flatten",
"In order to compute the argmax over the flattened array "
"when the "
"argument `axis` in python API is None.",
false)
.ModifyAttr("dtype",
"Change the default value of dtype from -1 to 3"
", means return the int64 indices directly. The rearse why "
"changing the default value is that the int64 value in "
"VarType is 3 in the frameworke.proto.",
3));
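The checkpoint above records how the Python-level `axis=None` is lowered to the `flatten` attribute and why `dtype` now defaults to 3 (the `framework.proto` value for int64). A minimal dygraph sketch of that behavior, assuming the public `paddle.argmax` API (which is not part of this diff):

```python
import paddle

x = paddle.to_tensor([[5.0, 2.0, 9.0],
                      [1.0, 8.0, 3.0]])

# axis=None maps to flatten=True: the index is computed over the
# flattened array, so the result is a single int64 index (here 2).
flat_idx = paddle.argmax(x)

# With an explicit axis the op returns one index per row, still int64
# because the C++ `dtype` attribute defaults to 3 (VarType INT64).
row_idx = paddle.argmax(x, axis=1)  # [2, 1]
print(flat_idx.item(), row_idx.numpy(), row_idx.dtype)
```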
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/arg_min_max_op_base.h"
namespace paddle {
namespace operators {
using NPUDeviceContext = platform::NPUDeviceContext;
template <typename T>
struct VisitDataArgNPUMaxFunctor {
const framework::ExecutionContext& ctx;
explicit VisitDataArgNPUMaxFunctor(const framework::ExecutionContext& ctx)
: ctx(ctx) {}
template <typename Tout>
void apply() const {
auto& x = *(ctx.Input<phi::DenseTensor>("X"));
auto& out = *(ctx.Output<phi::DenseTensor>("Out"));
out.template mutable_data<Tout>(ctx.GetPlace());
auto axis = ctx.Attr<int64_t>("axis");
auto dtype = ctx.Attr<int>("dtype");
const bool& flatten = ctx.Attr<bool>("flatten");
phi::DenseTensor transformed_x(x.type());
transformed_x.ShareDataWith(x);
if (flatten) {
transformed_x.Resize(phi::make_ddim({x.numel()}));
}
auto stream = ctx.template device_context<NPUDeviceContext>().stream();
NpuOpRunner runner;
runner.SetType("ArgMaxV2")
.AddInput(transformed_x)
.AddInput(std::vector<int64_t>{axis})
.AddOutput(out)
.AddAttrDataType("dtype", dtype)
.Run(stream);
}
};
template <typename T>
class ArgMaxNPUKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto& dtype = ctx.Attr<int>("dtype");
if (dtype < 0) {
framework::VisitDataTypeTiny(static_cast<framework::proto::VarType::Type>(
framework::proto::VarType::INT64),
VisitDataArgNPUMaxFunctor<T>(ctx));
return;
}
framework::VisitDataTypeTiny(
static_cast<framework::proto::VarType::Type>(dtype),
VisitDataArgNPUMaxFunctor<T>(ctx));
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_NPU_KERNEL(arg_max,
ops::ArgMaxNPUKernel<float>,
ops::ArgMaxNPUKernel<paddle::platform::float16>);
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <string>
#include <type_traits>
#include <vector>
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/string/printf.h"
#include "paddle/phi/core/ddim.h"
namespace paddle {
namespace operators {
class ArgMinMaxOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
phi::KernelKey GetExpectedKernelType(
const framework::ExecutionContext& ctx) const override {
auto input_data_type =
framework::OperatorWithKernel::IndicateVarDataType(ctx, "X");
return phi::KernelKey(input_data_type, ctx.GetPlace());
}
};
class BaseArgMinMaxOpMaker : public framework::OpProtoAndCheckerMaker {
protected:
virtual const char* OpName() const = 0;
virtual const char* Name() const = 0;
public:
void Make() override {
AddInput("X", "Input tensor.");
AddOutput("Out", "Output tensor.");
AddAttr<int64_t>("axis", "The axis in which to compute the arg indics.")
.SupportTensor();
AddAttr<bool>("keepdims", "Keep the dim that to reduce.").SetDefault(false);
AddAttr<bool>("flatten",
"Flatten the input value, and search the min or max indices")
.SetDefault(false);
AddAttr<int>("dtype",
"(int, 3), the dtype of indices, the indices dtype must be "
"int32, int64."
"default dtype is int64, and proto value is 3.")
.SetDefault(3);
AddComment(string::Sprintf(R"DOC(
%s Operator.
Computes the indices of the %s elements of the input tensor
along the provided axis.
)DOC",
OpName(),
Name()));
}
};
class ArgMinOpMaker : public BaseArgMinMaxOpMaker {
protected:
const char* OpName() const override { return "ArgMin"; }
const char* Name() const override { return "min"; }
};
class ArgMaxOpMaker : public BaseArgMinMaxOpMaker {
protected:
const char* OpName() const override { return "ArgMax"; }
const char* Name() const override { return "max"; }
};
} // namespace operators
} // namespace paddle
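For reference, the attributes declared by `BaseArgMinMaxOpMaker` (`axis`, `keepdims`, `flatten`, `dtype`) surface in the Python API roughly as sketched below; the `paddle.argmax`/`paddle.argmin` signatures are assumed here and are not part of this diff:

```python
import paddle

x = paddle.to_tensor([[5.0, 2.0, 9.0],
                      [1.0, 8.0, 3.0]])

# `keepdims` keeps the reduced axis as size 1 in the output shape.
kept = paddle.argmax(x, axis=1, keepdim=True)    # shape [2, 1]

# `dtype` selects the index type of the output (int32 or int64).
idx32 = paddle.argmin(x, axis=0, dtype='int32')  # int32 indices, shape [3]

print(kept.shape, idx32.dtype)
```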
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/fluid/framework/op_version_registry.h"
#include "paddle/fluid/operators/arg_min_max_op_base.h"
#include "paddle/phi/core/infermeta_utils.h"
#include "paddle/phi/infermeta/unary.h"
DECLARE_INFER_SHAPE_FUNCTOR(arg_min,
ArgMinInferShapeFunctor,
PD_INFER_META(phi::ArgMinMaxInferMeta));
REGISTER_OPERATOR(
arg_min,
paddle::operators::ArgMinMaxOp,
paddle::operators::ArgMinOpMaker,
paddle::framework::EmptyGradOpMaker<paddle::framework::OpDesc>,
paddle::framework::EmptyGradOpMaker<paddle::imperative::OpBase>,
ArgMinInferShapeFunctor);
REGISTER_OP_VERSION(arg_min).AddCheckpoint(
R"ROC(
Upgrade argmin: add a new attribute [flatten] and modify the attribute of dtype)ROC",
paddle::framework::compatible::OpVersionDesc()
.NewAttr("flatten",
"In order to compute the argmin over the flattened array "
"when the "
"argument `axis` in python API is None.",
false)
.ModifyAttr("dtype",
"Change the default value of dtype from -1 to 3"
", means return the int64 indices directly. The rearse why "
"changing the default value is that the int64 value in "
"VarType is 3 in the frameworke.proto.",
3));
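As for `arg_max`, the default of `dtype` moves from -1 to 3, so the kernel returns int64 indices unless int32 is requested explicitly. A small sketch, again assuming the public `paddle.argmin` API:

```python
import paddle

x = paddle.to_tensor([3.0, -1.0, 7.0, 0.0])

# Default dtype attribute is 3, i.e. framework.proto VarType INT64.
i64 = paddle.argmin(x)                 # int64 index -> 1

# Requesting int32 switches the attribute to the int32 proto value.
i32 = paddle.argmin(x, dtype='int32')  # int32 index -> 1
print(i64.dtype, i32.dtype)
```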
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/arg_min_max_op_base.h"
namespace paddle {
namespace operators {
template <typename DeviceContext, typename T>
class ArgMinNPUKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* x = ctx.Input<phi::DenseTensor>("X");
int64_t axis = ctx.Attr<int64_t>("axis");
auto dtype = ctx.Attr<int>("dtype");
auto* out = ctx.Output<phi::DenseTensor>("Out");
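// The output indices are allocated as int32 here; the `dtype` attribute is
// forwarded to the NPU runner below.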
out->mutable_data<int32_t>(ctx.GetPlace());
NpuOpRunner runner;
runner.SetType("ArgMin")
.AddInput(*x)
.AddInput(std::vector<int64_t>{axis})
.AddOutput(*out)
.AddAttr("dtype", dtype);
auto stream =
ctx.template device_context<paddle::platform::NPUDeviceContext>()
.stream();
runner.Run(stream);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_NPU_KERNEL(
arg_min,
ops::ArgMinNPUKernel<paddle::platform::NPUDeviceContext, float>,
ops::ArgMinNPUKernel<paddle::platform::NPUDeviceContext,
paddle::platform::float16>);
@@ -143,22 +143,6 @@
data_transform :
support_trans_dtype : start, end, step
- op : argmax
args : (Tensor x, Scalar axis, bool keepdims, bool flatten, int dtype)
output : Tensor(out)
infer_meta :
func : ArgMinMaxInferMeta
kernel :
func : argmax
- op : argmin
args : (Tensor x, Scalar axis, bool keepdims, bool flatten, int dtype)
output : Tensor(out)
infer_meta :
func : ArgMinMaxInferMeta
kernel :
func : argmin
- op : assign
args : (Tensor x)
output : Tensor
......
@@ -109,6 +109,26 @@
outputs :
out : Out
- op : argmax(arg_max)
inputs :
x : X
outputs :
out : Out
scalar:
axis:
data_type : int64_t
support_tensor : true
- op : argmin(arg_min)
inputs :
x : X
outputs :
out : Out
scalar:
axis:
data_type : int64_t
support_tensor : true
- op : argsort
inputs :
x : X
......
@@ -61,6 +61,24 @@
func : angle
backward : angle_grad
- op : argmax
args : (Tensor x, Scalar(int64_t) axis, bool keepdims = false, bool flatten = false, int dtype = 3)
output : Tensor(out)
infer_meta :
func : ArgMinMaxInferMeta
kernel :
func : argmax
data_type : x
- op : argmin
args : (Tensor x, Scalar(int64_t) axis, bool keepdims = false, bool flatten = false, int dtype = 3)
output : Tensor(out)
infer_meta :
func : ArgMinMaxInferMeta
kernel :
func : argmin
data_type : x
- op : argsort
args : (Tensor x, int axis=-1, bool descending=false)
output : Tensor(out), Tensor(indices)
......
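Adding `argmax`/`argmin` to this YAML file lets the static-graph operators be generated from the spec above. A minimal static-graph sketch that exercises the registered `arg_max` op, assuming the standard `paddle.static` APIs (not part of this diff):

```python
import numpy as np
import paddle

paddle.enable_static()

main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog, startup_prog):
    x = paddle.static.data(name='x', shape=[2, 3], dtype='float32')
    # Lowered to the generated arg_max op with keepdims=False,
    # flatten=False and dtype=3 (int64), matching the YAML defaults.
    out = paddle.argmax(x, axis=1)

exe = paddle.static.Executor(paddle.CPUPlace())
exe.run(startup_prog)
feed = {'x': np.array([[5, 2, 9], [1, 8, 3]], dtype='float32')}
res, = exe.run(main_prog, feed=feed, fetch_list=[out])
print(res)  # [2 1]
```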
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/phi/core/compat/op_utils.h"
PD_REGISTER_BASE_KERNEL_NAME(arg_max, argmax);
PD_REGISTER_BASE_KERNEL_NAME(arg_min, argmin);