Unverified commit 3d35aa80, authored by zyfncg, committed by GitHub

Rename kernel for top_k, slogdeterminant, generate_proposals_v2 (#48594)

* rename kernel for top_k, slogdeterminant, generate_proposals_v2

* fix bug
Parent 3cbca60f
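The whole patch follows one pattern: the string name under which a phi kernel is registered is shortened, and a base-kernel-name mapping keeps the legacy operator name resolvable. Below is a minimal sketch of the two macros involved, using a hypothetical `demo`/`demo_v2` kernel rather than one from this patch (both macros appear verbatim in the diff; the kernel body here is illustrative only):

#include "paddle/phi/core/compat/op_utils.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/kernel_registry.h"

namespace phi {

// Hypothetical kernel, for illustration only.
template <typename T, typename Context>
void DemoKernel(const Context& dev_ctx, const DenseTensor& x, DenseTensor* out) {
  dev_ctx.template Alloc<T>(out);  // allocate the output on the right device
}

}  // namespace phi

// Register the kernel under its new, shorter name...
PD_REGISTER_KERNEL(demo, CPU, ALL_LAYOUT, phi::DemoKernel, float, double) {}

// ...and map the legacy operator name onto it, so code that still
// refers to "demo_v2" resolves to the renamed kernel.
PD_REGISTER_BASE_KERNEL_NAME(demo_v2, demo);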
@@ -1504,7 +1504,7 @@
     func : UnchangedInferMeta
     param : [x]
   kernel :
-    func : slogdeterminant_grad
+    func : slogdet_grad
 
 - backward_op : softmax_grad
   forward : softmax (Tensor x, int axis) -> Tensor(out)
@@ -1713,7 +1713,7 @@
     func : UnchangedInferMeta
     param : [x]
   kernel :
-    func : top_k_grad
+    func : topk_grad
 
 - backward_op : transpose_double_grad
   forward : transpose_grad (Tensor grad_out, int[] perm) -> Tensor(grad_x)
...
@@ -878,7 +878,7 @@
   infer_meta :
     func : GenerateProposalsV2InferMeta
   kernel :
-    func : generate_proposals_v2
+    func : generate_proposals
 
 - op : greater_equal
   args : (Tensor x, Tensor y)
@@ -1935,7 +1935,7 @@
   infer_meta :
     func : UnchangedInferMeta
   kernel :
-    func : slogdeterminant
+    func : slogdet
   backward : slogdet_grad
 
 - op : softmax
@@ -2100,7 +2100,7 @@
   infer_meta :
     func : TopKInferMeta
   kernel :
-    func : top_k
+    func : topk
   backward : topk_grad
 
 - op : transpose
...
@@ -83,7 +83,8 @@ static const std::unordered_set<std::string> deprecated_op_names(
      "bicubic_interp",
      "bicubic_interp_grad",
      "crop",
-     "crop_grad"});
+     "crop_grad",
+     "generate_proposals"});
 
 class DefaultKernelSignatureMap {
  public:
...
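For context, `deprecated_op_names` is a plain `std::unordered_set<std::string>`: listing "generate_proposals" here flags the old fluid operator of that name as deprecated, so the name now belongs to the renamed phi kernel alone. A self-contained sketch of the lookup pattern (the `IsDeprecatedOp` helper is illustrative, not part of this file):

#include <string>
#include <unordered_set>

// Same shape as the set in op_utils.h, trimmed to a few entries.
static const std::unordered_set<std::string> deprecated_op_names(
    {"crop", "crop_grad", "generate_proposals"});

// Illustrative helper: lookups can skip names recorded as deprecated.
bool IsDeprecatedOp(const std::string& name) {
  return deprecated_op_names.count(name) > 0;
}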
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-#include "paddle/phi/kernels/generate_proposals_v2_kernel.h"
+#include "paddle/phi/kernels/generate_proposals_kernel.h"
 
 #include "paddle/phi/backends/cpu/cpu_context.h"
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/kernels/funcs/detection/nms_util.h"
@@ -284,7 +284,7 @@ std::pair<DenseTensor, DenseTensor> ProposalForOneImage(
 }
 
 template <typename T, typename Context>
-void GenerateProposalsV2Kernel(const Context& ctx,
-                               const DenseTensor& scores,
-                               const DenseTensor& bbox_deltas,
-                               const DenseTensor& im_shape,
+void GenerateProposalsKernel(const Context& ctx,
+                             const DenseTensor& scores,
+                             const DenseTensor& bbox_deltas,
+                             const DenseTensor& im_shape,
@@ -384,9 +384,9 @@ void GenerateProposalsV2Kernel(const Context& ctx,
 
 }  // namespace phi
 
-PD_REGISTER_KERNEL(generate_proposals_v2,
+PD_REGISTER_KERNEL(generate_proposals,
                    CPU,
                    ALL_LAYOUT,
-                   phi::GenerateProposalsV2Kernel,
+                   phi::GenerateProposalsKernel,
                    float,
                    double) {}
@@ -17,7 +17,7 @@
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/kernels/impl/slogdeterminant_grad_kernel_impl.h"
 
-PD_REGISTER_KERNEL(slogdeterminant_grad,
+PD_REGISTER_KERNEL(slogdet_grad,
                    CPU,
                    ALL_LAYOUT,
                    phi::SlogDeterminantGradKernel,
...
@@ -17,9 +17,5 @@
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/kernels/impl/slogdeterminant_kernel_impl.h"
 
-PD_REGISTER_KERNEL(slogdeterminant,
-                   CPU,
-                   ALL_LAYOUT,
-                   phi::SlogDeterminantKernel,
-                   float,
-                   double) {}
+PD_REGISTER_KERNEL(
+    slogdet, CPU, ALL_LAYOUT, phi::SlogDeterminantKernel, float, double) {}
@@ -141,7 +141,7 @@ void TopkGradKernel(const Context& dev_ctx,
 
 }  // namespace phi
 
-PD_REGISTER_KERNEL(top_k_grad,
+PD_REGISTER_KERNEL(topk_grad,
                    CPU,
                    ALL_LAYOUT,
                    phi::TopkGradKernel,
...
@@ -227,4 +227,4 @@ void TopkKernel(const Context& dev_ctx,
 }  // namespace phi
 
 PD_REGISTER_KERNEL(
-    top_k, CPU, ALL_LAYOUT, phi::TopkKernel, float, double, int32_t, int64_t) {}
+    topk, CPU, ALL_LAYOUT, phi::TopkKernel, float, double, int32_t, int64_t) {}
@@ -19,7 +19,7 @@
 namespace phi {
 
 template <typename T, typename Context>
-void GenerateProposalsV2Kernel(const Context& ctx,
-                               const DenseTensor& scores,
-                               const DenseTensor& bbox_deltas,
-                               const DenseTensor& im_shape,
+void GenerateProposalsKernel(const Context& ctx,
+                             const DenseTensor& scores,
+                             const DenseTensor& bbox_deltas,
+                             const DenseTensor& im_shape,
...
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-#include "paddle/phi/kernels/generate_proposals_v2_kernel.h"
+#include "paddle/phi/kernels/generate_proposals_kernel.h"
 
 #include <algorithm>
 #include <vector>
@@ -458,7 +458,7 @@ static std::pair<DenseTensor, DenseTensor> ProposalForOneImage(
 }
 
 template <typename T, typename Context>
-void GenerateProposalsV2Kernel(const Context &ctx,
-                               const DenseTensor &scores,
-                               const DenseTensor &bbox_deltas,
-                               const DenseTensor &im_shape,
+void GenerateProposalsKernel(const Context &ctx,
+                             const DenseTensor &scores,
+                             const DenseTensor &bbox_deltas,
+                             const DenseTensor &im_shape,
@@ -584,8 +584,5 @@ void GenerateProposalsV2Kernel(const Context &ctx,
 
 }  // namespace phi
 
-PD_REGISTER_KERNEL(generate_proposals_v2,
-                   GPU,
-                   ALL_LAYOUT,
-                   phi::GenerateProposalsV2Kernel,
-                   float) {}
+PD_REGISTER_KERNEL(
+    generate_proposals, GPU, ALL_LAYOUT, phi::GenerateProposalsKernel, float) {}
@@ -17,7 +17,7 @@
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/kernels/impl/slogdeterminant_grad_kernel_impl.h"
 
-PD_REGISTER_KERNEL(slogdeterminant_grad,
+PD_REGISTER_KERNEL(slogdet_grad,
                    GPU,
                    ALL_LAYOUT,
                    phi::SlogDeterminantGradKernel,
...
@@ -17,9 +17,5 @@
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/kernels/impl/slogdeterminant_kernel_impl.h"
 
-PD_REGISTER_KERNEL(slogdeterminant,
-                   GPU,
-                   ALL_LAYOUT,
-                   phi::SlogDeterminantKernel,
-                   float,
-                   double) {}
+PD_REGISTER_KERNEL(
+    slogdet, GPU, ALL_LAYOUT, phi::SlogDeterminantKernel, float, double) {}
@@ -76,7 +76,7 @@ void TopkGradKernel(const Context& dev_ctx,
 
 }  // namespace phi
 
-PD_REGISTER_KERNEL(top_k_grad,
+PD_REGISTER_KERNEL(topk_grad,
                    GPU,
                    ALL_LAYOUT,
                    phi::TopkGradKernel,
...
@@ -332,7 +332,7 @@ void TopkKernel(const Context& dev_ctx,
 
 }  // namespace phi
 
-PD_REGISTER_KERNEL(top_k,
+PD_REGISTER_KERNEL(topk,
                    GPU,
                    ALL_LAYOUT,
                    phi::TopkKernel,
...
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-#include "paddle/phi/kernels/generate_proposals_v2_kernel.h"
+#include "paddle/phi/kernels/generate_proposals_kernel.h"
 
 #include "paddle/phi/backends/xpu/enforce_xpu.h"
 #include "paddle/phi/backends/xpu/xpu_context.h"
@@ -272,7 +272,7 @@ std::pair<DenseTensor, DenseTensor> ProposalForOneImage(
 }
 
 template <typename T, typename Context>
-void GenerateProposalsV2Kernel(const Context& dev_ctx,
-                               const DenseTensor& scores,
-                               const DenseTensor& bbox_deltas,
-                               const DenseTensor& im_shape,
+void GenerateProposalsKernel(const Context& dev_ctx,
+                             const DenseTensor& scores,
+                             const DenseTensor& bbox_deltas,
+                             const DenseTensor& im_shape,
...
@@ -408,8 +408,5 @@ void GenerateProposalsV2Kernel(const Context& dev_ctx,
 }
 
 }  // namespace phi
 
-PD_REGISTER_KERNEL(generate_proposals_v2,
-                   XPU,
-                   ALL_LAYOUT,
-                   phi::GenerateProposalsV2Kernel,
-                   float) {}
+PD_REGISTER_KERNEL(
+    generate_proposals, XPU, ALL_LAYOUT, phi::GenerateProposalsKernel, float) {}
@@ -173,4 +173,4 @@ void TopkKernel(const Context& dev_ctx,
 
 }  // namespace phi
 
-PD_REGISTER_KERNEL(top_k, XPU, ALL_LAYOUT, phi::TopkKernel, float) {}
+PD_REGISTER_KERNEL(topk, XPU, ALL_LAYOUT, phi::TopkKernel, float) {}
...
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/phi/core/compat/op_utils.h"
+
+PD_REGISTER_BASE_KERNEL_NAME(generate_proposals_v2, generate_proposals);
+PD_REGISTER_BASE_KERNEL_NAME(generate_proposals_v2_grad,
+                             generate_proposals_grad);
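The file above is newly added: it maps the legacy `generate_proposals_v2` operator (and its grad) onto the renamed phi kernels. A hedged sketch of how such a mapping becomes observable; `TransToPhiKernelName` is, as far as I recall, phi's fluid-to-phi name translation helper, so treat the header path and exact signature as assumptions rather than facts from this patch:

#include <iostream>

#include "paddle/phi/core/compat/convert_utils.h"  // assumed header location

int main() {
  // With the base-kernel-name mapping registered, the legacy operator
  // name should translate to the new kernel name at lookup time.
  std::cout << phi::TransToPhiKernelName("generate_proposals_v2") << std::endl;
  // expected output: generate_proposals
}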
@@ -19,10 +19,13 @@ namespace phi {
 KernelSignature SlogDeterminantGradOpArgumentMapping(
     const ArgumentMappingContext& ctx) {
   return KernelSignature(
-      "slogdeterminant_grad", {"Input", "Out", "Out@GRAD"}, {}, {"Input@GRAD"});
+      "slogdet_grad", {"Input", "Out", "Out@GRAD"}, {}, {"Input@GRAD"});
 }
 
 }  // namespace phi
 
+PD_REGISTER_BASE_KERNEL_NAME(slogdeterminant, slogdet);
+PD_REGISTER_BASE_KERNEL_NAME(slogdeterminant_grad, slogdet_grad);
+
 PD_REGISTER_ARG_MAPPING_FN(slogdeterminant_grad,
                            phi::SlogDeterminantGradOpArgumentMapping);
@@ -19,16 +19,16 @@ namespace phi {
 KernelSignature TopkOpArgumentMapping(const ArgumentMappingContext& ctx) {
   if (ctx.HasInput("K")) {
     return KernelSignature(
-        "top_k", {"X"}, {"K", "axis", "largest", "sorted"}, {"Out", "Indices"});
+        "topk", {"X"}, {"K", "axis", "largest", "sorted"}, {"Out", "Indices"});
   } else {
     return KernelSignature(
-        "top_k", {"X"}, {"k", "axis", "largest", "sorted"}, {"Out", "Indices"});
+        "topk", {"X"}, {"k", "axis", "largest", "sorted"}, {"Out", "Indices"});
   }
 }
 
 KernelSignature TopkGradOpArgumentMapping(const ArgumentMappingContext& ctx) {
-  return KernelSignature("top_k_grad",
+  return KernelSignature("topk_grad",
                          {"X", "Indices", "Out@GRAD"},
                          {"k", "axis", "largest", "sorted"},
                          {"X@GRAD"});
@@ -36,7 +36,7 @@ KernelSignature TopkGradOpArgumentMapping(const ArgumentMappingContext& ctx) {
 
 }  // namespace phi
 
-PD_REGISTER_BASE_KERNEL_NAME(top_k_v2, top_k);
-PD_REGISTER_BASE_KERNEL_NAME(top_k_v2_grad, top_k_grad);
+PD_REGISTER_BASE_KERNEL_NAME(top_k_v2, topk);
+PD_REGISTER_BASE_KERNEL_NAME(top_k_v2_grad, topk_grad);
 
 PD_REGISTER_ARG_MAPPING_FN(top_k_v2, phi::TopkOpArgumentMapping);
 PD_REGISTER_ARG_MAPPING_FN(top_k_v2_grad, phi::TopkGradOpArgumentMapping);