Unverified commit 3d35aa80, authored by zyfncg, committed by GitHub

Rename kernel for top_k, slogdeterminant, generate_proposals_v2 (#48594)

* rename kernel for top_k, slogdeterminant, generate_proposals_v2

* fix bug
Parent commit: 3cbca60f
......@@ -1504,7 +1504,7 @@
func : UnchangedInferMeta
param : [x]
kernel :
func : slogdet_grad
- backward_op : softmax_grad
forward : softmax (Tensor x, int axis) -> Tensor(out)
......@@ -1713,7 +1713,7 @@
func : UnchangedInferMeta
param : [x]
kernel :
func : topk_grad
- backward_op : transpose_double_grad
forward : transpose_grad (Tensor grad_out, int[] perm) -> Tensor(grad_x)
......
......@@ -878,7 +878,7 @@
infer_meta :
func : GenerateProposalsV2InferMeta
kernel :
func : generate_proposals
- op : greater_equal
args : (Tensor x, Tensor y)
......@@ -1935,7 +1935,7 @@
infer_meta :
func : UnchangedInferMeta
kernel :
func : slogdet
backward : slogdet_grad
- op : softmax
......@@ -2100,7 +2100,7 @@
infer_meta :
func : TopKInferMeta
kernel :
func : topk
backward : topk_grad
- op : transpose
......
......@@ -83,7 +83,8 @@ static const std::unordered_set<std::string> deprecated_op_names(
"bicubic_interp",
"bicubic_interp_grad",
"crop",
"crop_grad"});
"crop_grad",
"generate_proposals"});
class DefaultKernelSignatureMap {
public:
......
......@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/generate_proposals_v2_kernel.h"
#include "paddle/phi/kernels/generate_proposals_kernel.h"
#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/detection/nms_util.h"
......@@ -284,21 +284,21 @@ std::pair<DenseTensor, DenseTensor> ProposalForOneImage(
}
template <typename T, typename Context>
void GenerateProposalsV2Kernel(const Context& ctx,
const DenseTensor& scores,
const DenseTensor& bbox_deltas,
const DenseTensor& im_shape,
const DenseTensor& anchors,
const DenseTensor& variances,
int pre_nms_top_n,
int post_nms_top_n,
float nms_thresh,
float min_size,
float eta,
bool pixel_offset,
DenseTensor* rpn_rois,
DenseTensor* rpn_roi_probs,
DenseTensor* rpn_rois_num) {
void GenerateProposalsKernel(const Context& ctx,
const DenseTensor& scores,
const DenseTensor& bbox_deltas,
const DenseTensor& im_shape,
const DenseTensor& anchors,
const DenseTensor& variances,
int pre_nms_top_n,
int post_nms_top_n,
float nms_thresh,
float min_size,
float eta,
bool pixel_offset,
DenseTensor* rpn_rois,
DenseTensor* rpn_roi_probs,
DenseTensor* rpn_rois_num) {
auto& scores_dim = scores.dims();
int64_t num = scores_dim[0];
int64_t c_score = scores_dim[1];
......@@ -384,9 +384,9 @@ void GenerateProposalsV2Kernel(const Context& ctx,
} // namespace phi
// Register the CPU kernel under the renamed phi kernel name
// "generate_proposals" (formerly "generate_proposals_v2"); the scrape left
// both the old and new registration lines, which would double-register.
PD_REGISTER_KERNEL(generate_proposals,
                   CPU,
                   ALL_LAYOUT,
                   phi::GenerateProposalsKernel,
                   float,
                   double) {}
......@@ -17,7 +17,7 @@
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/impl/slogdeterminant_grad_kernel_impl.h"
PD_REGISTER_KERNEL(slogdeterminant_grad,
PD_REGISTER_KERNEL(slogdet_grad,
CPU,
ALL_LAYOUT,
phi::SlogDeterminantGradKernel,
......
......@@ -17,9 +17,5 @@
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/impl/slogdeterminant_kernel_impl.h"
// Register the CPU kernel under the renamed phi kernel name "slogdet"
// (formerly "slogdeterminant"); drop the stale duplicate registration
// left over from the diff residue.
PD_REGISTER_KERNEL(
    slogdet, CPU, ALL_LAYOUT, phi::SlogDeterminantKernel, float, double) {}
......@@ -141,7 +141,7 @@ void TopkGradKernel(const Context& dev_ctx,
} // namespace phi
PD_REGISTER_KERNEL(top_k_grad,
PD_REGISTER_KERNEL(topk_grad,
CPU,
ALL_LAYOUT,
phi::TopkGradKernel,
......
......@@ -227,4 +227,4 @@ void TopkKernel(const Context& dev_ctx,
} // namespace phi
// Register the CPU kernel under the renamed phi kernel name "topk"
// (formerly "top_k"); keep only the post-rename registration line.
PD_REGISTER_KERNEL(
    topk, CPU, ALL_LAYOUT, phi::TopkKernel, float, double, int32_t, int64_t) {}
......@@ -19,20 +19,20 @@
namespace phi {

// Declaration of the region-proposal generation kernel (renamed from
// GenerateProposalsV2Kernel; the old duplicate declaration from the diff
// residue is removed). Produces RPN proposals from per-anchor scores and
// bbox deltas, applying NMS per image.
//
// Parameters (shapes/semantics per the op definition elsewhere in the repo —
// not fully visible here; confirm against GenerateProposalsV2InferMeta):
//   scores, bbox_deltas  - per-anchor classification scores and box deltas
//   im_shape             - per-image input shape
//   anchors, variances   - anchor boxes and their variances
//   pre_nms_top_n        - proposals kept before NMS
//   post_nms_top_n       - proposals kept after NMS
//   nms_thresh, min_size, eta, pixel_offset - NMS/filtering controls
// Outputs:
//   rpn_rois, rpn_roi_probs, rpn_rois_num   - proposals, their scores, and
//                                             per-image proposal counts
template <typename T, typename Context>
void GenerateProposalsKernel(const Context& ctx,
                             const DenseTensor& scores,
                             const DenseTensor& bbox_deltas,
                             const DenseTensor& im_shape,
                             const DenseTensor& anchors,
                             const DenseTensor& variances,
                             int pre_nms_top_n,
                             int post_nms_top_n,
                             float nms_thresh,
                             float min_size,
                             float eta,
                             bool pixel_offset,
                             DenseTensor* rpn_rois,
                             DenseTensor* rpn_roi_probs,
                             DenseTensor* rpn_rois_num);

}  // namespace phi
......@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/generate_proposals_v2_kernel.h"
#include "paddle/phi/kernels/generate_proposals_kernel.h"
#include <algorithm>
#include <vector>
......@@ -458,21 +458,21 @@ static std::pair<DenseTensor, DenseTensor> ProposalForOneImage(
}
template <typename T, typename Context>
void GenerateProposalsV2Kernel(const Context &ctx,
const DenseTensor &scores,
const DenseTensor &bbox_deltas,
const DenseTensor &im_shape,
const DenseTensor &anchors,
const DenseTensor &variances,
int pre_nms_top_n,
int post_nms_top_n,
float nms_thresh,
float min_size,
float eta,
bool pixel_offset,
DenseTensor *rpn_rois,
DenseTensor *rpn_roi_probs,
DenseTensor *rpn_rois_num) {
void GenerateProposalsKernel(const Context &ctx,
const DenseTensor &scores,
const DenseTensor &bbox_deltas,
const DenseTensor &im_shape,
const DenseTensor &anchors,
const DenseTensor &variances,
int pre_nms_top_n,
int post_nms_top_n,
float nms_thresh,
float min_size,
float eta,
bool pixel_offset,
DenseTensor *rpn_rois,
DenseTensor *rpn_roi_probs,
DenseTensor *rpn_rois_num) {
PADDLE_ENFORCE_GE(
eta,
1.,
......@@ -584,8 +584,5 @@ void GenerateProposalsV2Kernel(const Context &ctx,
} // namespace phi
// Register the GPU kernel under the renamed phi kernel name
// "generate_proposals"; the old "generate_proposals_v2" registration left
// by the diff residue is removed.
PD_REGISTER_KERNEL(
    generate_proposals, GPU, ALL_LAYOUT, phi::GenerateProposalsKernel, float) {}
......@@ -17,7 +17,7 @@
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/impl/slogdeterminant_grad_kernel_impl.h"
PD_REGISTER_KERNEL(slogdeterminant_grad,
PD_REGISTER_KERNEL(slogdet_grad,
GPU,
ALL_LAYOUT,
phi::SlogDeterminantGradKernel,
......
......@@ -17,9 +17,5 @@
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/impl/slogdeterminant_kernel_impl.h"
// Register the GPU kernel under the renamed phi kernel name "slogdet";
// drop the stale "slogdeterminant" duplicate registration.
PD_REGISTER_KERNEL(
    slogdet, GPU, ALL_LAYOUT, phi::SlogDeterminantKernel, float, double) {}
......@@ -76,7 +76,7 @@ void TopkGradKernel(const Context& dev_ctx,
} // namespace phi
PD_REGISTER_KERNEL(top_k_grad,
PD_REGISTER_KERNEL(topk_grad,
GPU,
ALL_LAYOUT,
phi::TopkGradKernel,
......
......@@ -332,7 +332,7 @@ void TopkKernel(const Context& dev_ctx,
} // namespace phi
PD_REGISTER_KERNEL(top_k,
PD_REGISTER_KERNEL(topk,
GPU,
ALL_LAYOUT,
phi::TopkKernel,
......
......@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/generate_proposals_v2_kernel.h"
#include "paddle/phi/kernels/generate_proposals_kernel.h"
#include "paddle/phi/backends/xpu/enforce_xpu.h"
#include "paddle/phi/backends/xpu/xpu_context.h"
......@@ -272,21 +272,21 @@ std::pair<DenseTensor, DenseTensor> ProposalForOneImage(
}
template <typename T, typename Context>
void GenerateProposalsV2Kernel(const Context& dev_ctx,
const DenseTensor& scores,
const DenseTensor& bbox_deltas,
const DenseTensor& im_shape,
const DenseTensor& anchors,
const DenseTensor& variances,
int pre_nms_top_n,
int post_nms_top_n,
float nms_thresh,
float min_size,
float eta,
bool pixel_offset,
DenseTensor* rpn_rois,
DenseTensor* rpn_roi_probs,
DenseTensor* rpn_rois_num) {
void GenerateProposalsKernel(const Context& dev_ctx,
const DenseTensor& scores,
const DenseTensor& bbox_deltas,
const DenseTensor& im_shape,
const DenseTensor& anchors,
const DenseTensor& variances,
int pre_nms_top_n,
int post_nms_top_n,
float nms_thresh,
float min_size,
float eta,
bool pixel_offset,
DenseTensor* rpn_rois,
DenseTensor* rpn_roi_probs,
DenseTensor* rpn_rois_num) {
PADDLE_ENFORCE_GE(eta,
1.,
phi::errors::InvalidArgument(
......@@ -408,8 +408,5 @@ void GenerateProposalsV2Kernel(const Context& dev_ctx,
}
} // namespace phi
// Register the XPU kernel under the renamed phi kernel name
// "generate_proposals"; drop the stale "generate_proposals_v2" duplicate.
PD_REGISTER_KERNEL(
    generate_proposals, XPU, ALL_LAYOUT, phi::GenerateProposalsKernel, float) {}
......@@ -173,4 +173,4 @@ void TopkKernel(const Context& dev_ctx,
} // namespace phi
// Register the XPU kernel under the renamed phi kernel name "topk"
// (formerly "top_k"); keep only the post-rename registration line.
PD_REGISTER_KERNEL(topk, XPU, ALL_LAYOUT, phi::TopkKernel, float) {}
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/core/compat/op_utils.h"

// Map the legacy fluid op names "generate_proposals_v2" / "_grad" to the
// renamed phi kernel names "generate_proposals" / "_grad" so that existing
// programs using the old op names keep resolving to the renamed kernels.
PD_REGISTER_BASE_KERNEL_NAME(generate_proposals_v2, generate_proposals);
PD_REGISTER_BASE_KERNEL_NAME(generate_proposals_v2_grad,
                             generate_proposals_grad);
......@@ -19,10 +19,13 @@ namespace phi {
// Argument mapping for the legacy "slogdeterminant_grad" op onto the renamed
// "slogdet_grad" phi kernel. The diff residue left both the old and the new
// kernel-name lines inside the return statement; keep only the renamed one.
KernelSignature SlogDeterminantGradOpArgumentMapping(
    const ArgumentMappingContext& ctx) {
  return KernelSignature(
      "slogdet_grad", {"Input", "Out", "Out@GRAD"}, {}, {"Input@GRAD"});
}
} // namespace phi
// Route the legacy op names to the renamed "slogdet" phi kernels, and hook
// up the argument-mapping function for the grad op.
PD_REGISTER_BASE_KERNEL_NAME(slogdeterminant, slogdet);
PD_REGISTER_BASE_KERNEL_NAME(slogdeterminant_grad, slogdet_grad);
PD_REGISTER_ARG_MAPPING_FN(slogdeterminant_grad,
                           phi::SlogDeterminantGradOpArgumentMapping);
......@@ -19,16 +19,16 @@ namespace phi {
// Argument mapping for the legacy "top_k_v2" op onto the renamed "topk" phi
// kernel. When k is supplied as a tensor input ("K"), forward the input;
// otherwise pass the scalar attribute "k". The diff residue had duplicated
// old/new kernel-name lines in both branches; keep only the renamed form.
KernelSignature TopkOpArgumentMapping(const ArgumentMappingContext& ctx) {
  if (ctx.HasInput("K")) {
    return KernelSignature(
        "topk", {"X"}, {"K", "axis", "largest", "sorted"}, {"Out", "Indices"});
  } else {
    return KernelSignature(
        "topk", {"X"}, {"k", "axis", "largest", "sorted"}, {"Out", "Indices"});
  }
}
KernelSignature TopkGradOpArgumentMapping(const ArgumentMappingContext& ctx) {
return KernelSignature("top_k_grad",
return KernelSignature("topk_grad",
{"X", "Indices", "Out@GRAD"},
{"k", "axis", "largest", "sorted"},
{"X@GRAD"});
......@@ -36,7 +36,7 @@ KernelSignature TopkGradOpArgumentMapping(const ArgumentMappingContext& ctx) {
} // namespace phi
// Route the legacy "top_k_v2" op names to the renamed "topk" phi kernels.
// The diff residue left the pre-rename mappings (top_k / top_k_grad)
// alongside the renamed ones; keep only the post-rename mappings.
PD_REGISTER_BASE_KERNEL_NAME(top_k_v2, topk);
PD_REGISTER_BASE_KERNEL_NAME(top_k_v2_grad, topk_grad);

PD_REGISTER_ARG_MAPPING_FN(top_k_v2, phi::TopkOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(top_k_v2_grad, phi::TopkGradOpArgumentMapping);
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
To comment, please sign up.