Unverified commit ff6507db authored by YuanRisheng, committed by GitHub

[PTen] Add alias kernel name (#37881)

* add alias kernel name

* modify code as suggestions
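
In short: this commit introduces a fluid-to-pten kernel-name alias table (`kernel_alias_name_map`) plus a lookup helper (`pten::TransToPtenKernelName`), and renames the registered pten kernels accordingly (e.g. `elementwise_add` -> `add`, `fill_constant` -> `full`). Below is a minimal, self-contained sketch of the lookup behavior; the three map entries are copied from the diff, while the truncated map and the `main()` driver are illustrative scaffolding, not PaddlePaddle API.

```cpp
// Sketch of the alias lookup added in this commit (cf.
// paddle/pten/core/kernel_alias_name.h and convert_utils.cc below).
#include <iostream>
#include <string>
#include <unordered_map>

// A few entries copied from kernel_alias_name_map; the real table is longer.
const std::unordered_map<std::string, std::string> kernel_alias_name_map = {
    {"elementwise_add", "add"},
    {"fill_constant", "full"},
    {"reshape2", "reshape"},
};

// Same logic as pten::TransToPtenKernelName: return the pten alias if one
// exists, otherwise return the fluid op name unchanged.
const std::string& TransToPtenKernelName(const std::string& fluid_op_name) {
  auto it = kernel_alias_name_map.find(fluid_op_name);
  if (it != kernel_alias_name_map.end()) {
    return it->second;
  }
  return fluid_op_name;
}

int main() {
  std::cout << TransToPtenKernelName("elementwise_add") << "\n";  // add
  std::cout << TransToPtenKernelName("sign") << "\n";  // sign (no alias)
}
```

Names without an alias fall through unchanged, so ops that have no pten counterpart keep resolving under their fluid names.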
Parent 1716324c
......@@ -1760,7 +1760,8 @@ OpKernelType OperatorWithKernel::GetKernelTypeForVar(
KernelSignature OperatorWithKernel::GetExpectedPtenKernelArgs(
const ExecutionContext& ctx) const {
return KernelSignatureMap::Instance().Get(Type());
return KernelSignatureMap::Instance().Get(
pten::TransToPtenKernelName(Type()));
}
void OperatorWithKernel::BuildPtenKernelContext(
......
......@@ -101,10 +101,10 @@ KernelSignatureMap& KernelSignatureMap::Instance() {
if (pten::KernelFactory::Instance().HasCompatiblePtenKernel(op_type)) {
KernelArgsNameMakerByOpProto maker(op_proto);
VLOG(10) << "Register kernel signature for " << op_type;
auto success =
kernel_signature_map_->map_
.emplace(op_type, std::move(maker.GetKernelSignature()))
.second;
auto success = kernel_signature_map_->map_
.emplace(pten::TransToPtenKernelName(op_type),
std::move(maker.GetKernelSignature()))
.second;
PADDLE_ENFORCE_EQ(
success, true,
platform::errors::PermissionDenied(
......
......@@ -144,26 +144,25 @@ class ElementwiseOp : public framework::OperatorWithKernel {
const framework::ExecutionContext &ctx) const override {
if (Type() == "elementwise_add") {
if (ctx.InputVar("X")->IsType<framework::LoDTensor>()) {
return framework::KernelSignature("elementwise_add", {"X", "Y"},
{"axis"}, {"Out"});
return framework::KernelSignature("add", {"X", "Y"}, {"axis"}, {"Out"});
}
}
if (Type() == "elementwise_sub") {
if (ctx.InputVar("X")->IsType<framework::LoDTensor>()) {
return framework::KernelSignature("elementwise_sub", {"X", "Y"},
{"axis"}, {"Out"});
return framework::KernelSignature("subtract", {"X", "Y"}, {"axis"},
{"Out"});
}
}
if (Type() == "elementwise_div") {
if (ctx.InputVar("X")->IsType<framework::LoDTensor>()) {
return framework::KernelSignature("elementwise_div", {"X", "Y"},
{"axis"}, {"Out"});
return framework::KernelSignature("divide", {"X", "Y"}, {"axis"},
{"Out"});
}
}
if (Type() == "elementwise_mul") {
if (ctx.InputVar("X")->IsType<framework::LoDTensor>()) {
return framework::KernelSignature("elementwise_mul", {"X", "Y"},
{"axis"}, {"Out"});
return framework::KernelSignature("multiply", {"X", "Y"}, {"axis"},
{"Out"});
}
}
return framework::KernelSignature("None", {"X"}, {}, {"Out"});
......
......@@ -50,7 +50,7 @@ class FillAnyLikeOp : public framework::OperatorWithKernel {
framework::KernelSignature GetExpectedPtenKernelArgs(
const framework::ExecutionContext &ctx) const override {
return framework::KernelSignature("fill_any_like", {}, {"value"}, {"Out"});
return framework::KernelSignature("full_like", {}, {"value"}, {"Out"});
}
};
......
......@@ -118,8 +118,7 @@ class FillConstantOp : public framework::OperatorWithKernel {
value = str_value.empty() ? "value" : "str_value";
}
if (!ctx.OutputVar("Out")->IsType<framework::SelectedRows>()) {
return framework::KernelSignature("fill_constant", {}, {shape, value},
{"Out"});
return framework::KernelSignature("full", {}, {shape, value}, {"Out"});
}
return framework::KernelSignature("fill_constant.unregistered", {}, {}, {});
}
......
......@@ -337,11 +337,10 @@ class FlattenContiguousRangeOp : public framework::OperatorWithKernel {
framework::KernelSignature GetExpectedPtenKernelArgs(
const framework::ExecutionContext &ctx) const override {
if (ctx.HasOutput("XShape")) {
return framework::KernelSignature("flatten_contiguous_range.mid", {"X"},
{"start_axis", "stop_axis"},
{"Out", "XShape"});
return framework::KernelSignature(
"flatten.mid", {"X"}, {"start_axis", "stop_axis"}, {"Out", "XShape"});
} else {
return framework::KernelSignature("flatten_contiguous_range", {"X"},
return framework::KernelSignature("flatten", {"X"},
{"start_axis", "stop_axis"}, {"Out"});
}
}
......
......@@ -555,13 +555,13 @@ class Reshape2Op : public ReshapeOp {
const framework::ExecutionContext &ctx) const override {
auto multi_inputs = ctx.MultiInput<framework::Tensor>("ShapeTensor");
if (multi_inputs.size() > 0) {
return framework::KernelSignature("reshape2.mulhost",
{"X", "ShapeTensor"}, {}, {"Out"});
return framework::KernelSignature("reshape.mulhost", {"X", "ShapeTensor"},
{}, {"Out"});
} else if (ctx.HasInput("Shape")) {
return framework::KernelSignature("reshape2.host", {"X", "Shape"}, {},
return framework::KernelSignature("reshape.host", {"X", "Shape"}, {},
{"Out"});
} else {
return framework::KernelSignature("reshape2", {"X"}, {"shape"}, {"Out"});
return framework::KernelSignature("reshape", {"X"}, {"shape"}, {"Out"});
}
}
};
......
......@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/pten/core/convert_utils.h"
#include "paddle/pten/core/kernel_alias_name.h"
// See Note [ Why still include the fluid headers? ]
#include "paddle/fluid/platform/device/gpu/gpu_info.h"
......@@ -270,4 +270,12 @@ std::string DataType2String(DataType dtype) {
}
}
const std::string& TransToPtenKernelName(const std::string& fluid_op_name) {
if (kernel_alias_name_map.find(fluid_op_name) !=
kernel_alias_name_map.end()) {
return kernel_alias_name_map.at(fluid_op_name);
}
return fluid_op_name;
}
} // namespace pten
......@@ -32,6 +32,8 @@ namespace pten {
using DataType = paddle::experimental::DataType;
using DataLayout = paddle::experimental::DataLayout;
const std::string& TransToPtenKernelName(const std::string& fluid_op_name);
Backend TransToPtenBackend(const paddle::platform::Place& place);
DataType TransToPtenDataType(
const paddle::framework::proto::VarType::Type& dtype);
......
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
// TODO(yuanrisheng): this file may need to be removed
#pragma once

#include <string>
#include <unordered_map>

namespace pten {

// The key is the kernel name in fluid; the value is the kernel name in pten.
// Entries are sorted alphabetically by key.
const std::unordered_map<std::string, std::string> kernel_alias_name_map = {
{"elementwise_add", "add"},
{"elementwise_div", "divide"},
{"elementwise_mul", "muliply"},
{"elementwise_sub", "subtract"},
{"fill_any_like", "full_like"},
{"fill_constant", "full"},
{"flatten_contiguous_range", "flatten"},
// {"matmul_v2", "matmul"},
{"reduce_mean", "mean"},
{"reduce_sum", "sum"},
{"reshape2", "reshape"},
// The fluid kernels "mean", "reshape", "matmul", "flatten", and "sum" should be deprecated
{"flatten", "deprecated"},
// {"matmul", "deprecated"},
{"mean", "deprecated"},
{"reshape", "deprecated"},
{"sum", "deprecated"}};
} // namespace pten
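
The values in this table must match the names passed to `PT_REGISTER_KERNEL` (or be `"deprecated"`); a misspelled value would silently route signature lookups to a kernel that was never registered. A hypothetical consistency check, not part of this commit, could catch that early; the registered-name list below is collected by hand from the registrations in this diff.

```cpp
// Hypothetical consistency check (illustrative, not part of this commit):
// every alias value must be "deprecated" or a name that is actually
// registered via PT_REGISTER_KERNEL.
#include <cassert>
#include <set>
#include <string>
#include <unordered_map>

int main() {
  // Registered pten kernel names, collected by hand from this diff.
  const std::set<std::string> registered = {
      "add",     "subtract", "divide", "multiply", "full", "full_like",
      "flatten", "reshape",  "mean",   "sum",      "sign", "scale", "cast"};
  const std::unordered_map<std::string, std::string> aliases = {
      {"elementwise_add", "add"},
      {"elementwise_div", "divide"},
      {"elementwise_mul", "multiply"},
      {"elementwise_sub", "subtract"},
      {"fill_any_like", "full_like"},
      {"fill_constant", "full"},
      {"flatten_contiguous_range", "flatten"},
      {"reduce_mean", "mean"},
      {"reduce_sum", "sum"},
      {"reshape2", "reshape"}};
  for (const auto& kv : aliases) {
    // A misspelled value (e.g. "muliply" for "multiply") fails here
    // instead of surfacing later as a missing-kernel error at dispatch.
    assert(kv.second == "deprecated" || registered.count(kv.second) > 0);
  }
  return 0;
}
```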
......@@ -23,6 +23,7 @@
#include "paddle/pten/common/backend.h"
#include "paddle/pten/common/data_type.h"
#include "paddle/pten/common/layout.h"
#include "paddle/pten/core/convert_utils.h"
#include "paddle/pten/core/kernel_def.h"
// See Note [ Why still include the fluid headers? ]
......@@ -269,7 +270,7 @@ class KernelFactory {
}
bool HasCompatiblePtenKernel(const std::string& op_type) const {
return compatible_op_types_.count(op_type) > 0;
return compatible_op_types_.count(TransToPtenKernelName(op_type)) > 0;
}
const Kernel& SelectKernelOrThrowError(const KernelName& kernel_name,
......
......@@ -63,7 +63,7 @@ void FillConstant(const CPUContext& dev_ctx,
PT_REGISTER_MODULE(CreationCPU);
PT_REGISTER_KERNEL("fill_any_like",
PT_REGISTER_KERNEL("full_like",
CPU,
ANY,
pten::FillAnyLike,
......@@ -74,7 +74,7 @@ PT_REGISTER_KERNEL("fill_any_like",
bool,
paddle::platform::float16) {}
PT_REGISTER_KERNEL("fill_constant",
PT_REGISTER_KERNEL("full",
CPU,
ANY,
pten::FillConstant,
......
......@@ -135,7 +135,7 @@ PT_REGISTER_MODULE(ManipulationCPU);
// TODO(yuanrisheng): "flatten_contiguous_range" is compatible with old kernel
// architecture, kernel_name should be "flatten".
PT_REGISTER_KERNEL("flatten_contiguous_range",
PT_REGISTER_KERNEL("flatten",
CPU,
ANY,
pten::Flatten,
......@@ -146,7 +146,7 @@ PT_REGISTER_KERNEL("flatten_contiguous_range",
int,
int64_t) {}
PT_REGISTER_KERNEL("flatten_contiguous_range.mid",
PT_REGISTER_KERNEL("flatten.mid",
CPU,
ANY,
pten::FlattenWithXShape,
......@@ -176,32 +176,29 @@ PT_REGISTER_KERNEL("cast",
// TODO(yuanrisheng): "reshape2" is compatible with old kernel
// architecture, kernel_name should be "reshape".
PT_REGISTER_KERNEL_WITH_NO_TYPE("reshape2",
PT_REGISTER_KERNEL_WITH_NO_TYPE("reshape",
CPU,
ANY,
pten::ReshapeFromVectorVal) {}
PT_REGISTER_KERNEL_WITH_NO_TYPE("reshape2.mid",
PT_REGISTER_KERNEL_WITH_NO_TYPE("reshape.mid",
CPU,
ANY,
pten::ReshapeFromVectorValWithXShape) {}
PT_REGISTER_KERNEL_WITH_NO_TYPE("reshape2.host",
CPU,
ANY,
pten::ReshapeFromDT) {
PT_REGISTER_KERNEL_WITH_NO_TYPE("reshape.host", CPU, ANY, pten::ReshapeFromDT) {
kernel->InputAt(1).SetBackend(pten::Backend::CPU);
kernel->InputAt(1).SetDataType(paddle::experimental::DataType::INT32);
}
PT_REGISTER_KERNEL_WITH_NO_TYPE("reshape2.host.mid",
PT_REGISTER_KERNEL_WITH_NO_TYPE("reshape.host.mid",
CPU,
ANY,
pten::ReshapeFromDTWithXShape) {
kernel->InputAt(1).SetBackend(pten::Backend::CPU);
kernel->InputAt(1).SetDataType(paddle::experimental::DataType::INT32);
}
PT_REGISTER_KERNEL_WITH_NO_TYPE("reshape2.mulhost",
PT_REGISTER_KERNEL_WITH_NO_TYPE("reshape.mulhost",
CPU,
ANY,
pten::ReshapeFromVectorDT) {
......@@ -209,7 +206,7 @@ PT_REGISTER_KERNEL_WITH_NO_TYPE("reshape2.mulhost",
kernel->InputAt(1).SetDataType(paddle::experimental::DataType::INT32);
}
PT_REGISTER_KERNEL_WITH_NO_TYPE("reshape2.mulhost.mid",
PT_REGISTER_KERNEL_WITH_NO_TYPE("reshape.mulhost.mid",
CPU,
ANY,
pten::ReshapeFromVectorDTWithXShape) {
......
......@@ -116,7 +116,7 @@ using complex128 = ::paddle::platform::complex<double>;
// using bfloat16 = ::paddle::platform::bfloat16;
PT_REGISTER_KERNEL("sign", CPU, ANY, pten::Sign, float, double) {}
PT_REGISTER_KERNEL("reduce_mean", CPU, ANY, pten::Mean, float, double, bool) {}
PT_REGISTER_KERNEL("mean", CPU, ANY, pten::Mean, float, double, bool) {}
PT_REGISTER_KERNEL("scale",
CPU,
ANY,
......@@ -130,7 +130,7 @@ PT_REGISTER_KERNEL("scale",
int,
int64_t) {}
PT_REGISTER_KERNEL("elementwise_add",
PT_REGISTER_KERNEL("add",
CPU,
ANY,
pten::ElementwiseAdd,
......@@ -140,7 +140,7 @@ PT_REGISTER_KERNEL("elementwise_add",
int64_t,
complex64,
complex128) {}
PT_REGISTER_KERNEL("elementwise_sub",
PT_REGISTER_KERNEL("subtract",
CPU,
ANY,
pten::ElementwiseSub,
......@@ -150,7 +150,7 @@ PT_REGISTER_KERNEL("elementwise_sub",
int64_t,
complex64,
complex128) {}
PT_REGISTER_KERNEL("elementwise_div",
PT_REGISTER_KERNEL("divide",
CPU,
ANY,
pten::ElementwiseDiv,
......@@ -160,7 +160,7 @@ PT_REGISTER_KERNEL("elementwise_div",
int64_t,
complex64,
complex128) {}
PT_REGISTER_KERNEL("elementwise_mul",
PT_REGISTER_KERNEL("multiply",
CPU,
ANY,
pten::ElementwiseMul,
......@@ -172,7 +172,7 @@ PT_REGISTER_KERNEL("elementwise_mul",
complex64,
complex128) {}
PT_REGISTER_KERNEL("reduce_sum",
PT_REGISTER_KERNEL("sum",
CPU,
ANY,
pten::Sum,
......
......@@ -64,7 +64,7 @@ void FillConstant(const CUDAContext& dev_ctx,
PT_REGISTER_MODULE(CreationCUDA);
PT_REGISTER_KERNEL("fill_any_like",
PT_REGISTER_KERNEL("full_like",
CUDA,
ANY,
pten::FillAnyLike,
......@@ -75,7 +75,7 @@ PT_REGISTER_KERNEL("fill_any_like",
bool,
paddle::platform::float16) {}
PT_REGISTER_KERNEL("fill_constant",
PT_REGISTER_KERNEL("full",
CUDA,
ANY,
pten::FillConstant,
......
......@@ -135,7 +135,7 @@ PT_REGISTER_MODULE(ManipulationCUDA);
using float16 = paddle::platform::float16;
// TODO(yuanrisheng): "flatten_contiguous_range" is compatible with old kernel
// architecture, kernel_name should be "flatten".
PT_REGISTER_KERNEL("flatten_contiguous_range",
PT_REGISTER_KERNEL("flatten",
CUDA,
ANY,
pten::Flatten,
......@@ -147,7 +147,7 @@ PT_REGISTER_KERNEL("flatten_contiguous_range",
int,
int64_t) {}
PT_REGISTER_KERNEL("flatten_contiguous_range.mid",
PT_REGISTER_KERNEL("flatten.mid",
CUDA,
ANY,
pten::FlattenWithXShape,
......@@ -184,17 +184,17 @@ PTEN_REGISTER_CAST_CUDA_BASE_TYPE(cast, paddle::platform::bfloat16)
PTEN_REGISTER_CAST_CUDA_BASE_TYPE(cast)
#endif
PT_REGISTER_KERNEL_WITH_NO_TYPE("reshape2",
PT_REGISTER_KERNEL_WITH_NO_TYPE("reshape",
CUDA,
ANY,
pten::ReshapeFromVectorVal) {}
PT_REGISTER_KERNEL_WITH_NO_TYPE("reshape2.mid",
PT_REGISTER_KERNEL_WITH_NO_TYPE("reshape.mid",
CUDA,
ANY,
pten::ReshapeFromVectorValWithXShape) {}
PT_REGISTER_KERNEL_WITH_NO_TYPE("reshape2.host",
PT_REGISTER_KERNEL_WITH_NO_TYPE("reshape.host",
CUDA,
ANY,
pten::ReshapeFromDT) {
......@@ -202,7 +202,7 @@ PT_REGISTER_KERNEL_WITH_NO_TYPE("reshape2.host",
kernel->InputAt(1).SetDataType(paddle::experimental::DataType::INT32);
}
PT_REGISTER_KERNEL_WITH_NO_TYPE("reshape2.host.mid",
PT_REGISTER_KERNEL_WITH_NO_TYPE("reshape.host.mid",
CUDA,
ANY,
pten::ReshapeFromDTWithXShape) {
......@@ -210,7 +210,7 @@ PT_REGISTER_KERNEL_WITH_NO_TYPE("reshape2.host.mid",
kernel->InputAt(1).SetDataType(paddle::experimental::DataType::INT32);
}
PT_REGISTER_KERNEL_WITH_NO_TYPE("reshape2.mulhost",
PT_REGISTER_KERNEL_WITH_NO_TYPE("reshape.mulhost",
CUDA,
ANY,
pten::ReshapeFromVectorDT) {
......@@ -218,7 +218,7 @@ PT_REGISTER_KERNEL_WITH_NO_TYPE("reshape2.mulhost",
kernel->InputAt(1).SetDataType(paddle::experimental::DataType::INT32);
}
PT_REGISTER_KERNEL_WITH_NO_TYPE("reshape2.mulhost.mid",
PT_REGISTER_KERNEL_WITH_NO_TYPE("reshape.mulhost.mid",
CUDA,
ANY,
pten::ReshapeFromVectorDTWithXShape) {
......
......@@ -119,7 +119,7 @@ using complex64 = ::paddle::platform::complex<float>;
using complex128 = ::paddle::platform::complex<double>;
PT_REGISTER_KERNEL("sign", CUDA, ANY, pten::Sign, float, double, float16) {}
PT_REGISTER_KERNEL("reduce_mean", CUDA, ANY, pten::Mean, float, double, bool) {}
PT_REGISTER_KERNEL("mean", CUDA, ANY, pten::Mean, float, double, bool) {}
PT_REGISTER_KERNEL("scale",
CUDA,
ANY,
......@@ -132,7 +132,7 @@ PT_REGISTER_KERNEL("scale",
int16_t,
int,
int64_t) {}
PT_REGISTER_KERNEL("elementwise_add",
PT_REGISTER_KERNEL("add",
CUDA,
ANY,
pten::ElementwiseAdd,
......@@ -143,7 +143,7 @@ PT_REGISTER_KERNEL("elementwise_add",
float16,
complex64,
complex128) {}
PT_REGISTER_KERNEL("elementwise_sub",
PT_REGISTER_KERNEL("subtract",
CUDA,
ANY,
pten::ElementwiseSub,
......@@ -154,7 +154,7 @@ PT_REGISTER_KERNEL("elementwise_sub",
float16,
complex64,
complex128) {}
PT_REGISTER_KERNEL("elementwise_div",
PT_REGISTER_KERNEL("divide",
CUDA,
ANY,
pten::ElementwiseDiv,
......@@ -165,7 +165,7 @@ PT_REGISTER_KERNEL("elementwise_div",
float16,
complex64,
complex128) {}
PT_REGISTER_KERNEL("elementwise_mul",
PT_REGISTER_KERNEL("multiply",
CUDA,
ANY,
pten::ElementwiseMul,
......@@ -177,7 +177,7 @@ PT_REGISTER_KERNEL("elementwise_mul",
float16,
complex64,
complex128) {}
PT_REGISTER_KERNEL("reduce_sum",
PT_REGISTER_KERNEL("sum",
CUDA,
ANY,
pten::Sum,
......
......@@ -5,7 +5,7 @@
func : ElementwiseInferMeta
param : [x, y, -1]
kernel :
func : elementwise_add
func : add
param : [x, y, -1]
- api : cast
......@@ -25,7 +25,7 @@
func : ElementwiseInferMeta
param : [x, y, -1]
kernel :
func : elementwise_div
func : divide
param : [x, y, -1]
- api : dot
......@@ -42,7 +42,7 @@
infer_meta :
func : FlattenInferMeta
kernel :
func : flatten_contiguous_range
func : flatten
- api : full
args : (const ScalarArray& shape, const Scalar& value, DataType dtype=DataType::FLOAT32, Backend place=Backend::CPU, DataLayout layout=DataLayout::NCHW)
......@@ -51,7 +51,7 @@
func : FullInferMeta
param : [shape, dtype, layout]
kernel :
func : fill_constant
func : full
param : [shape, value]
data_type : dtype
backend : place
......@@ -64,7 +64,7 @@
func : FullLikeInferMeta
param : [x, dtype, layout]
kernel :
func : fill_any_like
func : full_like
param : [x, value]
data_type : dtype > x
backend : place > x
......@@ -84,7 +84,7 @@
infer_meta :
func : ReduceInferMeta
kernel :
func : reduce_mean
func : mean
param : [x, axis, keep_dim, false, x.dtype(), DataType::UNDEFINED]
- api : multiply
......@@ -94,7 +94,7 @@
func : ElementwiseInferMeta
param : [x, y, -1]
kernel :
func : elementwise_mul
func : multiply
param : [x, y, -1]
- api : ones_like
......@@ -108,7 +108,7 @@
infer_meta :
func : InferMetaFromVecValue
kernel :
func : reshape2
func : reshape
- api : scale
args : (const Tensor& x, const Scalar& scale, float bias, bool bias_after_scale)
......@@ -126,7 +126,7 @@
func : ElementwiseInferMeta
param : [x, y, -1]
kernel :
func : elementwise_sub
func : subtract
param : [x, y, -1]
- api : sum
......@@ -135,7 +135,7 @@
infer_meta :
func : ReduceInferMeta
kernel :
func : reduce_sum
func : sum
param : [x, axis, keep_dim, false, x.dtype(), DataType::UNDEFINED]
- api : zeros_like
......