From ff6507db5641ff673292098f4729b8efb2f028ff Mon Sep 17 00:00:00 2001
From: YuanRisheng
Date: Wed, 8 Dec 2021 16:40:20 +0800
Subject: [PATCH] [PTen]Add alias kernel name (#37881)

* add alias kernel name

* modify code as suggestions
---
 paddle/fluid/framework/operator.cc          |  3 +-
 paddle/fluid/framework/pten_utils.cc        |  8 ++--
 .../operators/elementwise/elementwise_op.h  | 15 ++++---
 paddle/fluid/operators/fill_any_like_op.cc  |  2 +-
 paddle/fluid/operators/fill_constant_op.cc  |  3 +-
 paddle/fluid/operators/flatten_op.cc        |  7 ++--
 paddle/fluid/operators/reshape_op.cc        |  8 ++--
 paddle/pten/core/convert_utils.cc           | 10 ++++-
 paddle/pten/core/convert_utils.h            |  2 +
 paddle/pten/core/kernel_alias_name.h        | 41 +++++++++++++++++++
 paddle/pten/core/kernel_factory.h           |  3 +-
 paddle/pten/kernels/cpu/creation.cc         |  4 +-
 paddle/pten/kernels/cpu/manipulation.cc     | 19 ++++-----
 paddle/pten/kernels/cpu/math.cc             | 12 +++---
 paddle/pten/kernels/cuda/creation.cu        |  4 +-
 paddle/pten/kernels/cuda/manipulation.cu    | 16 ++++----
 paddle/pten/kernels/cuda/math.cu            | 12 +++---
 python/paddle/utils/code_gen/api.yaml       | 20 ++++-----
 18 files changed, 118 insertions(+), 71 deletions(-)
 create mode 100644 paddle/pten/core/kernel_alias_name.h

diff --git a/paddle/fluid/framework/operator.cc b/paddle/fluid/framework/operator.cc
index 4236fcf8dc..716f0a85c1 100644
--- a/paddle/fluid/framework/operator.cc
+++ b/paddle/fluid/framework/operator.cc
@@ -1760,7 +1760,8 @@ OpKernelType OperatorWithKernel::GetKernelTypeForVar(
 
 KernelSignature OperatorWithKernel::GetExpectedPtenKernelArgs(
     const ExecutionContext& ctx) const {
-  return KernelSignatureMap::Instance().Get(Type());
+  return KernelSignatureMap::Instance().Get(
+      pten::TransToPtenKernelName(Type()));
 }
 
 void OperatorWithKernel::BuildPtenKernelContext(
diff --git a/paddle/fluid/framework/pten_utils.cc b/paddle/fluid/framework/pten_utils.cc
index 51a2d641bb..55254c65fa 100644
--- a/paddle/fluid/framework/pten_utils.cc
+++ b/paddle/fluid/framework/pten_utils.cc
@@ -101,10 +101,10 @@ KernelSignatureMap& KernelSignatureMap::Instance() {
     if (pten::KernelFactory::Instance().HasCompatiblePtenKernel(op_type)) {
       KernelArgsNameMakerByOpProto maker(op_proto);
       VLOG(10) << "Register kernel signature for " << op_type;
-      auto success =
-          kernel_signature_map_->map_
-              .emplace(op_type, std::move(maker.GetKernelSignature()))
-              .second;
+      auto success = kernel_signature_map_->map_
+                         .emplace(pten::TransToPtenKernelName(op_type),
+                                  std::move(maker.GetKernelSignature()))
+                         .second;
       PADDLE_ENFORCE_EQ(
           success, true,
           platform::errors::PermissionDenied(
diff --git a/paddle/fluid/operators/elementwise/elementwise_op.h b/paddle/fluid/operators/elementwise/elementwise_op.h
index a52262f66d..91867c890d 100644
--- a/paddle/fluid/operators/elementwise/elementwise_op.h
+++ b/paddle/fluid/operators/elementwise/elementwise_op.h
@@ -144,26 +144,25 @@ class ElementwiseOp : public framework::OperatorWithKernel {
       const framework::ExecutionContext &ctx) const override {
     if (Type() == "elementwise_add") {
       if (ctx.InputVar("X")->IsType<framework::LoDTensor>()) {
-        return framework::KernelSignature("elementwise_add", {"X", "Y"},
-                                          {"axis"}, {"Out"});
+        return framework::KernelSignature("add", {"X", "Y"}, {"axis"}, {"Out"});
       }
     }
     if (Type() == "elementwise_sub") {
       if (ctx.InputVar("X")->IsType<framework::LoDTensor>()) {
-        return framework::KernelSignature("elementwise_sub", {"X", "Y"},
-                                          {"axis"}, {"Out"});
+        return framework::KernelSignature("subtract", {"X", "Y"}, {"axis"},
+                                          {"Out"});
       }
     }
     if (Type() == "elementwise_div") {
       if (ctx.InputVar("X")->IsType<framework::LoDTensor>()) {
-        return framework::KernelSignature("elementwise_div", {"X", "Y"},
framework::KernelSignature("elementwise_div", {"X", "Y"}, - {"axis"}, {"Out"}); + return framework::KernelSignature("divide", {"X", "Y"}, {"axis"}, + {"Out"}); } } if (Type() == "elementwise_mul") { if (ctx.InputVar("X")->IsType()) { - return framework::KernelSignature("elementwise_mul", {"X", "Y"}, - {"axis"}, {"Out"}); + return framework::KernelSignature("multiply", {"X", "Y"}, {"axis"}, + {"Out"}); } } return framework::KernelSignature("None", {"X"}, {}, {"Out"}); diff --git a/paddle/fluid/operators/fill_any_like_op.cc b/paddle/fluid/operators/fill_any_like_op.cc index fb97030520..245a8977c0 100644 --- a/paddle/fluid/operators/fill_any_like_op.cc +++ b/paddle/fluid/operators/fill_any_like_op.cc @@ -50,7 +50,7 @@ class FillAnyLikeOp : public framework::OperatorWithKernel { framework::KernelSignature GetExpectedPtenKernelArgs( const framework::ExecutionContext &ctx) const override { - return framework::KernelSignature("fill_any_like", {}, {"value"}, {"Out"}); + return framework::KernelSignature("full_like", {}, {"value"}, {"Out"}); } }; diff --git a/paddle/fluid/operators/fill_constant_op.cc b/paddle/fluid/operators/fill_constant_op.cc index c28ca45fc1..ee7c0eb96e 100644 --- a/paddle/fluid/operators/fill_constant_op.cc +++ b/paddle/fluid/operators/fill_constant_op.cc @@ -118,8 +118,7 @@ class FillConstantOp : public framework::OperatorWithKernel { value = str_value.empty() ? "value" : "str_value"; } if (!ctx.OutputVar("Out")->IsType()) { - return framework::KernelSignature("fill_constant", {}, {shape, value}, - {"Out"}); + return framework::KernelSignature("full", {}, {shape, value}, {"Out"}); } return framework::KernelSignature("fill_constant.unregistered", {}, {}, {}); } diff --git a/paddle/fluid/operators/flatten_op.cc b/paddle/fluid/operators/flatten_op.cc index 517422af1f..dcd31f1ded 100644 --- a/paddle/fluid/operators/flatten_op.cc +++ b/paddle/fluid/operators/flatten_op.cc @@ -337,11 +337,10 @@ class FlattenContiguousRangeOp : public framework::OperatorWithKernel { framework::KernelSignature GetExpectedPtenKernelArgs( const framework::ExecutionContext &ctx) const override { if (ctx.HasOutput("XShape")) { - return framework::KernelSignature("flatten_contiguous_range.mid", {"X"}, - {"start_axis", "stop_axis"}, - {"Out", "XShape"}); + return framework::KernelSignature( + "flatten.mid", {"X"}, {"start_axis", "stop_axis"}, {"Out", "XShape"}); } else { - return framework::KernelSignature("flatten_contiguous_range", {"X"}, + return framework::KernelSignature("flatten", {"X"}, {"start_axis", "stop_axis"}, {"Out"}); } } diff --git a/paddle/fluid/operators/reshape_op.cc b/paddle/fluid/operators/reshape_op.cc index c12db12938..ed06fac298 100644 --- a/paddle/fluid/operators/reshape_op.cc +++ b/paddle/fluid/operators/reshape_op.cc @@ -555,13 +555,13 @@ class Reshape2Op : public ReshapeOp { const framework::ExecutionContext &ctx) const override { auto multi_inputs = ctx.MultiInput("ShapeTensor"); if (multi_inputs.size() > 0) { - return framework::KernelSignature("reshape2.mulhost", - {"X", "ShapeTensor"}, {}, {"Out"}); + return framework::KernelSignature("reshape.mulhost", {"X", "ShapeTensor"}, + {}, {"Out"}); } else if (ctx.HasInput("Shape")) { - return framework::KernelSignature("reshape2.host", {"X", "Shape"}, {}, + return framework::KernelSignature("reshape.host", {"X", "Shape"}, {}, {"Out"}); } else { - return framework::KernelSignature("reshape2", {"X"}, {"shape"}, {"Out"}); + return framework::KernelSignature("reshape", {"X"}, {"shape"}, {"Out"}); } } }; diff --git 
index 211734f331..936d4effdf 100644
--- a/paddle/pten/core/convert_utils.cc
+++ b/paddle/pten/core/convert_utils.cc
@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
 
 #include "paddle/pten/core/convert_utils.h"
-
+#include "paddle/pten/core/kernel_alias_name.h"
 // See Note [ Why still include the fluid headers? ]
 #include "paddle/fluid/platform/device/gpu/gpu_info.h"
@@ -270,4 +270,12 @@ std::string DataType2String(DataType dtype) {
   }
 }
 
+const std::string& TransToPtenKernelName(const std::string& fluid_op_name) {
+  if (kernel_alias_name_map.find(fluid_op_name) !=
+      kernel_alias_name_map.end()) {
+    return kernel_alias_name_map.at(fluid_op_name);
+  }
+  return fluid_op_name;
+}
+
 }  // namespace pten
diff --git a/paddle/pten/core/convert_utils.h b/paddle/pten/core/convert_utils.h
index 32ed753b4b..49c905a84e 100644
--- a/paddle/pten/core/convert_utils.h
+++ b/paddle/pten/core/convert_utils.h
@@ -32,6 +32,8 @@ namespace pten {
 using DataType = paddle::experimental::DataType;
 using DataLayout = paddle::experimental::DataLayout;
 
+const std::string& TransToPtenKernelName(const std::string& fluid_op_name);
+
 Backend TransToPtenBackend(const paddle::platform::Place& place);
 DataType TransToPtenDataType(
     const paddle::framework::proto::VarType::Type& dtype);
diff --git a/paddle/pten/core/kernel_alias_name.h b/paddle/pten/core/kernel_alias_name.h
new file mode 100644
index 0000000000..0310b6e6fa
--- /dev/null
+++ b/paddle/pten/core/kernel_alias_name.h
@@ -0,0 +1,41 @@
+/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+// TODO(yuanrisheng): this file may need to be removed
+#pragma once
+
+namespace pten {
+
+// the key is kernel_name in fluid, the value is the kernel_name in pten
+// the key is sorted by key's alphabet
+const std::unordered_map<std::string, std::string> kernel_alias_name_map = {
+    {"elementwise_add", "add"},
+    {"elementwise_div", "divide"},
+    {"elementwise_mul", "multiply"},
+    {"elementwise_sub", "subtract"},
+    {"fill_any_like", "full_like"},
+    {"fill_constant", "full"},
+    {"flatten_contiguous_range", "flatten"},
+    // {"matmul_v2", "matmul"},
+    {"reduce_mean", "mean"},
+    {"reduce_sum", "sum"},
+    {"reshape2", "reshape"},
+    // fluid kernel "mean/reshape/matmul/flatten/sum" should be deprecated
+    {"flatten", "deprecated"},
+    // {"matmul", "deprecated"},
+    {"mean", "deprecated"},
+    {"reshape", "deprecated"},
+    {"sum", "deprecated"}};
+
+}  // namespace pten
diff --git a/paddle/pten/core/kernel_factory.h b/paddle/pten/core/kernel_factory.h
index 4ec80521b4..dbdf90b5bd 100644
--- a/paddle/pten/core/kernel_factory.h
+++ b/paddle/pten/core/kernel_factory.h
@@ -23,6 +23,7 @@
 #include "paddle/pten/common/backend.h"
 #include "paddle/pten/common/data_type.h"
 #include "paddle/pten/common/layout.h"
+#include "paddle/pten/core/convert_utils.h"
 #include "paddle/pten/core/kernel_def.h"
 
 // See Note [ Why still include the fluid headers? ]
@@ -269,7 +270,7 @@ class KernelFactory {
   }
 
   bool HasCompatiblePtenKernel(const std::string& op_type) const {
-    return compatible_op_types_.count(op_type) > 0;
+    return compatible_op_types_.count(TransToPtenKernelName(op_type)) > 0;
   }
 
   const Kernel& SelectKernelOrThrowError(const KernelName& kernel_name,
diff --git a/paddle/pten/kernels/cpu/creation.cc b/paddle/pten/kernels/cpu/creation.cc
index 71c4e9f1eb..db3d5c2bf4 100644
--- a/paddle/pten/kernels/cpu/creation.cc
+++ b/paddle/pten/kernels/cpu/creation.cc
@@ -63,7 +63,7 @@ void FillConstant(const CPUContext& dev_ctx,
 
 PT_REGISTER_MODULE(CreationCPU);
 
-PT_REGISTER_KERNEL("fill_any_like",
+PT_REGISTER_KERNEL("full_like",
                    CPU,
                    ANY,
                    pten::FillAnyLike,
@@ -74,7 +74,7 @@ PT_REGISTER_KERNEL("fill_any_like",
                    bool,
                    paddle::platform::float16) {}
 
-PT_REGISTER_KERNEL("fill_constant",
+PT_REGISTER_KERNEL("full",
                    CPU,
                    ANY,
                    pten::FillConstant,
diff --git a/paddle/pten/kernels/cpu/manipulation.cc b/paddle/pten/kernels/cpu/manipulation.cc
index 7693e204ea..bf94d00964 100644
--- a/paddle/pten/kernels/cpu/manipulation.cc
+++ b/paddle/pten/kernels/cpu/manipulation.cc
@@ -135,7 +135,7 @@ PT_REGISTER_MODULE(ManipulationCPU);
 
 // TODO(yuanrisheng): "flatten_contiguous_range" is compatible with old kernel
 // architecture, kernel_name should be "flatten".
-PT_REGISTER_KERNEL("flatten_contiguous_range",
+PT_REGISTER_KERNEL("flatten",
                    CPU,
                    ANY,
                    pten::Flatten,
@@ -146,7 +146,7 @@ PT_REGISTER_KERNEL("flatten_contiguous_range",
                    int,
                    int64_t) {}
 
-PT_REGISTER_KERNEL("flatten_contiguous_range.mid",
+PT_REGISTER_KERNEL("flatten.mid",
                    CPU,
                    ANY,
                    pten::FlattenWithXShape,
@@ -176,32 +176,29 @@ PT_REGISTER_KERNEL("cast",
 // TODO(yuanrisheng): "reshape2" is compatible with old kernel
 // architecture, kernel_name should be "reshape".
-PT_REGISTER_KERNEL_WITH_NO_TYPE("reshape2",
+PT_REGISTER_KERNEL_WITH_NO_TYPE("reshape",
                                 CPU,
                                 ANY,
                                 pten::ReshapeFromVectorVal) {}
 
-PT_REGISTER_KERNEL_WITH_NO_TYPE("reshape2.mid",
+PT_REGISTER_KERNEL_WITH_NO_TYPE("reshape.mid",
                                 CPU,
                                 ANY,
                                 pten::ReshapeFromVectorValWithXShape) {}
 
-PT_REGISTER_KERNEL_WITH_NO_TYPE("reshape2.host",
-                                CPU,
-                                ANY,
-                                pten::ReshapeFromDT) {
+PT_REGISTER_KERNEL_WITH_NO_TYPE("reshape.host", CPU, ANY, pten::ReshapeFromDT) {
   kernel->InputAt(1).SetBackend(pten::Backend::CPU);
   kernel->InputAt(1).SetDataType(paddle::experimental::DataType::INT32);
 }
 
-PT_REGISTER_KERNEL_WITH_NO_TYPE("reshape2.host.mid",
+PT_REGISTER_KERNEL_WITH_NO_TYPE("reshape.host.mid",
                                 CPU,
                                 ANY,
                                 pten::ReshapeFromDTWithXShape) {
   kernel->InputAt(1).SetBackend(pten::Backend::CPU);
   kernel->InputAt(1).SetDataType(paddle::experimental::DataType::INT32);
 }
 
-PT_REGISTER_KERNEL_WITH_NO_TYPE("reshape2.mulhost",
+PT_REGISTER_KERNEL_WITH_NO_TYPE("reshape.mulhost",
                                 CPU,
                                 ANY,
                                 pten::ReshapeFromVectorDT) {
@@ -209,7 +206,7 @@ PT_REGISTER_KERNEL_WITH_NO_TYPE("reshape2.mulhost",
   kernel->InputAt(1).SetDataType(paddle::experimental::DataType::INT32);
 }
 
-PT_REGISTER_KERNEL_WITH_NO_TYPE("reshape2.mulhost.mid",
+PT_REGISTER_KERNEL_WITH_NO_TYPE("reshape.mulhost.mid",
                                 CPU,
                                 ANY,
                                 pten::ReshapeFromVectorDTWithXShape) {
diff --git a/paddle/pten/kernels/cpu/math.cc b/paddle/pten/kernels/cpu/math.cc
index 05ca7a3ae5..e768d4f1ef 100644
--- a/paddle/pten/kernels/cpu/math.cc
+++ b/paddle/pten/kernels/cpu/math.cc
@@ -116,7 +116,7 @@ using complex128 = ::paddle::platform::complex<double>;
 // using bfloat16 = ::paddle::platform::bfloat16;
 
 PT_REGISTER_KERNEL("sign", CPU, ANY, pten::Sign, float, double) {}
-PT_REGISTER_KERNEL("reduce_mean", CPU, ANY, pten::Mean, float, double, bool) {}
+PT_REGISTER_KERNEL("mean", CPU, ANY, pten::Mean, float, double, bool) {}
 PT_REGISTER_KERNEL("scale",
                    CPU,
                    ANY,
@@ -130,7 +130,7 @@ PT_REGISTER_KERNEL("scale",
                    int,
                    int64_t) {}
 
-PT_REGISTER_KERNEL("elementwise_add",
+PT_REGISTER_KERNEL("add",
                    CPU,
                    ANY,
                    pten::ElementwiseAdd,
@@ -140,7 +140,7 @@ PT_REGISTER_KERNEL("elementwise_add",
                    int64_t,
                    complex64,
                    complex128) {}
-PT_REGISTER_KERNEL("elementwise_sub",
+PT_REGISTER_KERNEL("subtract",
                    CPU,
                    ANY,
                    pten::ElementwiseSub,
@@ -150,7 +150,7 @@ PT_REGISTER_KERNEL("elementwise_sub",
                    int64_t,
                    complex64,
                    complex128) {}
-PT_REGISTER_KERNEL("elementwise_div",
+PT_REGISTER_KERNEL("divide",
                    CPU,
                    ANY,
                    pten::ElementwiseDiv,
@@ -160,7 +160,7 @@ PT_REGISTER_KERNEL("elementwise_div",
                    int64_t,
                    complex64,
                    complex128) {}
-PT_REGISTER_KERNEL("elementwise_mul",
+PT_REGISTER_KERNEL("multiply",
                    CPU,
                    ANY,
                    pten::ElementwiseMul,
@@ -172,7 +172,7 @@ PT_REGISTER_KERNEL("elementwise_mul",
                    complex64,
                    complex128) {}
 
-PT_REGISTER_KERNEL("reduce_sum",
+PT_REGISTER_KERNEL("sum",
                    CPU,
                    ANY,
                    pten::Sum,
diff --git a/paddle/pten/kernels/cuda/creation.cu b/paddle/pten/kernels/cuda/creation.cu
index 92d3b73ff1..84d9fa255c 100644
--- a/paddle/pten/kernels/cuda/creation.cu
+++ b/paddle/pten/kernels/cuda/creation.cu
@@ -64,7 +64,7 @@ void FillConstant(const CUDAContext& dev_ctx,
 
 PT_REGISTER_MODULE(CreationCUDA);
 
-PT_REGISTER_KERNEL("fill_any_like",
+PT_REGISTER_KERNEL("full_like",
                    CUDA,
                    ANY,
                    pten::FillAnyLike,
@@ -75,7 +75,7 @@ PT_REGISTER_KERNEL("fill_any_like",
                    bool,
                    paddle::platform::float16) {}
 
-PT_REGISTER_KERNEL("fill_constant",
+PT_REGISTER_KERNEL("full",
                    CUDA,
                    ANY,
                    pten::FillConstant,
diff --git a/paddle/pten/kernels/cuda/manipulation.cu b/paddle/pten/kernels/cuda/manipulation.cu
index 1a1d5cef30..9c7fded091 100644
--- a/paddle/pten/kernels/cuda/manipulation.cu
+++ b/paddle/pten/kernels/cuda/manipulation.cu
@@ -135,7 +135,7 @@ PT_REGISTER_MODULE(ManipulationCUDA);
 using float16 = paddle::platform::float16;
 // TODO(yuanrisheng): "flatten_contiguous_range" is compatible with old kernel
 // architecture, kernel_name should be "flatten".
-PT_REGISTER_KERNEL("flatten_contiguous_range",
+PT_REGISTER_KERNEL("flatten",
                    CUDA,
                    ANY,
                    pten::Flatten,
@@ -147,7 +147,7 @@ PT_REGISTER_KERNEL("flatten_contiguous_range",
                    int,
                    int64_t) {}
 
-PT_REGISTER_KERNEL("flatten_contiguous_range.mid",
+PT_REGISTER_KERNEL("flatten.mid",
                    CUDA,
                    ANY,
                    pten::FlattenWithXShape,
@@ -184,17 +184,17 @@ PTEN_REGISTER_CAST_CUDA_BASE_TYPE(cast, paddle::platform::bfloat16)
 PTEN_REGISTER_CAST_CUDA_BASE_TYPE(cast)
 #endif
 
-PT_REGISTER_KERNEL_WITH_NO_TYPE("reshape2",
+PT_REGISTER_KERNEL_WITH_NO_TYPE("reshape",
                                 CUDA,
                                 ANY,
                                 pten::ReshapeFromVectorVal) {}
 
-PT_REGISTER_KERNEL_WITH_NO_TYPE("reshape2.mid",
+PT_REGISTER_KERNEL_WITH_NO_TYPE("reshape.mid",
                                 CUDA,
                                 ANY,
                                 pten::ReshapeFromVectorValWithXShape) {}
 
-PT_REGISTER_KERNEL_WITH_NO_TYPE("reshape2.host",
+PT_REGISTER_KERNEL_WITH_NO_TYPE("reshape.host",
                                 CUDA,
                                 ANY,
                                 pten::ReshapeFromDT) {
@@ -202,7 +202,7 @@ PT_REGISTER_KERNEL_WITH_NO_TYPE("reshape2.host",
   kernel->InputAt(1).SetDataType(paddle::experimental::DataType::INT32);
 }
 
-PT_REGISTER_KERNEL_WITH_NO_TYPE("reshape2.host.mid",
+PT_REGISTER_KERNEL_WITH_NO_TYPE("reshape.host.mid",
                                 CUDA,
                                 ANY,
                                 pten::ReshapeFromDTWithXShape) {
@@ -210,7 +210,7 @@ PT_REGISTER_KERNEL_WITH_NO_TYPE("reshape2.host.mid",
   kernel->InputAt(1).SetDataType(paddle::experimental::DataType::INT32);
 }
 
-PT_REGISTER_KERNEL_WITH_NO_TYPE("reshape2.mulhost",
+PT_REGISTER_KERNEL_WITH_NO_TYPE("reshape.mulhost",
                                 CUDA,
                                 ANY,
                                 pten::ReshapeFromVectorDT) {
@@ -218,7 +218,7 @@ PT_REGISTER_KERNEL_WITH_NO_TYPE("reshape2.mulhost",
   kernel->InputAt(1).SetDataType(paddle::experimental::DataType::INT32);
 }
 
-PT_REGISTER_KERNEL_WITH_NO_TYPE("reshape2.mulhost.mid",
+PT_REGISTER_KERNEL_WITH_NO_TYPE("reshape.mulhost.mid",
                                 CUDA,
                                 ANY,
                                 pten::ReshapeFromVectorDTWithXShape) {
diff --git a/paddle/pten/kernels/cuda/math.cu b/paddle/pten/kernels/cuda/math.cu
index 8d6abc9285..e7fa599cb6 100644
--- a/paddle/pten/kernels/cuda/math.cu
+++ b/paddle/pten/kernels/cuda/math.cu
@@ -119,7 +119,7 @@ using complex64 = ::paddle::platform::complex<float>;
 using complex128 = ::paddle::platform::complex<double>;
 
 PT_REGISTER_KERNEL("sign", CUDA, ANY, pten::Sign, float, double, float16) {}
-PT_REGISTER_KERNEL("reduce_mean", CUDA, ANY, pten::Mean, float, double, bool) {}
+PT_REGISTER_KERNEL("mean", CUDA, ANY, pten::Mean, float, double, bool) {}
 PT_REGISTER_KERNEL("scale",
                    CUDA,
                    ANY,
@@ -132,7 +132,7 @@ PT_REGISTER_KERNEL("scale",
                    int16_t,
                    int,
                    int64_t) {}
-PT_REGISTER_KERNEL("elementwise_add",
+PT_REGISTER_KERNEL("add",
                    CUDA,
                    ANY,
                    pten::ElementwiseAdd,
@@ -143,7 +143,7 @@ PT_REGISTER_KERNEL("elementwise_add",
                    float16,
                    complex64,
                    complex128) {}
-PT_REGISTER_KERNEL("elementwise_sub",
+PT_REGISTER_KERNEL("subtract",
                    CUDA,
                    ANY,
                    pten::ElementwiseSub,
@@ -154,7 +154,7 @@ PT_REGISTER_KERNEL("elementwise_sub",
                    float16,
                    complex64,
                    complex128) {}
-PT_REGISTER_KERNEL("elementwise_div",
+PT_REGISTER_KERNEL("divide",
                    CUDA,
                    ANY,
                    pten::ElementwiseDiv,
@@ -165,7 +165,7 @@ PT_REGISTER_KERNEL("elementwise_div",
                    float16,
                    complex64,
                    complex128) {}
-PT_REGISTER_KERNEL("elementwise_mul",
+PT_REGISTER_KERNEL("multiply",
                    CUDA,
                    ANY,
                    pten::ElementwiseMul,
@@ -177,7 +177,7 @@ PT_REGISTER_KERNEL("elementwise_mul",
                    float16,
                    complex64,
                    complex128) {}
-PT_REGISTER_KERNEL("reduce_sum",
+PT_REGISTER_KERNEL("sum", CUDA, ANY, pten::Sum, diff --git a/python/paddle/utils/code_gen/api.yaml b/python/paddle/utils/code_gen/api.yaml index 581aaef62a..3d61caae00 100644 --- a/python/paddle/utils/code_gen/api.yaml +++ b/python/paddle/utils/code_gen/api.yaml @@ -5,7 +5,7 @@ func : ElementwiseInferMeta param : [x, y, -1] kernel : - func : elementwise_add + func : add param : [x, y, -1] - api : cast @@ -25,7 +25,7 @@ func : ElementwiseInferMeta param : [x, y, -1] kernel : - func : elementwise_div + func : divide param : [x, y, -1] - api : dot @@ -42,7 +42,7 @@ infer_meta : func : FlattenInferMeta kernel : - func : flatten_contiguous_range + func : flatten - api : full args : (const ScalarArray& shape, const Scalar& value, DataType dtype=DataType::FLOAT32, Backend place=Backend::CPU, DataLayout layout=DataLayout::NCHW) @@ -51,7 +51,7 @@ func : FullInferMeta param : [shape, dtype, layout] kernel : - func : fill_constant + func : full param : [shape, value] data_type : dtype backend : place @@ -64,7 +64,7 @@ func : FullLikeInferMeta param : [x, dtype, layout] kernel : - func : fill_any_like + func : full_like param : [x, value] data_type : dtype > x backend : place > x @@ -84,7 +84,7 @@ infer_meta : func : ReduceInferMeta kernel : - func : reduce_mean + func : mean param : [x, axis, keep_dim, false, x.dtype(), DataType::UNDEFINED] - api : multiply @@ -94,7 +94,7 @@ func : ElementwiseInferMeta param : [x, y, -1] kernel : - func : elementwise_mul + func : multiply param : [x, y, -1] - api : ones_like @@ -108,7 +108,7 @@ infer_meta : func : InferMetaFromVecValue kernel : - func : reshape2 + func : reshape - api : scale args : (const Tensor& x, const Scalar& scale, float bias, bool bias_after_scale) @@ -126,7 +126,7 @@ func : ElementwiseInferMeta param : [x, y, -1] kernel : - func : elementwise_sub + func : subtract param : [x, y, -1] - api : sum @@ -135,7 +135,7 @@ infer_meta : func : ReduceInferMeta kernel : - func : reduce_sum + func : sum param : [x, axis, keep_dim, false, x.dtype(), DataType::UNDEFINED] - api : zeros_like -- GitLab