diff --git a/paddle/fluid/eager/auto_code_generator/final_state_generator/codegen_utils.py b/paddle/fluid/eager/auto_code_generator/final_state_generator/codegen_utils.py index 2330c84ea09a22d1f80104d1fc80280a480f8caf..ab8c28c33e78ccf2dc156b636d6a032d628809ef 100644 --- a/paddle/fluid/eager/auto_code_generator/final_state_generator/codegen_utils.py +++ b/paddle/fluid/eager/auto_code_generator/final_state_generator/codegen_utils.py @@ -235,7 +235,7 @@ def ParseYamlReturns(string): returns = [x.strip() for x in string.strip().split(",")] for i in range(len(returns)): - ret = returns[i] + ret = returns[i].split("{")[0].strip() ret_name = "" if "(" in ret and ")" in ret: diff --git a/paddle/fluid/framework/infershape_utils.cc b/paddle/fluid/framework/infershape_utils.cc index 17acbde2a09e72a7ac9886e994a416bb4279d6bb..bd71ade7e931157ad785cd2afa52654e62d8ec80 100644 --- a/paddle/fluid/framework/infershape_utils.cc +++ b/paddle/fluid/framework/infershape_utils.cc @@ -308,10 +308,100 @@ void CompatMetaTensor::share_meta(const MetaTensor& meta_tensor) { share_lod(meta_tensor); } -phi::InferMetaContext BuildInferMetaContext(InferShapeContext* ctx, - const std::string& op_type) { +void CompatInferMetaContext::EmplaceBackInput(CompatMetaTensor input) { + int index = compat_inputs_.size(); + compat_inputs_.emplace_back(std::move(input)); + input_range_.emplace_back(std::pair(index, index + 1)); +} +void CompatInferMetaContext::EmplaceBackOutput(CompatMetaTensor output) { + int index = compat_outputs_.size(); + compat_outputs_.emplace_back(std::move(output)); + output_range_.emplace_back(std::pair(index, index + 1)); +} + +void CompatInferMetaContext::EmplaceBackInputs( + paddle::SmallVector inputs) { + int index = compat_inputs_.size(); + input_range_.emplace_back(std::pair(index, index + inputs.size())); + compat_inputs_.insert(compat_inputs_.end(), + std::make_move_iterator(inputs.begin()), + std::make_move_iterator(inputs.end())); +} + +void CompatInferMetaContext::EmplaceBackOutputs( + paddle::SmallVector + outputs) { + int index = compat_outputs_.size(); + output_range_.emplace_back( + std::pair(index, index + outputs.size())); + compat_outputs_.insert(compat_outputs_.end(), + std::make_move_iterator(outputs.begin()), + std::make_move_iterator(outputs.end())); +} + +const phi::MetaTensor& CompatInferMetaContext::InputAt(size_t idx) const { + return compat_inputs_.at(idx); +} + +paddle::optional +CompatInferMetaContext::OptionalInputAt(size_t idx) const { + const auto& input = compat_inputs_.at(idx); + return input.initialized() + ? paddle::optional{input} + : paddle::optional{paddle::none}; +} + +std::vector CompatInferMetaContext::InputsBetween( + size_t start, size_t end) const { + std::vector result; + result.reserve(end - start); + + for (size_t i = start; i < end; ++i) { + auto& in = compat_inputs_.at(i); + result.emplace_back(in.initialized() ? &in : nullptr); + } + + return result; +} + +paddle::optional> +CompatInferMetaContext::OptionalInputsBetween(size_t start, size_t end) const { + const auto& first = compat_inputs_.at(start); + + if (first.initialized()) { + std::vector result; + result.reserve(end - start); + + for (size_t i = start; i < end; ++i) { + auto& in = compat_inputs_.at(i); + result.emplace_back(in.initialized() ? &in : nullptr); + } + + return paddle::optional>(result); + } + return paddle::optional>( + paddle::none); +} + +phi::MetaTensor* CompatInferMetaContext::MutableOutputAt(size_t idx) { + auto& out = compat_outputs_.at(idx); + return out.initialized() ? 
&out : nullptr; +} + +std::vector CompatInferMetaContext::MutableOutputBetween( + size_t start, size_t end) { + std::vector result; + result.reserve(end - start); + for (size_t i = start; i < end; ++i) { + auto& out = compat_outputs_.at(i); + result.emplace_back(out.initialized() ? &out : nullptr); + } + return result; +} + +CompatInferMetaContext BuildInferMetaContext(InferShapeContext* ctx, + const std::string& op_type) { // 1. get kernel args - InitDefaultKernelSignatureMap(); auto arg_map_fn = phi::OpUtilsMap::Instance().GetArgumentMappingFn(op_type); PADDLE_ENFORCE_NOT_NULL( arg_map_fn, platform::errors::NotFound( @@ -321,52 +411,47 @@ phi::InferMetaContext BuildInferMetaContext(InferShapeContext* ctx, VLOG(3) << "BuildInferMetaContext: op kernel signature - " << signature; // 2. build infermeta context - phi::InferMetaContext infer_meta_context( + CompatInferMetaContext infer_meta_context( {ctx->IsRuntime(), ctx->IsRunMKLDNNKernel()}); auto& input_names = std::get<0>(signature.args); auto& attr_names = std::get<1>(signature.args); auto& output_names = std::get<2>(signature.args); - auto kernels_map = - phi::KernelFactory::Instance().SelectKernelMap(signature.name); - if (kernels_map.size() == 0) { - PADDLE_THROW( - platform::errors::Unimplemented("Not find `%s` kernels when construct " - "InferMetaContext.", - signature.name)); - } - auto attr_defs = kernels_map.cbegin()->second.args_def().attribute_defs(); + const auto& args_def = + phi::KernelFactory::Instance().GetFirstKernelArgsDef(signature.name); + const auto& attr_defs = args_def.attribute_defs(); - // TODO(chenweihang): support multiple inputs and outputs later - phi::InferMetaContext infer_mete_context; for (auto& in_name : input_names) { if (ctx->HasInputs(in_name)) { - auto input_var = ctx->GetInputVarPtrs(in_name); + auto input_var = std::move(ctx->GetInputVarPtrs(in_name)); if (input_var.size() == 1) { infer_meta_context.EmplaceBackInput( - std::make_shared(input_var[0], ctx->IsRuntime())); + std::move(CompatMetaTensor(input_var[0], ctx->IsRuntime()))); } else { - paddle::SmallVector> inputs; - inputs.reserve(input_var.size()); + paddle::SmallVector + inputs; for (const auto& in : input_var) { - inputs.push_back( - std::make_shared(in, ctx->IsRuntime())); + inputs.emplace_back( + std::move(CompatMetaTensor(in, ctx->IsRuntime()))); } infer_meta_context.EmplaceBackInputs(std::move(inputs)); } } else { - infer_meta_context.EmplaceBackInput({nullptr}); + infer_meta_context.EmplaceBackInput( + std::move(CompatMetaTensor(ctx->IsRuntime()))); } } + VLOG(6) << "BuildInferMetaContext: Done inputs"; + auto attr_reader = ctx->Attrs(); for (size_t i = 0; i < attr_names.size(); ++i) { - auto attr_name = attr_names[i]; + auto& attr_name = attr_names[i]; if (attr_defs[i].type_index == std::type_index(typeid(phi::IntArray))) { // When attr is a vector_tensor or tensor, transform it to IntArray if (ctx->HasInputs(attr_name) || ctx->HasInput(attr_name)) { - const auto& infershape_inputs = ctx->GetInputVarPtrs(attr_name); + auto infershape_inputs = std::move(ctx->GetInputVarPtrs(attr_name)); if (ctx->IsRuntime()) { // If is in runtime, we will get tensor's value for IntArray // and push it into attrs @@ -456,7 +541,7 @@ phi::InferMetaContext BuildInferMetaContext(InferShapeContext* ctx, attr_name)); } } else if (ctx->HasInput(attr_name)) { - const auto& infershape_input = ctx->GetInputVarPtrs(attr_name); + auto infershape_input = std::move(ctx->GetInputVarPtrs(attr_name)); if (infershape_input.size() == 1) { if (ctx->IsRuntime()) { 
Variable* var = BOOST_GET_CONST(Variable*, infershape_input[0]); @@ -581,7 +666,7 @@ phi::InferMetaContext BuildInferMetaContext(InferShapeContext* ctx, // convert from data if (attr_defs[i].type_index == std::type_index(typeid(int32_t))) { if (ctx->IsRuntime()) { - const auto& infershape_inputs = ctx->GetInputVarPtrs(attr_name); + auto infershape_inputs = std::move(ctx->GetInputVarPtrs(attr_name)); auto var_temp = BOOST_GET_CONST(Variable*, infershape_inputs[i]); auto val = experimental::MakePhiScalarFromVar(*var_temp); int32_t val_int = val.template to(); @@ -596,36 +681,41 @@ phi::InferMetaContext BuildInferMetaContext(InferShapeContext* ctx, } } + VLOG(6) << "BuildInferMetaContext: Done attrs"; + for (auto& out_name : output_names) { if (ctx->HasOutputs(out_name, true)) { - auto output_var = ctx->GetOutputVarPtrs(out_name); + auto output_var = std::move(ctx->GetOutputVarPtrs(out_name)); if (output_var.size() == 1) { - infer_meta_context.EmplaceBackOutput(std::make_shared( - output_var[0], ctx->IsRuntime())); + infer_meta_context.EmplaceBackOutput( + std::move(CompatMetaTensor(output_var[0], ctx->IsRuntime()))); } else { - paddle::SmallVector> outputs; - outputs.reserve(output_var.size()); + paddle::SmallVector + outputs; for (const auto& out : output_var) { if (ctx->IsRuntime()) { if (BOOST_GET_CONST(Variable*, out)) { outputs.emplace_back( - std::make_shared(out, ctx->IsRuntime())); + std::move(CompatMetaTensor(out, ctx->IsRuntime()))); continue; } } else if (BOOST_GET_CONST(VarDesc*, out)) { outputs.emplace_back( - std::make_shared(out, ctx->IsRuntime())); + std::move(CompatMetaTensor(out, ctx->IsRuntime()))); continue; } - outputs.emplace_back(nullptr); + outputs.emplace_back(std::move(CompatMetaTensor(ctx->IsRuntime()))); } infer_meta_context.EmplaceBackOutputs(std::move(outputs)); } } else { - infer_meta_context.EmplaceBackOutput({nullptr}); + infer_meta_context.EmplaceBackOutput( + std::move(CompatMetaTensor(ctx->IsRuntime()))); } } + VLOG(6) << "BuildInferMetaContext: Done outputs"; + return infer_meta_context; } diff --git a/paddle/fluid/framework/infershape_utils.h b/paddle/fluid/framework/infershape_utils.h index 022f194b667eb59d5d4aeb94e6626f6902ff0345..e54f2e81e7e9f973ef911f81edb8e81d2d40d981 100644 --- a/paddle/fluid/framework/infershape_utils.h +++ b/paddle/fluid/framework/infershape_utils.h @@ -18,38 +18,24 @@ limitations under the License. 
*/ #include "paddle/fluid/framework/op_info.h" #include "paddle/fluid/framework/shape_inference.h" +#include "paddle/phi/core/infermeta_utils.h" #include "paddle/phi/core/meta_tensor.h" -namespace phi { -class InferMetaContext; -} // namespace phi namespace paddle { namespace framework { -phi::InferMetaContext BuildInferMetaContext(InferShapeContext* ctx, - const std::string& op_type); - -#define DECLARE_INFER_SHAPE_FUNCTOR(op_type, functor_name, fn) \ - struct functor_name : public paddle::framework::InferShapeBase { \ - void operator()( \ - paddle::framework::InferShapeContext* ctx) const override { \ - auto infer_meta_context = \ - paddle::framework::BuildInferMetaContext(ctx, #op_type); \ - fn(&infer_meta_context); \ - } \ - } - // TODO(chenweihang): Support TensorArray later class CompatMetaTensor : public phi::MetaTensor { public: + explicit CompatMetaTensor(bool is_runtime) + : is_runtime_(is_runtime), initialized_(false) {} CompatMetaTensor(InferShapeVarPtr var, bool is_runtime) : var_(std::move(var)), is_runtime_(is_runtime) {} - CompatMetaTensor() = default; - CompatMetaTensor(const CompatMetaTensor&) = default; CompatMetaTensor(CompatMetaTensor&&) = default; - CompatMetaTensor& operator=(const CompatMetaTensor&) = delete; - CompatMetaTensor& operator=(CompatMetaTensor&&) = delete; + CompatMetaTensor& operator=(CompatMetaTensor&&) = default; + CompatMetaTensor(const CompatMetaTensor&) = default; + CompatMetaTensor& operator=(const CompatMetaTensor&) = default; int64_t numel() const override; @@ -71,6 +57,8 @@ class CompatMetaTensor : public phi::MetaTensor { void share_meta(const MetaTensor& meta_tensor) override; + bool initialized() const override { return initialized_; }; + private: const LoD& GetRuntimeLoD() const { auto* var = BOOST_GET_CONST(Variable*, var_); @@ -95,7 +83,62 @@ class CompatMetaTensor : public phi::MetaTensor { InferShapeVarPtr var_; bool is_runtime_; + bool initialized_{true}; +}; + +// Note: In order to avoid using shared_ptr to manage MetaTensor in +// InferMetaContext, inherit and implement InferMetaContext separately +// for compatibility with fluid, shared_ptr will cause significant decrease +// in scheduling performance +class CompatInferMetaContext : public phi::InferMetaContext { + public: + CompatInferMetaContext() = default; + explicit CompatInferMetaContext(phi::MetaConfig config) + : phi::InferMetaContext(config) {} + + void EmplaceBackInput(CompatMetaTensor input); + void EmplaceBackOutput(CompatMetaTensor output); + + void EmplaceBackInputs( + paddle::SmallVector inputs); + void EmplaceBackOutputs( + paddle::SmallVector + outputs); + + const phi::MetaTensor& InputAt(size_t idx) const override; + paddle::optional OptionalInputAt( + size_t idx) const override; + + std::vector InputsBetween(size_t start, + size_t end) const override; + paddle::optional> + OptionalInputsBetween(size_t start, size_t end) const override; + + phi::MetaTensor* MutableOutputAt(size_t idx) override; + std::vector MutableOutputBetween(size_t start, + size_t end) override; + + virtual ~CompatInferMetaContext() = default; + + private: + paddle::SmallVector + compat_inputs_; + paddle::SmallVector + compat_outputs_; }; +CompatInferMetaContext BuildInferMetaContext(InferShapeContext* ctx, + const std::string& op_type); + +#define DECLARE_INFER_SHAPE_FUNCTOR(op_type, functor_name, fn) \ + struct functor_name : public paddle::framework::InferShapeBase { \ + void operator()( \ + paddle::framework::InferShapeContext* ctx) const override { \ + auto infer_meta_context = \ + 
paddle::framework::BuildInferMetaContext(ctx, #op_type); \ + fn(&infer_meta_context); \ + } \ + } + } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/new_executor/new_executor_defs.cc b/paddle/fluid/framework/new_executor/new_executor_defs.cc index ac1a654df47b6222f05b788ba585f641a013ce35..86d534b0b4edd641675ad6e125133d404f05528e 100644 --- a/paddle/fluid/framework/new_executor/new_executor_defs.cc +++ b/paddle/fluid/framework/new_executor/new_executor_defs.cc @@ -328,20 +328,21 @@ bool InterpretercoreInferShapeContext::IsRunMKLDNNKernel() const { } // TODO(paddle-dev): Can this be template? -std::vector InterpretercoreInferShapeContext::GetInputVarPtrs( +paddle::SmallVector +InterpretercoreInferShapeContext::GetInputVarPtrs( const std::string& name) const { const std::vector& vars = InputVars(name); - std::vector res; + paddle::SmallVector res; res.reserve(vars.size()); res.insert(res.begin(), vars.begin(), vars.end()); return res; } -std::vector +paddle::SmallVector InterpretercoreInferShapeContext::GetOutputVarPtrs( const std::string& name) const { const std::vector& vars = OutputVars(name); - std::vector res; + paddle::SmallVector res; res.reserve(vars.size()); res.insert(res.begin(), vars.begin(), vars.end()); return res; diff --git a/paddle/fluid/framework/new_executor/new_executor_defs.h b/paddle/fluid/framework/new_executor/new_executor_defs.h index b223a2ad769054055e88eb1b7f4b575938b94a54..6a1e46e3592421e35d1b8b5b04f6a09916e03e6a 100644 --- a/paddle/fluid/framework/new_executor/new_executor_defs.h +++ b/paddle/fluid/framework/new_executor/new_executor_defs.h @@ -90,11 +90,11 @@ class InterpretercoreInferShapeContext : public InferShapeContext { bool IsRunMKLDNNKernel() const override; // TODO(paddle-dev): Can this be template? 
- std::vector GetInputVarPtrs( - const std::string& name) const override; + paddle::SmallVector + GetInputVarPtrs(const std::string& name) const override; - std::vector GetOutputVarPtrs( - const std::string& name) const override; + paddle::SmallVector + GetOutputVarPtrs(const std::string& name) const override; DDim GetInputDim(const std::string& name) const override; diff --git a/paddle/fluid/framework/op_desc.cc b/paddle/fluid/framework/op_desc.cc index 15b979086d1eb8ead1e38d1be681d258cb1f8182..d27bf0e150f9785916265556b59d285999344a81 100644 --- a/paddle/fluid/framework/op_desc.cc +++ b/paddle/fluid/framework/op_desc.cc @@ -202,10 +202,10 @@ class CompileTimeInferShapeContext : public InferShapeContext { } } - std::vector GetInputVarPtrs( - const std::string &name) const override { + paddle::SmallVector + GetInputVarPtrs(const std::string &name) const override { const std::vector arg_names = Inputs(name); - std::vector res; + paddle::SmallVector res; res.reserve(arg_names.size()); std::transform(arg_names.begin(), arg_names.end(), std::back_inserter(res), [this](const std::string &name) { @@ -214,10 +214,10 @@ class CompileTimeInferShapeContext : public InferShapeContext { return res; } - std::vector GetOutputVarPtrs( - const std::string &name) const override { + paddle::SmallVector + GetOutputVarPtrs(const std::string &name) const override { const std::vector arg_names = Outputs(name); - std::vector res; + paddle::SmallVector res; res.reserve(arg_names.size()); std::transform(arg_names.begin(), arg_names.end(), std::back_inserter(res), [this](const std::string &name) { diff --git a/paddle/fluid/framework/operator.cc b/paddle/fluid/framework/operator.cc index e3bc22ae88ba30db373b180454886771741b9633..0291309aa0ddd6a911087a781bb6d969d19a2d34 100644 --- a/paddle/fluid/framework/operator.cc +++ b/paddle/fluid/framework/operator.cc @@ -947,19 +947,19 @@ class RuntimeInferShapeContext : public InferShapeContext { } // TODO(paddle-dev): Can this be template? 
- std::vector GetInputVarPtrs( - const std::string& name) const override { + paddle::SmallVector + GetInputVarPtrs(const std::string& name) const override { const std::vector& vars = InputVars(name); - std::vector res; + paddle::SmallVector res; res.reserve(vars.size()); res.insert(res.begin(), vars.begin(), vars.end()); return res; } - std::vector GetOutputVarPtrs( - const std::string& name) const override { + paddle::SmallVector + GetOutputVarPtrs(const std::string& name) const override { const std::vector& vars = OutputVars(name); - std::vector res; + paddle::SmallVector res; res.reserve(vars.size()); res.insert(res.begin(), vars.begin(), vars.end()); return res; @@ -1326,8 +1326,8 @@ void OperatorWithKernel::RunImpl(const Scope& scope, << ", using_kernel_key:" << *kernel_type_.get(); auto try_pt_kernel_key = TransOpKernelTypeToPhiKernelKey(*kernel_type_.get()); - if (!phi::KernelFactory::Instance().IsSelectKernelValid( - pt_kernel_name, try_pt_kernel_key)) { + if (!phi::KernelFactory::Instance().HasKernel(pt_kernel_name, + try_pt_kernel_key)) { kernel_type_->library_type_ = expected_kernel_key_library_type; VLOG(3) << "modify XPU KP kernel in static graph: " << type_ << " is failed " << *kernel_type_.get(); @@ -2115,10 +2115,12 @@ OpKernelType OperatorWithKernel::GetKernelTypeForVar( KernelSignature OperatorWithKernel::GetExpectedPhiKernelArgs( const ExecutionContext& ctx) const { - InitDefaultKernelSignatureMap(); ExecutionArgumentMappingContext arg_mapping_ctx(ctx); - return phi::OpUtilsMap::Instance().GetArgumentMappingFn(Type())( - arg_mapping_ctx); + if (arg_map_fn_ == nullptr) { + arg_map_fn_.reset(new phi::ArgumentMappingFn( + phi::OpUtilsMap::Instance().GetArgumentMappingFn(Type()))); + } + return (*arg_map_fn_)(arg_mapping_ctx); } Scope* OperatorWithKernel::PreparePhiData( diff --git a/paddle/fluid/framework/operator.h b/paddle/fluid/framework/operator.h index f7fc83f1d6d30fec14df7c66505d3cb71c7f15a4..f0887eb919c309b22e74eaa151401b14c02edfaf 100644 --- a/paddle/fluid/framework/operator.h +++ b/paddle/fluid/framework/operator.h @@ -701,6 +701,7 @@ class OperatorWithKernel : public OperatorBase { mutable bool run_kp_kernel = false; mutable std::unique_ptr pt_kernel_signature_; mutable std::unique_ptr pt_kernel_; + mutable std::unique_ptr arg_map_fn_; }; extern bool OpSupportGPU(const std::string& op_type); diff --git a/paddle/fluid/framework/phi_utils.cc b/paddle/fluid/framework/phi_utils.cc index 8e6f082da10267268855c5fa467f10e49a6692de..75bab0594758b3013eca0dee82201b5615e3e183 100644 --- a/paddle/fluid/framework/phi_utils.cc +++ b/paddle/fluid/framework/phi_utils.cc @@ -25,6 +25,7 @@ limitations under the License. 
*/ #include "paddle/phi/core/compat/convert_utils.h" #include "paddle/phi/core/compat/op_utils.h" #include "paddle/phi/core/kernel_factory.h" +#include "paddle/phi/core/type_defs.h" namespace paddle { namespace framework { @@ -40,9 +41,9 @@ class KernelArgsNameMakerByOpProto : public KernelArgsNameMaker { ~KernelArgsNameMakerByOpProto() {} - const paddle::SmallVector& GetInputArgsNames() override; - const paddle::SmallVector& GetOutputArgsNames() override; - const paddle::SmallVector& GetAttrsArgsNames() override; + const paddle::SmallVector& GetInputArgsNames() override; + const paddle::SmallVector& GetOutputArgsNames() override; + const paddle::SmallVector& GetAttrsArgsNames() override; KernelSignature GetKernelSignature(); @@ -52,9 +53,9 @@ class KernelArgsNameMakerByOpProto : public KernelArgsNameMaker { private: const framework::proto::OpProto* op_proto_; - paddle::SmallVector input_names_; - paddle::SmallVector output_names_; - paddle::SmallVector attr_names_; + paddle::SmallVector input_names_; + paddle::SmallVector output_names_; + paddle::SmallVector attr_names_; }; OpKernelType TransPhiKernelKeyToOpKernelType(const phi::KernelKey& kernel_key) { @@ -102,7 +103,7 @@ phi::KernelKey FallBackToCpu(const OpKernelType& expected_kernel_key, if (platform::is_xpu_place(expected_kernel_key.place_) || paddle::platform::is_in_xpu_black_list(op.Type())) { VLOG(3) << "phi missing XPU kernel: " << op.Type() - << ", phipected_kernel_key:" << expected_kernel_key + << ", expected_kernel_key:" << expected_kernel_key << ", fallbacking to CPU one!"; return phi::KernelKey(phi::Backend::CPU, kernel_key.layout(), kernel_key.dtype()); @@ -111,7 +112,7 @@ phi::KernelKey FallBackToCpu(const OpKernelType& expected_kernel_key, #ifdef PADDLE_WITH_ASCEND_CL if (platform::is_npu_place(expected_kernel_key.place_)) { VLOG(3) << "phi missing NPU kernel: " << op.Type() - << ", phipected_kernel_key:" << expected_kernel_key + << ", expected_kernel_key:" << expected_kernel_key << ", fallbacking to CPU one!"; return phi::KernelKey(phi::Backend::CPU, kernel_key.layout(), kernel_key.dtype()); @@ -120,7 +121,7 @@ phi::KernelKey FallBackToCpu(const OpKernelType& expected_kernel_key, #ifdef PADDLE_WITH_MLU if (platform::is_mlu_place(expected_kernel_key.place_)) { VLOG(3) << "phi missing MLU kernel: " << op.Type() - << ", phipected_kernel_key:" << expected_kernel_key + << ", expected_kernel_key:" << expected_kernel_key << ", fallbacking to CPU one!"; return phi::KernelKey(phi::Backend::CPU, kernel_key.layout(), kernel_key.dtype()); @@ -129,7 +130,7 @@ phi::KernelKey FallBackToCpu(const OpKernelType& expected_kernel_key, #ifdef PADDLE_WITH_IPU if (platform::is_ipu_place(expected_kernel_key.place_)) { VLOG(3) << "phi missing IPU kernel: " << op.Type() - << ", phipected_kernel_key:" << expected_kernel_key + << ", expected_kernel_key:" << expected_kernel_key << ", fallbacking to CPU one!"; return phi::KernelKey(phi::Backend::CPU, kernel_key.layout(), kernel_key.dtype()); @@ -139,7 +140,7 @@ phi::KernelKey FallBackToCpu(const OpKernelType& expected_kernel_key, if (platform::is_custom_place(expected_kernel_key.place_)) { VLOG(3) << "phi missing " << expected_kernel_key.place_.GetDeviceType() << " kernel: " << op.Type() - << ", phipected_kernel_key:" << expected_kernel_key + << ", expected_kernel_key:" << expected_kernel_key << ", fallbacking to CPU one!"; return phi::KernelKey(phi::Backend::CPU, kernel_key.layout(), kernel_key.dtype()); @@ -148,45 +149,52 @@ phi::KernelKey FallBackToCpu(const OpKernelType& expected_kernel_key, 
return phi::KernelKey(); } -const paddle::SmallVector& +const paddle::SmallVector& KernelArgsNameMakerByOpProto::GetInputArgsNames() { for (int i = 0; i < op_proto_->inputs_size(); ++i) { auto& in = op_proto_->inputs()[i]; auto& in_name = in.name(); if ((in.has_extra() && in.extra()) || (in.has_quant() && in.quant())) { - VLOG(6) << "Parse PhiKernel input: skip extra & quant input - " - << in_name; continue; } // If contains dispensable input, we should override the // OpArgumentMapping method self in phi/ops/compat dir if (in.has_dispensable() && in.dispensable()) { - VLOG(6) << "Parse PhiKernel input: skip dispensable input - " << in_name; continue; } - VLOG(6) << "Parse PhiKernel input: " << in_name; - input_names_.emplace_back(in_name); + input_names_.emplace_back(in_name.c_str()); + } + if (VLOG_IS_ON(10)) { + std::ostringstream sout; + sout << "PhiKernel inputs: "; + std::copy(input_names_.begin(), input_names_.end(), + std::ostream_iterator(sout, ", ")); + VLOG(10) << sout.str(); } return input_names_; } -const paddle::SmallVector& +const paddle::SmallVector& KernelArgsNameMakerByOpProto::GetOutputArgsNames() { for (int i = 0; i < op_proto_->outputs_size(); ++i) { auto& out = op_proto_->outputs()[i]; auto& out_name = out.name(); if ((out.has_extra() && out.extra()) || (out.has_quant() && out.quant())) { - VLOG(6) << "Parse PhiKernel output: skip extra & quant output - " - << out_name; continue; } - VLOG(6) << "Parse PhiKernel output: " << out_name; - output_names_.emplace_back(out_name); + output_names_.emplace_back(out_name.c_str()); + } + if (VLOG_IS_ON(10)) { + std::ostringstream sout; + sout << "PhiKernel outputs: "; + std::copy(output_names_.begin(), output_names_.end(), + std::ostream_iterator(sout, ", ")); + VLOG(10) << sout.str(); } return output_names_; } -const paddle::SmallVector& +const paddle::SmallVector& KernelArgsNameMakerByOpProto::GetAttrsArgsNames() { for (int i = 0; i < op_proto_->attrs_size(); ++i) { auto& attr = op_proto_->attrs()[i]; @@ -195,25 +203,26 @@ KernelArgsNameMakerByOpProto::GetAttrsArgsNames() { attr_name == "op_role" || attr_name == "op_role_var" || attr_name == "op_namescope" || attr_name == "op_callstack" || attr_name == "op_device") { - VLOG(6) << "Parse PhiKernel attribute: skip needless attr - " - << attr_name; continue; } if ((attr.has_extra() && attr.extra()) || (attr.has_quant() && attr.quant())) { - VLOG(6) << "Parse PhiKernel attribute: skip extra & quant attr - " - << attr_name; continue; } - VLOG(6) << "Parse PhiKernel attribute: " << attr_name; - attr_names_.emplace_back(attr_name); + attr_names_.emplace_back(attr_name.c_str()); + } + if (VLOG_IS_ON(10)) { + std::ostringstream sout; + sout << "PhiKernel attributes: "; + std::copy(attr_names_.begin(), attr_names_.end(), + std::ostream_iterator(sout, ", ")); + VLOG(10) << sout.str(); } - return attr_names_; } KernelSignature KernelArgsNameMakerByOpProto::GetKernelSignature() { - return KernelSignature(phi::TransToPhiKernelName(op_proto_->type()), + return KernelSignature(phi::TransToPhiKernelName(op_proto_->type()).c_str(), GetInputArgsNames(), GetAttrsArgsNames(), GetOutputArgsNames()); } @@ -228,7 +237,7 @@ void InitDefaultKernelSignatureMap() { if (phi::KernelFactory::Instance().HasCompatiblePhiKernel(op_type) && op_proto) { paddle::framework::KernelArgsNameMakerByOpProto maker(op_proto); - VLOG(10) << "Register kernel signature for " << op_type; + VLOG(10) << "Register `" << op_type << "` kernel signature:"; phi::DefaultKernelSignatureMap::Instance().Insert( op_type, 
std::move(maker.GetKernelSignature())); } diff --git a/paddle/fluid/framework/phi_utils.h b/paddle/fluid/framework/phi_utils.h index a17578816921b2337a76d1a0a69a6c8adbc51c4d..392a3f9b06b3c11232b5804acd3acefb6a06c59b 100644 --- a/paddle/fluid/framework/phi_utils.h +++ b/paddle/fluid/framework/phi_utils.h @@ -55,9 +55,9 @@ phi::KernelKey FallBackToCpu(const OpKernelType& expected_kernel_key, class KernelArgsNameMaker { public: virtual ~KernelArgsNameMaker() {} - virtual const paddle::SmallVector& GetInputArgsNames() = 0; - virtual const paddle::SmallVector& GetOutputArgsNames() = 0; - virtual const paddle::SmallVector& GetAttrsArgsNames() = 0; + virtual const paddle::SmallVector& GetInputArgsNames() = 0; + virtual const paddle::SmallVector& GetOutputArgsNames() = 0; + virtual const paddle::SmallVector& GetAttrsArgsNames() = 0; }; void InitDefaultKernelSignatureMap(); diff --git a/paddle/fluid/framework/shape_inference.h b/paddle/fluid/framework/shape_inference.h index 6ba60590cf8f370b3e983ea9c925ec8387eb2fae..bf9731bafce6405421602967317260b247e0698d 100644 --- a/paddle/fluid/framework/shape_inference.h +++ b/paddle/fluid/framework/shape_inference.h @@ -21,6 +21,8 @@ limitations under the License. */ #include "paddle/fluid/framework/var_desc.h" #include "paddle/fluid/framework/variable.h" #include "paddle/phi/core/ddim.h" +#include "paddle/phi/core/type_defs.h" +#include "paddle/utils/small_vector.h" namespace paddle { namespace framework { @@ -106,10 +108,10 @@ class InferShapeContext { virtual bool IsRunMKLDNNKernel() const = 0; - virtual std::vector GetInputVarPtrs( - const std::string &name) const = 0; - virtual std::vector GetOutputVarPtrs( - const std::string &name) const = 0; + virtual paddle::SmallVector + GetInputVarPtrs(const std::string &name) const = 0; + virtual paddle::SmallVector + GetOutputVarPtrs(const std::string &name) const = 0; protected: virtual std::vector GetRepeatedDims(const std::string &name) const = 0; diff --git a/paddle/fluid/imperative/infer_shape_context.h b/paddle/fluid/imperative/infer_shape_context.h index 1e5b112ece21f606b995d922a30b520096d0907b..5b63334c9ea99d0fc6f52339e8fdfcf8c789ee79 100644 --- a/paddle/fluid/imperative/infer_shape_context.h +++ b/paddle/fluid/imperative/infer_shape_context.h @@ -235,9 +235,10 @@ class DygraphInferShapeContext : public framework::InferShapeContext { (op_kernel_type_->data_layout_ == framework::DataLayout::kMKLDNN)); } - std::vector GetInputVarPtrs( - const std::string& name) const override { - std::vector res; + paddle::SmallVector + GetInputVarPtrs(const std::string& name) const override { + paddle::SmallVector + res; auto it = var_map_in_->find(name); PADDLE_ENFORCE_NE( it, var_map_in_->end(), @@ -248,9 +249,11 @@ class DygraphInferShapeContext : public framework::InferShapeContext { return res; } - std::vector GetOutputVarPtrs( - const std::string& name) const override { - std::vector res; + paddle::SmallVector + GetOutputVarPtrs(const std::string& name) const override { + paddle::SmallVector + res; auto it = var_map_out_->find(name); PADDLE_ENFORCE_NE( it, var_map_out_->end(), diff --git a/paddle/fluid/imperative/prepared_operator.cc b/paddle/fluid/imperative/prepared_operator.cc index 0ad5e808b1d1aadb898fcb121cefe4ede3756bab..cef7417ea41951c02e5ec1e4db18fd8f2eafc67f 100644 --- a/paddle/fluid/imperative/prepared_operator.cc +++ b/paddle/fluid/imperative/prepared_operator.cc @@ -36,6 +36,8 @@ DECLARE_bool(run_kp_kernel); namespace paddle { namespace imperative { +static const phi::Kernel empty_kernel; + const 
std::shared_ptr& GetVariableWrapper( const std::shared_ptr& var) { return var->SharedVar(); @@ -108,12 +110,13 @@ PreparedOp::PreparedOp(const framework::OperatorBase& op, ctx_(ctx), kernel_type_(kernel_type), func_(func), - dev_ctx_(dev_ctx) {} + dev_ctx_(dev_ctx), + pt_kernel_(empty_kernel) {} PreparedOp::PreparedOp(const framework::OperatorBase& op, const framework::RuntimeContext& ctx, const framework::OpKernelType& kernel_type, - const framework::KernelSignature& kernel_signature, + framework::KernelSignature&& kernel_signature, const phi::Kernel& pt_kernel, platform::DeviceContext* dev_ctx) : op_(op), @@ -122,7 +125,7 @@ PreparedOp::PreparedOp(const framework::OperatorBase& op, func_(nullptr), dev_ctx_(dev_ctx), run_phi_kernel_(true), - pt_kernel_signature_(kernel_signature), + pt_kernel_signature_(std::move(kernel_signature)), pt_kernel_(pt_kernel) {} template @@ -170,7 +173,8 @@ PreparedOp PrepareImpl(const NameVarMap& ins, #endif if (phi::KernelFactory::Instance().HasCompatiblePhiKernel(op.Type())) { - pt_kernel_signature = op.GetExpectedPhiKernelArgs(dygraph_exe_ctx); + pt_kernel_signature = + std::move(op.GetExpectedPhiKernelArgs(dygraph_exe_ctx)); VLOG(6) << pt_kernel_signature; pt_kernel_name = pt_kernel_signature.name; @@ -200,8 +204,8 @@ PreparedOp PrepareImpl(const NameVarMap& ins, << ", using_kernel_key:" << expected_kernel_key; phi::KernelKey try_pt_kernel_key = TransOpKernelTypeToPhiKernelKey(expected_kernel_key); - if (!phi::KernelFactory::Instance().IsSelectKernelValid( - pt_kernel_name, try_pt_kernel_key)) { + if (!phi::KernelFactory::Instance().HasKernel(pt_kernel_name, + try_pt_kernel_key)) { expected_kernel_key.library_type_ = expected_kernel_key_library_type; VLOG(3) << "modify XPU KP kernel: " << op.Type() << " is failed " << expected_kernel_key; @@ -211,8 +215,8 @@ PreparedOp PrepareImpl(const NameVarMap& ins, #endif pt_kernel_key = TransOpKernelTypeToPhiKernelKey(expected_kernel_key); - auto pt_kernel = phi::KernelFactory::Instance().SelectKernel(pt_kernel_name, - pt_kernel_key); + auto& pt_kernel = phi::KernelFactory::Instance().SelectKernel( + pt_kernel_name, pt_kernel_key); if (pt_kernel.IsValid() #if defined(PADDLE_WITH_XPU) && !defined(PADDLE_WITH_XPU_KP) @@ -227,9 +231,8 @@ PreparedOp PrepareImpl(const NameVarMap& ins, dev_ctx = pool.Get(expected_kernel_key.place_); } - // TODO(chenweihang): using CPUKernel when miss device kernel case - return PreparedOp(op, ctx, expected_kernel_key, pt_kernel_signature, - pt_kernel, dev_ctx); + return PreparedOp(op, ctx, expected_kernel_key, + std::move(pt_kernel_signature), pt_kernel, dev_ctx); } else { VLOG(6) << "Dynamic mode ChoosePhiKernel - kernel `" << pt_kernel_name << "` not found."; @@ -270,15 +273,16 @@ PreparedOp PrepareImpl(const NameVarMap& ins, if (phi::KernelFactory::Instance().HasCompatiblePhiKernel(op.Type())) { auto pt_cpu_kernel_key = FallBackToCpu(expected_kernel_key, pt_kernel_key, op); - auto pt_cpu_kernel = phi::KernelFactory::Instance().SelectKernel( + auto& pt_cpu_kernel = phi::KernelFactory::Instance().SelectKernel( pt_kernel_name, pt_cpu_kernel_key); if (pt_cpu_kernel.IsValid()) { VLOG(6) << "Dynamic mode PrepareImpl - kernel name: " << pt_kernel_name << " | kernel key: " << pt_cpu_kernel_key << " | kernel: " << pt_cpu_kernel; auto* cpu_ctx = pool.Get(paddle::platform::CPUPlace()); - return PreparedOp(op, ctx, expected_kernel_key, pt_kernel_signature, - pt_cpu_kernel, cpu_ctx); + return PreparedOp(op, ctx, expected_kernel_key, + std::move(pt_kernel_signature), pt_cpu_kernel, + cpu_ctx); } } } 
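Note (editor): the prepared_operator hunks above switch PreparedOp from copying a phi::Kernel to holding a const reference obtained from the kernel factory, with a file-static empty_kernel used when no phi kernel applies. The following is a minimal, self-contained C++ sketch of that ownership pattern only; Registry, Kernel, and PreparedOpLike are hypothetical stand-ins, not Paddle APIs.

#include <cassert>
#include <string>
#include <unordered_map>

struct Kernel {
  bool IsValid() const { return valid; }
  bool valid{false};
};

class Registry {
 public:
  static Registry& Instance() {
    static Registry r;
    return r;
  }
  // Returns a reference to a registry-owned kernel, or to a static empty
  // sentinel when the name is unknown (analogous to `empty_kernel` above),
  // so callers never copy the kernel object.
  const Kernel& Select(const std::string& name) const {
    static const Kernel kEmptyKernel{};
    auto it = kernels_.find(name);
    return it == kernels_.end() ? kEmptyKernel : it->second;
  }
  void Register(const std::string& name) { kernels_[name].valid = true; }

 private:
  std::unordered_map<std::string, Kernel> kernels_;
};

// The op wrapper stores `const Kernel&`; constructing it never copies the
// kernel, and the registry retains ownership for the program's lifetime.
class PreparedOpLike {
 public:
  explicit PreparedOpLike(const Kernel& k) : kernel_(k) {}
  bool runnable() const { return kernel_.IsValid(); }

 private:
  const Kernel& kernel_;
};

int main() {
  Registry::Instance().Register("matmul");
  PreparedOpLike hit(Registry::Instance().Select("matmul"));
  PreparedOpLike miss(Registry::Instance().Select("unknown_op"));
  assert(hit.runnable() && !miss.runnable());
  return 0;
}

The trade-off, as in the diff, is that the referenced kernel must outlive every PreparedOpLike built from it; a singleton registry with static storage duration satisfies that.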
@@ -505,7 +509,6 @@ static void PreparedOpRunPtImpl( #endif } - // TODO(chenweihang): add debug flags later if (framework::IsComplexType(kernel_type.data_type_)) { HandleComplexGradToRealGrad(outs); } diff --git a/paddle/fluid/imperative/prepared_operator.h b/paddle/fluid/imperative/prepared_operator.h index 04d0b4ca7a5db4a10dae8ee0c751ee7a70852a4b..b3c5a6b5fa22012a24595d5a1cc3d4872b48496e 100644 --- a/paddle/fluid/imperative/prepared_operator.h +++ b/paddle/fluid/imperative/prepared_operator.h @@ -154,7 +154,7 @@ class PreparedOp { PreparedOp(const framework::OperatorBase& op, const framework::RuntimeContext& ctx, const framework::OpKernelType& kernel_type, - const framework::KernelSignature& kernel_signature, + framework::KernelSignature&& kernel_signature, const phi::Kernel& pt_kernel, platform::DeviceContext* dev_ctx); static PreparedOp Prepare(const NameVarMap& ins, @@ -206,7 +206,7 @@ class PreparedOp { bool run_phi_kernel_{false}; bool run_kp_kernel_{false}; framework::KernelSignature pt_kernel_signature_; - phi::Kernel pt_kernel_; + const phi::Kernel& pt_kernel_; }; const inline framework::Attribute& GetAttr( @@ -289,7 +289,7 @@ void BuildDygraphPhiKernelContext( } } - auto ins_vector = it->second; + auto& ins_vector = it->second; size_t end_idx = start_idx + ins_vector.size(); for (size_t offset = 0; offset < ins_vector.size(); ++offset) { @@ -587,7 +587,7 @@ void PreparePhiData(const phi::Kernel& pt_kernel, auto& ins_vector = ins.at(input_names[i]); for (size_t offset = 0; offset < ins_vector.size(); ++offset) { - auto var = ins_vector[offset]; + auto& var = ins_vector[offset]; const auto* tensor_in = GetTensorFromVar(var->Var()); if (tensor_in && tensor_in->IsInitialized()) { if (in_def.backend == phi::Backend::ALL_BACKEND) { diff --git a/paddle/fluid/inference/api/analysis_predictor.cc b/paddle/fluid/inference/api/analysis_predictor.cc index 0093decea5a156dc8eae8ab00c4a6360adeed129..14e4c3da624970d5db023229b163d52e9ae8a29c 100644 --- a/paddle/fluid/inference/api/analysis_predictor.cc +++ b/paddle/fluid/inference/api/analysis_predictor.cc @@ -226,6 +226,7 @@ bool AnalysisPredictor::PrepareScope( status_is_cloned_ = true; } else { paddle::framework::InitDevices(); + paddle::framework::InitDefaultKernelSignatureMap(); // TODO(wilber): we need to release memory occupied by weights. 
scope_.reset(new paddle::framework::Scope()); status_is_cloned_ = false; diff --git a/paddle/fluid/inference/api/api_impl.cc b/paddle/fluid/inference/api/api_impl.cc index 73d14f215e2abb05569f771d1a878d76089f2c47..1c4369af646afa84f78bd34b3943406fb5f9c49e 100644 --- a/paddle/fluid/inference/api/api_impl.cc +++ b/paddle/fluid/inference/api/api_impl.cc @@ -92,6 +92,7 @@ bool NativePaddlePredictor::Init( "The sub_scope should not be nullptr.")); } else { paddle::framework::InitDevices(); + paddle::framework::InitDefaultKernelSignatureMap(); scope_.reset(new paddle::framework::Scope()); } diff --git a/paddle/fluid/operators/controlflow/while_op.cc b/paddle/fluid/operators/controlflow/while_op.cc index 03a244a457cd0f03981852fd05014a0820a8ae45..eb44655c88f1848e26219fd87e03e9a4e8e587ae 100644 --- a/paddle/fluid/operators/controlflow/while_op.cc +++ b/paddle/fluid/operators/controlflow/while_op.cc @@ -517,10 +517,8 @@ class WhileGradOpShapeInference : public framework::InferShapeBase { ctx->HasInputs(kOutputs); ctx->HasInputs(framework::GradVarName(kOutputs)); auto pg_ig_names = ctx->Outputs(kXGRAD); - std::vector in_var_ptrs = - ctx->GetInputVarPtrs(kX); - std::vector out_var_ptrs = - ctx->GetOutputVarPtrs(kXGRAD); + auto in_var_ptrs = ctx->GetInputVarPtrs(kX); + auto out_var_ptrs = ctx->GetOutputVarPtrs(kXGRAD); PADDLE_ENFORCE_EQ(in_var_ptrs.size(), out_var_ptrs.size(), platform::errors::InvalidArgument( "The size of Inputs(X) must be the same as " diff --git a/paddle/fluid/operators/detection/collect_fpn_proposals_op.cc b/paddle/fluid/operators/detection/collect_fpn_proposals_op.cc index 44f602237da2e2c8fa26e39326f977d10235155d..92c9ab34aa454fc95602fe8c35d8430ddc21f9c8 100644 --- a/paddle/fluid/operators/detection/collect_fpn_proposals_op.cc +++ b/paddle/fluid/operators/detection/collect_fpn_proposals_op.cc @@ -63,10 +63,8 @@ class CollectFpnProposalsOp : public framework::OperatorWithKernel { context->ShareLoD("MultiLevelRois", "FpnRois"); } if (context->IsRuntime() && !context->HasInputs("MultiLevelRoIsNum")) { - std::vector roi_inputs = - context->GetInputVarPtrs("MultiLevelRois"); - std::vector score_inputs = - context->GetInputVarPtrs("MultiLevelScores"); + auto roi_inputs = context->GetInputVarPtrs("MultiLevelRois"); + auto score_inputs = context->GetInputVarPtrs("MultiLevelScores"); for (size_t i = 0; i < roi_inputs.size(); ++i) { framework::Variable *roi_var = BOOST_GET(framework::Variable *, roi_inputs[i]); diff --git a/paddle/fluid/pybind/imperative.cc b/paddle/fluid/pybind/imperative.cc index 7df6d8f7f791c83a23a956169db12b18cb2864e8..93f10b34b6ca256f40bab842ce6fa907de83e0b3 100644 --- a/paddle/fluid/pybind/imperative.cc +++ b/paddle/fluid/pybind/imperative.cc @@ -60,6 +60,7 @@ limitations under the License. */ #include "paddle/fluid/pybind/uva_utils.h" #include "paddle/phi/core/compat/arg_map_context.h" #include "paddle/phi/core/compat/type_defs.h" +#include "paddle/phi/core/type_defs.h" namespace paddle { namespace pybind { @@ -2027,26 +2028,35 @@ void BindImperative(py::module *m_ptr) { *(imperative::AmpOperators::Instance().GetMutableAllowOps()), *(imperative::AmpOperators::Instance().GetMutableBlockOps())); }) - .def("_get_kernel_signature", - [](imperative::Tracer &self, const std::string &type, - const PyNameVarBaseMap &ins, const PyNameVarBaseMap &outs, - framework::AttributeMap attrs) { - // TODO(xiongkun): move this function outside of tracer. 
- auto ins_map = ConvertToNameTensorMap(ins); - auto outs_map = ConvertToNameTensorMap(outs); - { - auto to_vector = [](paddle::SmallVector &vec) { - return std::vector(vec.begin(), vec.end()); - }; - auto ret = self.GetExpectedKernelSignature(type, ins_map, - outs_map, attrs); - auto kernelsig_ins = to_vector(std::get<0>(ret.args)); - auto kernelsig_attrs = to_vector(std::get<1>(ret.args)); - auto kernelsig_outs = to_vector(std::get<2>(ret.args)); - return std::make_tuple(kernelsig_ins, kernelsig_attrs, - kernelsig_outs); - } - }) + .def( + "_get_kernel_signature", + [](imperative::Tracer &self, const std::string &type, + const PyNameVarBaseMap &ins, const PyNameVarBaseMap &outs, + framework::AttributeMap attrs) { + // TODO(xiongkun): move this function outside of tracer. + auto ins_map = ConvertToNameTensorMap(ins); + auto outs_map = ConvertToNameTensorMap(outs); + { + auto input_to_vector = + [](paddle::SmallVector &vec) { + return std::vector(vec.begin(), vec.end()); + }; + auto output_to_vector = + [](paddle::SmallVector &vec) { + return std::vector(vec.begin(), vec.end()); + }; + auto attr_to_vector = [](paddle::SmallVector &vec) { + return std::vector(vec.begin(), vec.end()); + }; + auto ret = self.GetExpectedKernelSignature(type, ins_map, + outs_map, attrs); + auto kernelsig_ins = input_to_vector(std::get<0>(ret.args)); + auto kernelsig_attrs = attr_to_vector(std::get<1>(ret.args)); + auto kernelsig_outs = output_to_vector(std::get<2>(ret.args)); + return std::make_tuple(kernelsig_ins, kernelsig_attrs, + kernelsig_outs); + } + }) .def("trace", [](imperative::Tracer &self, const std::string &type, const PyNameVarBaseMap &ins, const PyNameVarBaseMap &outs, diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc index 7cc9d2220af23763a8744d1b5ae8fad1f4c30207..5f9db51ee74d38ee0ac37ee31f5d341f735726b2 100644 --- a/paddle/fluid/pybind/pybind.cc +++ b/paddle/fluid/pybind/pybind.cc @@ -2907,6 +2907,8 @@ All parameter, weight, gradient are variables in Paddle. 
framework::LoadOpMetaInfoAndRegisterOp(dso_name)); }); m.def("init_devices", []() { framework::InitDevices(); }); + m.def("init_default_kernel_signatures", + []() { framework::InitDefaultKernelSignatureMap(); }); m.def("is_compiled_with_cuda", IsCompiledWithCUDA); m.def("is_compiled_with_ascend", IsCompiledWithAscend); m.def("is_compiled_with_rocm", IsCompiledWithROCM); diff --git a/paddle/infrt/dialect/phi/pass/kernel_op_desc.cc b/paddle/infrt/dialect/phi/pass/kernel_op_desc.cc index a26e8e2dca57081d9935883bbe0f01188abf1f1b..b1aa81260968fa45725180968b23a3ff5a95dc3b 100644 --- a/paddle/infrt/dialect/phi/pass/kernel_op_desc.cc +++ b/paddle/infrt/dialect/phi/pass/kernel_op_desc.cc @@ -15,6 +15,7 @@ #include "paddle/infrt/dialect/phi/pass/kernel_op_desc.h" #include #include "paddle/infrt/dialect/phi/data_type.h" +#include "paddle/phi/core/type_defs.h" #include "paddle/phi/kernels/declarations.h" namespace infrt { @@ -92,10 +93,10 @@ std::vector GetCandidateKernels( phi_kernel_desc.input_types.clear(); phi_kernel_desc.output_types.clear(); phi::KernelArgsDef args_def = kernel_key_map.at(kernel_key).args_def(); - const paddle::SmallVector& input_arg = - args_def.input_defs(); - const paddle::SmallVector& output_arg = - args_def.output_defs(); + const paddle::SmallVector& + input_arg = args_def.input_defs(); + const paddle::SmallVector& + output_arg = args_def.output_defs(); for (auto tensor_arg : input_arg) { phi_kernel_desc.input_types.emplace_back(ConvertPlaceFromPhi(tensor_arg)); } diff --git a/paddle/infrt/host_context/value.h b/paddle/infrt/host_context/value.h index ecd118818099df299a1ad9f90756c108dc3d5a5c..1834cb4c0db056939013e2f202a5a0b3e0908cdb 100644 --- a/paddle/infrt/host_context/value.h +++ b/paddle/infrt/host_context/value.h @@ -91,6 +91,7 @@ using ValueVariantType = std::vector<::phi::DenseTensor*>, paddle::experimental::ScalarBase<::phi::DenseTensor>, paddle::experimental::IntArrayBase<::phi::DenseTensor>, + std::vector, std::vector<::phi::MetaTensor*>, ::phi::MetaConfig, paddle::experimental::Backend, diff --git a/paddle/phi/api/lib/api_custom_impl.cc b/paddle/phi/api/lib/api_custom_impl.cc index 8e05f9d9090e25df25216e7df292a3f3550aac3d..70c3b27ede5dd4591b98b911ee56bd43adbada7b 100644 --- a/paddle/phi/api/lib/api_custom_impl.cc +++ b/paddle/phi/api/lib/api_custom_impl.cc @@ -271,10 +271,10 @@ std::vector split_impl(const Tensor& x, // Calculate the number of out tensors size_t out_number; - if (num_or_sections.GetData().size() == 1) { + if (num_or_sections.size() == 1) { out_number = num_or_sections.GetData()[0]; } else { - out_number = num_or_sections.GetData().size(); + out_number = num_or_sections.size(); } std::vector out; @@ -449,54 +449,6 @@ std::tuple momentum_impl( return api_output; } -std::vector unbind_impl(const Tensor& input, int axis) { - auto kernel_key_set = ParseKernelKeyByInputArgs(input); - auto kernel_key = kernel_key_set.GetHighestPriorityKernelKey(); - - Backend kernel_backend = kernel_key.backend(); - DataLayout kernel_layout = kernel_key.layout(); - DataType kernel_data_type = kernel_key.dtype(); - - auto kernel = phi::KernelFactory::Instance().SelectKernelOrThrowError( - "unbind", {kernel_backend, kernel_layout, kernel_data_type}); - VLOG(6) << "unbind API kernel key: [" << kernel_backend << ", " - << kernel_layout << ", " << kernel_data_type << "]"; - VLOG(6) << "unbind API kernel: " << kernel; - - auto* dev_ctx = GetDeviceContextByBackend(kernel_backend); - - auto dense_input = PrepareData(input, kernel.InputAt(0), {}); - - // Calculate the number of 
out tensors - auto input_shape = input.dims(); - if (axis < 0) { - axis = input_shape.size() + axis; - } - auto out_num = input_shape[axis]; - - std::vector out; - auto dense_outs = SetKernelOutput(out_num, kernel_backend, &out); - std::vector meta_outs; - meta_outs.reserve(out_num); - std::vector meta_out_ptrs; - meta_out_ptrs.reserve(out_num); - for (int64_t i = 0; i < out_num; ++i) { - meta_outs.push_back(dense_outs[i]); - meta_out_ptrs.push_back(&meta_outs.back()); - } - - phi::UnbindInferMeta(MakeMetaTensor(*dense_input), axis, meta_out_ptrs); - - using kernel_signature = void (*)(const phi::DeviceContext&, - const phi::DenseTensor&, - int, - std::vector&); - auto* kernel_fn = kernel.GetVariadicKernelFn(); - (*kernel_fn)(*dev_ctx, *dense_input, axis, dense_outs); - - return out; -} - ////////////////// Backward(grad) api impls ////////////////////// // TODO(chenweihang): the original sum grad op can support higher-level @@ -674,71 +626,6 @@ std::tuple batch_norm_impl( return api_output; } -std::vector concat_grad_impl(const std::vector& x, - const Tensor& out_grad, - const Scalar& axis) { - auto kernel_key_set = ParseKernelKeyByInputArgs(out_grad); - auto kernel_key = kernel_key_set.GetHighestPriorityKernelKey(); - - Backend kernel_backend = kernel_key.backend(); - DataLayout kernel_layout = kernel_key.layout(); - DataType kernel_data_type = kernel_key.dtype(); - - auto kernel = phi::KernelFactory::Instance().SelectKernelOrThrowError( - "concat_grad", {kernel_backend, kernel_layout, kernel_data_type}); - VLOG(6) << "concat_grad API kernel key: [" << kernel_backend << ", " - << kernel_layout << ", " << kernel_data_type << "]"; - VLOG(6) << "concat_grad API kernel: " << kernel; - - auto* dev_ctx = GetDeviceContextByBackend(kernel_backend); - - // std::unique_ptr> - auto dense_x = PrepareData(x, kernel.InputAt(0), {}); - auto dense_out_grad = PrepareData(out_grad, kernel.InputAt(1), {}); - - // Calculate the number of out tensors - size_t out_number = x.size(); - std::vector x_grad; - auto dense_x_grad = SetKernelOutput(out_number, kernel_backend, &x_grad); - - std::vector meta_x; - meta_x.reserve(x.size()); - std::vector meta_x_ptrs; - meta_x_ptrs.reserve(x.size()); - for (const auto& t : *dense_x) { - meta_x.push_back(t); - meta_x_ptrs.push_back(&meta_x.back()); - } - - std::vector meta_x_grad; - meta_x_grad.reserve(x.size()); - std::vector meta_x_grad_ptrs; - meta_x_grad_ptrs.reserve(x.size()); - for (size_t i = 0; i < out_number; ++i) { - meta_x_grad.push_back(*dense_x_grad[i]); - meta_x_grad_ptrs.push_back(&meta_x_grad.back()); - } - - phi::UnchangedMultiInferMeta(meta_x_ptrs, meta_x_grad_ptrs); - - std::vector dense_x_ptr; - dense_x_ptr.reserve(x.size()); - for (const auto& t : *dense_x) { - dense_x_ptr.push_back(&t); - } - - using kernel_signature = void (*)(const platform::DeviceContext&, - const std::vector&, - const phi::DenseTensor&, - const phi::Scalar&, - std::vector); - auto* kernel_fn = kernel.GetVariadicKernelFn(); - (*kernel_fn)( - *dev_ctx, dense_x_ptr, *dense_out_grad, phi::Scalar(axis), dense_x_grad); - - return x_grad; -} - Tensor imag_grad_impl(const Tensor& out_grad) { phi::KernelKey kernel_key{ParseBackend(out_grad), out_grad.layout(), @@ -795,328 +682,5 @@ Tensor real_grad_impl(const Tensor& out_grad) { return out; } -std::vector stack_grad_impl(const std::vector& x, - const Tensor& out_grad, - int axis) { - auto kernel_key_set = ParseKernelKeyByInputArgs(out_grad); - auto kernel_key = kernel_key_set.GetHighestPriorityKernelKey(); - - Backend kernel_backend = 
kernel_key.backend(); - DataLayout kernel_layout = kernel_key.layout(); - DataType kernel_data_type = kernel_key.dtype(); - - auto kernel = phi::KernelFactory::Instance().SelectKernelOrThrowError( - "stack_grad", {kernel_backend, kernel_layout, kernel_data_type}); - VLOG(6) << "stack_grad API kernel key: [" << kernel_backend << ", " - << kernel_layout << ", " << kernel_data_type << "]"; - VLOG(6) << "stack_grad API kernel: " << kernel; - - auto* dev_ctx = GetDeviceContextByBackend(kernel_backend); - - auto dense_out_grad = PrepareData(out_grad, kernel.InputAt(0), {}); - - size_t out_number = x.size(); - std::vector x_grad; - auto dense_x_grad = SetKernelOutput(out_number, kernel_backend, &x_grad); - std::vector meta_x_grad; - meta_x_grad.reserve(out_number); - std::vector meta_x_grad_ptrs; - meta_x_grad_ptrs.reserve(out_number); - for (size_t i = 0; i < out_number; ++i) { - meta_x_grad.push_back(dense_x_grad[i]); - meta_x_grad_ptrs.push_back(&meta_x_grad.back()); - } - - phi::StackGradInferMeta( - MakeMetaTensor(*dense_out_grad), axis, meta_x_grad_ptrs); - - using kernel_signature = void (*)(const platform::DeviceContext&, - const phi::DenseTensor&, - int axis, - std::vector); - auto* kernel_fn = kernel.GetVariadicKernelFn(); - (*kernel_fn)(*dev_ctx, *dense_out_grad, axis, dense_x_grad); - - return x_grad; -} - -std::vector meshgrid_impl(const std::vector& inputs) { - Backend kernel_backend = Backend::UNDEFINED; - DataLayout kernel_layout = DataLayout::UNDEFINED; - DataType kernel_data_type = DataType::UNDEFINED; - - if (kernel_backend == Backend::UNDEFINED || - kernel_layout == DataLayout::UNDEFINED || - kernel_data_type == DataType::UNDEFINED) { - auto kernel_key_set = ParseKernelKeyByInputArgs(inputs); - auto kernel_key = kernel_key_set.GetHighestPriorityKernelKey(); - if (kernel_backend == Backend::UNDEFINED) { - kernel_backend = kernel_key.backend(); - } - if (kernel_layout == DataLayout::UNDEFINED) { - kernel_layout = kernel_key.layout(); - } - if (kernel_data_type == DataType::UNDEFINED) { - kernel_data_type = kernel_key.dtype(); - } - } - - const auto& kernel = phi::KernelFactory::Instance().SelectKernelOrThrowError( - "meshgrid", {kernel_backend, kernel_layout, kernel_data_type}); - VLOG(6) << "meshgrid API kernel key: [" << kernel_backend << ", " - << kernel_layout << ", " << kernel_data_type << "]"; - VLOG(6) << "meshgrid API kernel: " << kernel; - - auto* dev_ctx = GetDeviceContextByBackend(kernel_backend); - - auto input_inputs_vec = PrepareData(inputs, kernel.InputAt(0), {}); - std::vector input_inputs(input_inputs_vec->size()); - for (size_t i = 0; i < input_inputs.size(); ++i) { - input_inputs[i] = &input_inputs_vec->at(i); - } - - auto x_meta_vec = MakeMetaTensor(input_inputs); - std::vector inputs_metas(x_meta_vec.size()); - for (size_t i = 0; i < x_meta_vec.size(); ++i) { - inputs_metas[i] = &x_meta_vec[i]; - } - - // Calculate the number of out tensors - size_t out_number = inputs.size(); - - std::vector out; - auto dense_outs = SetKernelOutput(out_number, kernel_backend, &out); - - std::vector meta_outs; - meta_outs.reserve(out_number); - std::vector meta_out_ptrs; - meta_out_ptrs.reserve(out_number); - for (size_t i = 0; i < out_number; ++i) { - meta_outs.push_back(dense_outs[i]); - meta_out_ptrs.push_back(&meta_outs.back()); - } - phi::MeshgridInferMeta(inputs_metas, meta_out_ptrs); - - using kernel_signature = void (*)(const platform::DeviceContext&, - const std::vector&, - std::vector&); - auto* kernel_fn = kernel.GetVariadicKernelFn(); - (*kernel_fn)(*dev_ctx, 
input_inputs, dense_outs); - - return out; -} - -std::vector meshgrid_grad_impl( - const std::vector& inputs, - const std::vector& outputs_grad) { - Backend kernel_backend = Backend::UNDEFINED; - DataLayout kernel_layout = DataLayout::UNDEFINED; - DataType kernel_data_type = DataType::UNDEFINED; - - if (kernel_backend == Backend::UNDEFINED || - kernel_layout == DataLayout::UNDEFINED || - kernel_data_type == DataType::UNDEFINED) { - auto kernel_key_set = ParseKernelKeyByInputArgs(inputs, outputs_grad); - auto kernel_key = kernel_key_set.GetHighestPriorityKernelKey(); - if (kernel_backend == Backend::UNDEFINED) { - kernel_backend = kernel_key.backend(); - } - if (kernel_layout == DataLayout::UNDEFINED) { - kernel_layout = kernel_key.layout(); - } - if (kernel_data_type == DataType::UNDEFINED) { - kernel_data_type = kernel_key.dtype(); - } - } - - const auto& kernel = phi::KernelFactory::Instance().SelectKernelOrThrowError( - "meshgrid_grad", {kernel_backend, kernel_layout, kernel_data_type}); - VLOG(6) << "meshgrid_grad API kernel key: [" << kernel_backend << ", " - << kernel_layout << ", " << kernel_data_type << "]"; - VLOG(6) << "meshgrid_grad API kernel: " << kernel; - - auto* dev_ctx = GetDeviceContextByBackend(kernel_backend); - - auto input_inputs_vec = PrepareData(inputs, kernel.InputAt(0), {}); - std::vector input_inputs(input_inputs_vec->size()); - for (size_t i = 0; i < input_inputs.size(); ++i) { - input_inputs[i] = &input_inputs_vec->at(i); - } - auto input_outputs_grad_vec = - PrepareData(outputs_grad, kernel.InputAt(1), {}); - std::vector input_outputs_grad( - input_outputs_grad_vec->size()); - for (size_t i = 0; i < input_outputs_grad.size(); ++i) { - input_outputs_grad[i] = &input_outputs_grad_vec->at(i); - } - - size_t out_number = inputs.size(); - std::vector api_output; - auto kernel_out = SetKernelOutput(out_number, kernel_backend, &api_output); - - auto inputs_meta_vec = MakeMetaTensor(input_inputs); - std::vector inputs_metas(inputs_meta_vec.size()); - for (size_t i = 0; i < inputs_meta_vec.size(); ++i) { - inputs_metas[i] = &inputs_meta_vec[i]; - } - - auto outputs_grad_meta_vec = MakeMetaTensor(input_outputs_grad); - std::vector outputs_grad_metas( - outputs_grad_meta_vec.size()); - for (size_t i = 0; i < outputs_grad_meta_vec.size(); ++i) { - outputs_grad_metas[i] = &outputs_grad_meta_vec[i]; - } - - std::vector meta_outs; - meta_outs.reserve(out_number); - std::vector meta_out_ptrs; - meta_out_ptrs.reserve(out_number); - for (size_t i = 0; i < out_number; ++i) { - meta_outs.push_back(kernel_out[i]); - meta_out_ptrs.push_back(&meta_outs.back()); - } - - phi::MeshgridGradInferMeta(inputs_metas, outputs_grad_metas, meta_out_ptrs); - - using kernel_signature = void (*)(const platform::DeviceContext&, - const std::vector&, - const std::vector&, - std::vector&); - auto* kernel_fn = kernel.GetVariadicKernelFn(); - (*kernel_fn)(*dev_ctx, input_inputs, input_outputs_grad, kernel_out); - - return api_output; -} - -std::vector multi_dot_grad_impl(const std::vector& x, - const Tensor& out_grad) { - Backend kernel_backend = Backend::UNDEFINED; - DataLayout kernel_layout = DataLayout::UNDEFINED; - DataType kernel_data_type = DataType::UNDEFINED; - - if (kernel_backend == Backend::UNDEFINED || - kernel_layout == DataLayout::UNDEFINED || - kernel_data_type == DataType::UNDEFINED) { - auto kernel_key_set = ParseKernelKeyByInputArgs(x, out_grad); - auto kernel_key = kernel_key_set.GetHighestPriorityKernelKey(); - if (kernel_backend == Backend::UNDEFINED) { - kernel_backend = 
kernel_key.backend(); - } - if (kernel_layout == DataLayout::UNDEFINED) { - kernel_layout = kernel_key.layout(); - } - if (kernel_data_type == DataType::UNDEFINED) { - kernel_data_type = kernel_key.dtype(); - } - } - - VLOG(6) << "multi_dot_grad API kernel key: [" << kernel_backend << ", " - << kernel_layout << ", " << kernel_data_type << "]"; - const auto& kernel = phi::KernelFactory::Instance().SelectKernelOrThrowError( - "multi_dot_grad", {kernel_backend, kernel_layout, kernel_data_type}); - VLOG(6) << "multi_dot_grad API kernel: " << kernel; - - auto* dev_ctx = GetDeviceContextByBackend(kernel_backend); - - auto input_x_vec = PrepareData(x, kernel.InputAt(0), {}); - std::vector input_x(input_x_vec->size()); - for (size_t i = 0; i < input_x.size(); ++i) { - input_x[i] = &input_x_vec->at(i); - } - auto input_out_grad = PrepareData(out_grad, kernel.InputAt(1), {}); - - size_t out_number = input_x.size(); - std::vector api_output; - auto kernel_out = SetKernelOutput(out_number, kernel_backend, &api_output); - - auto x_meta_vec = MakeMetaTensor(input_x); - std::vector x_metas(x_meta_vec.size()); - for (size_t i = 0; i < x_meta_vec.size(); ++i) { - x_metas[i] = &x_meta_vec[i]; - } - - std::vector meta_outs; - meta_outs.reserve(out_number); - std::vector meta_out_ptrs; - meta_out_ptrs.reserve(out_number); - for (size_t i = 0; i < out_number; ++i) { - meta_outs.push_back(kernel_out[i]); - meta_out_ptrs.push_back(&meta_outs.back()); - } - - phi::MultiDotGradInferMeta( - x_metas, MakeMetaTensor(*input_out_grad), meta_out_ptrs); - - using kernel_signature = void (*)(const platform::DeviceContext&, - const std::vector&, - const phi::DenseTensor&, - std::vector&); - auto* kernel_fn = kernel.GetVariadicKernelFn(); - (*kernel_fn)(*dev_ctx, input_x, *input_out_grad, kernel_out); - - return api_output; -} - -std::vector multiplex_grad_impl(const std::vector& inputs, - const Tensor& ids, - const Tensor& out_grad) { - Backend kernel_backend = Backend::UNDEFINED; - DataLayout kernel_layout = DataLayout::UNDEFINED; - DataType kernel_data_type = DataType::UNDEFINED; - - if (kernel_backend == Backend::UNDEFINED || - kernel_layout == DataLayout::UNDEFINED || - kernel_data_type == DataType::UNDEFINED) { - auto kernel_key_set = ParseKernelKeyByInputArgs(out_grad); - auto kernel_key = kernel_key_set.GetHighestPriorityKernelKey(); - if (kernel_backend == Backend::UNDEFINED) { - kernel_backend = kernel_key.backend(); - } - if (kernel_layout == DataLayout::UNDEFINED) { - kernel_layout = kernel_key.layout(); - } - if (kernel_data_type == DataType::UNDEFINED) { - kernel_data_type = kernel_key.dtype(); - } - } - - VLOG(6) << "multiplex_grad API kernel key: [" << kernel_backend << ", " - << kernel_layout << ", " << kernel_data_type << "]"; - const auto& kernel = phi::KernelFactory::Instance().SelectKernelOrThrowError( - "multiplex_grad", {kernel_backend, kernel_layout, kernel_data_type}); - VLOG(6) << "multiplex_grad API kernel: " << kernel; - - auto* dev_ctx = GetDeviceContextByBackend(kernel_backend); - - auto input_ids = PrepareData(ids, kernel.InputAt(0), {}); - auto input_out_grad = PrepareData(out_grad, kernel.InputAt(1), {}); - - auto out_number = inputs.size(); - std::vector api_output; - auto kernel_out = SetKernelOutput(out_number, kernel_backend, &api_output); - - std::vector meta_outs; - meta_outs.reserve(out_number); - std::vector meta_out_ptrs; - meta_out_ptrs.reserve(out_number); - for (size_t i = 0; i < out_number; ++i) { - meta_outs.push_back(kernel_out[i]); - 
meta_out_ptrs.push_back(&meta_outs.back()); - } - - phi::MultiplexGradInferMeta(MakeMetaTensor(*input_ids), - MakeMetaTensor(*input_out_grad), - meta_out_ptrs); - - using kernel_signature = void (*)(const platform::DeviceContext&, - const phi::DenseTensor&, - const phi::DenseTensor&, - std::vector&); - auto* kernel_fn = kernel.GetVariadicKernelFn(); - (*kernel_fn)(*dev_ctx, *input_ids, *input_out_grad, kernel_out); - - return api_output; -} - } // namespace experimental } // namespace paddle diff --git a/paddle/phi/api/lib/api_custom_impl.h b/paddle/phi/api/lib/api_custom_impl.h index 0e360ce4a993f024dbf3eff4896b56f9ddf4fe60..0d1ba3e98e53e44998029bf9bf8b7fcb5745899d 100644 --- a/paddle/phi/api/lib/api_custom_impl.h +++ b/paddle/phi/api/lib/api_custom_impl.h @@ -30,6 +30,20 @@ namespace experimental { ////////////////// Forward api impls ////////////////////// +std::tuple batch_norm_impl( + const Tensor& x, + const Tensor& scale, + const Tensor& bias, + const Tensor& mean, + const Tensor& variance, + float momentum, + float epsilon, + const std::string& data_layout, + bool is_test, + bool use_global_stats, + bool trainable_statistics, + bool fuse_with_relu); + Tensor conv2d_impl(const Tensor& input, const Tensor& filter, const std::vector& strides, @@ -62,8 +76,6 @@ std::vector split_impl(const Tensor& x, const IntArray& num_or_sections, const Scalar& axis); -std::vector meshgrid_impl(const std::vector& inputs); - std::tuple momentum_impl( const Tensor& param, const Tensor& grad, @@ -77,49 +89,14 @@ std::tuple momentum_impl( bool multi_precision, float rescale_grad); -std::vector unbind_impl(const Tensor& input, int axis); - ////////////////// Backward(grad) api impls ////////////////////// std::vector add_n_grad_impl(const std::vector& x, const Tensor& out_grad); -std::tuple batch_norm_impl( - const Tensor& x, - const Tensor& scale, - const Tensor& bias, - const Tensor& mean, - const Tensor& variance, - float momentum, - float epsilon, - const std::string& data_layout, - bool is_test, - bool use_global_stats, - bool trainable_statistics, - bool fuse_with_relu); - -/************************ backward api impl ***************************/ - -std::vector concat_grad_impl(const std::vector& x, - const Tensor& out_grad, - const Scalar& axis); - Tensor imag_grad_impl(const Tensor& x); Tensor real_grad_impl(const Tensor& x); -std::vector stack_grad_impl(const std::vector& x, - const Tensor& out_grad, - int axis); -std::vector meshgrid_grad_impl(const std::vector& inputs, - const std::vector& outputs_grad); - -std::vector multi_dot_grad_impl(const std::vector& x, - const Tensor& out_grad); - -std::vector multiplex_grad_impl(const std::vector& inputs, - const Tensor& ids, - const Tensor& out_grad); - } // namespace experimental } // namespace paddle diff --git a/paddle/phi/api/lib/api_gen_utils.cc b/paddle/phi/api/lib/api_gen_utils.cc index 732ecacde94d7d28fd386ef5985eeb1a0a4e30df..f9db152956923a66c1e88af4841d59e834bcb726 100644 --- a/paddle/phi/api/lib/api_gen_utils.cc +++ b/paddle/phi/api/lib/api_gen_utils.cc @@ -76,6 +76,16 @@ std::vector MakeMetaTensor( return meta_tensors; } +std::vector MakeMetaTensor( + const std::vector& tensors) { + std::vector meta_tensors; + meta_tensors.reserve(tensors.size()); + for (auto* t : tensors) { + meta_tensors.emplace_back(*t); + } + return meta_tensors; +} + phi::MetaTensor MakeMetaTensor(const phi::SelectedRows& tensor) { return phi::MetaTensor(tensor); } diff --git a/paddle/phi/api/lib/api_gen_utils.h b/paddle/phi/api/lib/api_gen_utils.h index 
d7ecef61c5be31f5a30dad9cc1c8e82ba3405af9..035dfc5204720714346a260fc60db1362e542a85 100644 --- a/paddle/phi/api/lib/api_gen_utils.h +++ b/paddle/phi/api/lib/api_gen_utils.h @@ -53,6 +53,9 @@ phi::MetaTensor MakeMetaTensor(const phi::DenseTensor& tensor); std::vector MakeMetaTensor( const std::vector& tensors); +std::vector MakeMetaTensor( + const std::vector& tensors); + phi::MetaTensor MakeMetaTensor(const phi::SelectedRows& tensor); phi::MetaTensor MakeMetaTensor(const phi::StringTensor& tensor); diff --git a/paddle/phi/common/int_array.h b/paddle/phi/common/int_array.h index 490d7dabd4007b7445edfbca0dd2852e4bc2eaab..f9d07249e0fc9a4651932de32d9de4410c14b540 100644 --- a/paddle/phi/common/int_array.h +++ b/paddle/phi/common/int_array.h @@ -96,6 +96,8 @@ class IntArrayBase { template IntArrayBase(const IntArrayBase& other) : array_(other.GetData()) {} + size_t size() const { return array_.size(); } + const std::vector& GetData() const { return array_; } private: diff --git a/paddle/phi/core/compat/arg_map_context.h b/paddle/phi/core/compat/arg_map_context.h index 71cec011411641ffe34918f03162800b111275a2..122ebed21942a808d8e2f9d6224b37938778a06a 100644 --- a/paddle/phi/core/compat/arg_map_context.h +++ b/paddle/phi/core/compat/arg_map_context.h @@ -19,45 +19,33 @@ limitations under the License. */ #include #include "paddle/phi/common/place.h" +#include "paddle/phi/core/type_defs.h" #include "paddle/utils/any.h" #include "paddle/utils/flat_hash_map.h" #include "paddle/utils/small_vector.h" namespace phi { -constexpr char kGradVarSuffix[] = "@GRAD"; - -constexpr size_t kGradVarSuffixSize = 5U; - -inline std::string GradVarName(const std::string& var_name) { - std::string result; - result.reserve(var_name.size() + kGradVarSuffixSize); - result += var_name; - result += kGradVarSuffix; - return result; -} - // tuple(input_names, attr_names, output_names) -using KernelArgsTuple = std::tuple, - paddle::SmallVector, - paddle::SmallVector>; +using KernelArgsTuple = std::tuple, + paddle::SmallVector, + paddle::SmallVector>; struct KernelSignature { - std::string name; + const char* name; KernelArgsTuple args; KernelSignature() = default; - KernelSignature(std::string&& kernel_name, - paddle::SmallVector&& inputs, - paddle::SmallVector&& attrs, - paddle::SmallVector&& outputs) - : name(std::move(kernel_name)), - args(std::make_tuple(inputs, attrs, outputs)) {} - KernelSignature(const std::string& kernel_name, - const paddle::SmallVector& inputs, - const paddle::SmallVector& attrs, - const paddle::SmallVector& outputs) + KernelSignature(const char* kernel_name, + paddle::SmallVector&& inputs, + paddle::SmallVector&& attrs, + paddle::SmallVector&& outputs) + : name(kernel_name), args(std::make_tuple(inputs, attrs, outputs)) {} + KernelSignature(const char* kernel_name, + const paddle::SmallVector& inputs, + const paddle::SmallVector& attrs, + const paddle::SmallVector& outputs) : name(kernel_name), args(std::make_tuple(inputs, attrs, outputs)) {} // TODO(chenweihang): add assign constructor to solve windows compile diff --git a/paddle/phi/core/compat/convert_utils.cc b/paddle/phi/core/compat/convert_utils.cc index 43febb2ac0430d3e835c0b6f41b6be7a73c374d9..4fa11ac7860ef534e2e48973723407c4e318dfd2 100644 --- a/paddle/phi/core/compat/convert_utils.cc +++ b/paddle/phi/core/compat/convert_utils.cc @@ -102,7 +102,7 @@ phi::Place TransToPhiPlace(const Backend& backend, bool set_device_id) { } } -std::string TransToPhiKernelName(const std::string& fluid_op_name) { +const std::string& TransToPhiKernelName(const 
std::string& fluid_op_name) { return OpUtilsMap::Instance().GetBaseKernelName(fluid_op_name); } diff --git a/paddle/phi/core/compat/convert_utils.h b/paddle/phi/core/compat/convert_utils.h index 621459764873e6681d57813b227076db0b44dd04..5982ab0deff83da2242ab22e8b0cb154cc142e57 100644 --- a/paddle/phi/core/compat/convert_utils.h +++ b/paddle/phi/core/compat/convert_utils.h @@ -22,7 +22,7 @@ limitations under the License. */ namespace phi { -std::string TransToPhiKernelName(const std::string& fluid_op_name); +const std::string& TransToPhiKernelName(const std::string& fluid_op_name); const std::string& TransToFluidOpName(const std::string& phi_kernel_name); Backend TransToPhiBackend(const phi::Place& place); diff --git a/paddle/phi/core/compat/op_utils.h b/paddle/phi/core/compat/op_utils.h index 6716f4791803d470e326ba84f9322d6ee189e7a3..9c926fa871b67d02e299c928cff7ad9baba2d751 100644 --- a/paddle/phi/core/compat/op_utils.h +++ b/paddle/phi/core/compat/op_utils.h @@ -26,6 +26,8 @@ limitations under the License. */ namespace phi { +const static std::string deprecated_kernel_name = "deprecated"; // NOLINT + const std::unordered_set standard_kernel_suffixs({ "sr", // SelectedRows kernel "raw" // fallback kernel of origfinal fluid op @@ -134,9 +136,9 @@ class OpUtilsMap { arg_mapping_fn_map_.insert({std::move(op_type), std::move(fn)}); } - std::string GetBaseKernelName(const std::string& op_type) const { + const std::string& GetBaseKernelName(const std::string& op_type) const { if (deprecated_op_names.find(op_type) != deprecated_op_names.end()) { - return "deprecated"; + return deprecated_kernel_name; } auto it = base_kernel_name_map_.find(op_type); if (it == base_kernel_name_map_.end()) { @@ -150,7 +152,7 @@ class OpUtilsMap { auto it = arg_mapping_fn_map_.find(op_type); if (it == arg_mapping_fn_map_.end()) { auto func = - [op_type](const ArgumentMappingContext& ctx) -> KernelSignature { + [&op_type](const ArgumentMappingContext& ctx) -> KernelSignature { return DefaultKernelSignatureMap::Instance().Get(op_type); }; return func; diff --git a/paddle/phi/core/infermeta_utils.cc b/paddle/phi/core/infermeta_utils.cc index 0496d727e8d3b4d06e2860b2b6729d432591a690..70f26102cbad1a3d7e1116a7c9352ca54435ea80 100644 --- a/paddle/phi/core/infermeta_utils.cc +++ b/paddle/phi/core/infermeta_utils.cc @@ -20,14 +20,12 @@ void InferMetaContext::SetMetaConfig(MetaConfig config) { config_ = std::move(config); } -void InferMetaContext::EmplaceBackInput( - std::shared_ptr input) { +void InferMetaContext::EmplaceBackInput(MetaTensor input) { int index = inputs_.size(); inputs_.emplace_back(std::move(input)); input_range_.emplace_back(std::pair(index, index + 1)); } -void InferMetaContext::EmplaceBackOutput( - std::shared_ptr output) { +void InferMetaContext::EmplaceBackOutput(MetaTensor output) { int index = outputs_.size(); outputs_.emplace_back(std::move(output)); output_range_.emplace_back(std::pair(index, index + 1)); @@ -37,7 +35,7 @@ void InferMetaContext::EmplaceBackAttr(paddle::any attr) { } void InferMetaContext::EmplaceBackInputs( - paddle::SmallVector> inputs) { + paddle::SmallVector inputs) { int index = inputs_.size(); input_range_.emplace_back(std::pair(index, index + inputs.size())); inputs_.insert(inputs_.end(), @@ -45,7 +43,7 @@ void InferMetaContext::EmplaceBackInputs( std::make_move_iterator(inputs.end())); } void InferMetaContext::EmplaceBackOutputs( - paddle::SmallVector> outputs) { + paddle::SmallVector outputs) { int index = outputs_.size(); output_range_.emplace_back( std::pair(index, index + 
outputs.size())); @@ -64,24 +62,25 @@ const std::pair& InferMetaContext::OutputRangeAt(size_t idx) const { const MetaConfig& InferMetaContext::GetMetaConfig() const { return config_; } const MetaTensor& InferMetaContext::InputAt(size_t idx) const { - return *inputs_.at(idx); + return inputs_.at(idx); } -paddle::optional InferMetaContext::OptionalInputAt( +paddle::optional InferMetaContext::OptionalInputAt( size_t idx) const { const auto& input = inputs_.at(idx); - return input ? paddle::optional{static_cast< - const phi::MetaTensor&>(*input)} - : paddle::optional{paddle::none}; + return input.initialized() + ? paddle::optional{input} + : paddle::optional{paddle::none}; } -std::vector InferMetaContext::InputsBetween(size_t start, - size_t end) const { - std::vector result; +std::vector InferMetaContext::InputsBetween( + size_t start, size_t end) const { + std::vector result; result.reserve(end - start); for (size_t i = start; i < end; ++i) { - result.push_back(inputs_.at(i).get()); + auto& in = inputs_.at(i); + result.emplace_back(in.initialized() ? &in : nullptr); } return result; @@ -91,12 +90,13 @@ paddle::optional> InferMetaContext::OptionalInputsBetween(size_t start, size_t end) const { const auto& first = inputs_.at(start); - if (first) { + if (first.initialized()) { std::vector result; result.reserve(end - start); for (size_t i = start; i < end; ++i) { - result.push_back(inputs_.at(i).get()); + auto& in = inputs_.at(i); + result.emplace_back(in.initialized() ? &in : nullptr); } return paddle::optional>(result); @@ -105,7 +105,8 @@ InferMetaContext::OptionalInputsBetween(size_t start, size_t end) const { } MetaTensor* InferMetaContext::MutableOutputAt(size_t idx) { - return outputs_.at(idx).get(); + auto& out = outputs_.at(idx); + return out.initialized() ? &out : nullptr; } std::vector InferMetaContext::MutableOutputBetween(size_t start, @@ -113,7 +114,8 @@ std::vector InferMetaContext::MutableOutputBetween(size_t start, std::vector result; result.reserve(end - start); for (size_t i = start; i < end; ++i) { - result.emplace_back(outputs_.at(i).get()); + auto& out = outputs_.at(i); + result.emplace_back(out.initialized() ? 
&out : nullptr); } return result; } diff --git a/paddle/phi/core/infermeta_utils.h b/paddle/phi/core/infermeta_utils.h index fad437f82c331ae4f144be7f2d4fef4e1414a931..699c38ebd470236dcbdb641eaeb6873829e13f40 100644 --- a/paddle/phi/core/infermeta_utils.h +++ b/paddle/phi/core/infermeta_utils.h @@ -37,28 +37,28 @@ class InferMetaContext { explicit InferMetaContext(MetaConfig config) : config_(config) {} void SetMetaConfig(MetaConfig config); - void EmplaceBackInput(std::shared_ptr input); - void EmplaceBackOutput(std::shared_ptr output); + const MetaConfig& GetMetaConfig() const; + + void EmplaceBackInput(MetaTensor input); + void EmplaceBackOutput(MetaTensor output); void EmplaceBackAttr(paddle::any attr); void EmplaceBackInputs( - paddle::SmallVector> inputs); + paddle::SmallVector inputs); void EmplaceBackOutputs( - paddle::SmallVector> outputs); + paddle::SmallVector outputs); - const std::pair& InputRangeAt(size_t idx) const; - const std::pair& OutputRangeAt(size_t idx) const; + virtual const MetaTensor& InputAt(size_t idx) const; + virtual paddle::optional OptionalInputAt(size_t idx) const; - const MetaConfig& GetMetaConfig() const; - - const MetaTensor& InputAt(size_t idx) const; - paddle::optional OptionalInputAt(size_t idx) const; - std::vector InputsBetween(size_t start, size_t end) const; - paddle::optional> + virtual std::vector InputsBetween(size_t start, + size_t end) const; + virtual paddle::optional> OptionalInputsBetween(size_t start, size_t end) const; - MetaTensor* MutableOutputAt(size_t idx); - std::vector MutableOutputBetween(size_t start, size_t end); + virtual MetaTensor* MutableOutputAt(size_t idx); + virtual std::vector MutableOutputBetween(size_t start, + size_t end); template AttrType AttrAt(size_t idx) { @@ -73,19 +73,24 @@ class InferMetaContext { } } - private: + const std::pair& InputRangeAt(size_t idx) const; + const std::pair& OutputRangeAt(size_t idx) const; + + virtual ~InferMetaContext() = default; + + protected: MetaConfig config_; - // NOTE(chenweihang): Because the MetaTensor is a base class, and MetaTensor - // objects are all created in each round, so we have to use smart pointer - // here, maybe we can implemented a new InferMetaContext and a series utils - // specifically for fluid to avoid using shared_ptr - paddle::SmallVector> inputs_; - paddle::SmallVector> outputs_; - paddle::SmallVector attrs_; + paddle::SmallVector attrs_; - paddle::SmallVector> input_range_; - paddle::SmallVector> output_range_; + paddle::SmallVector, phi::kInputSmallVectorSize> + input_range_; + paddle::SmallVector, phi::kOutputSmallVectorSize> + output_range_; + + private: + paddle::SmallVector inputs_; + paddle::SmallVector outputs_; }; #define PD_INFER_META(...) \ @@ -159,7 +164,7 @@ struct InferMetaFnImpl { }; template - struct InferMetaFnCallHelper&, Tail...> { + struct InferMetaFnCallHelper&, Tail...> { template static void Call(InferMetaContext* ctx, PreviousArgs&... 
pargs) { static_assert(attr_idx == 0, @@ -167,7 +172,7 @@ struct InferMetaFnImpl { static_assert(out_idx == 0, "InferMeta's Input should appear before Outputs."); const std::pair range = ctx->InputRangeAt(in_idx); - std::vector arg = + std::vector arg = ctx->InputsBetween(range.first, range.second); InferMetaFnCallHelper< Tail...>::template Call(ctx, diff --git a/paddle/phi/core/kernel_context.cc b/paddle/phi/core/kernel_context.cc index 234e3528c363b948c0a3e3b22d5ee676660fce76..cf862cbde18f99b2b300ccfd794a4434b70dee97 100644 --- a/paddle/phi/core/kernel_context.cc +++ b/paddle/phi/core/kernel_context.cc @@ -79,7 +79,7 @@ void KernelContext::EmplaceBackAttr(paddle::any attr) { void KernelContext::AssignInputRange(std::pair&& range, size_t idx) { if (idx < input_range_.size()) { - input_range_[idx] = range; + input_range_[idx] = std::move(range); } else if (idx == input_range_.size()) { input_range_.emplace_back(range); } else { @@ -93,7 +93,7 @@ void KernelContext::AssignInputRange(std::pair&& range, size_t idx) { void KernelContext::AssignOutputRange(std::pair&& range, size_t idx) { if (idx < output_range_.size()) { - output_range_[idx] = range; + output_range_[idx] = std::move(range); } else if (idx == output_range_.size()) { output_range_.emplace_back(range); } else { diff --git a/paddle/phi/core/kernel_factory.cc b/paddle/phi/core/kernel_factory.cc index a1ce90c2c78aeb98396df90f41808eba7fdc863a..d3fd2e0204e54f1cbaed8049ac83abcad7efed7a 100644 --- a/paddle/phi/core/kernel_factory.cc +++ b/paddle/phi/core/kernel_factory.cc @@ -19,6 +19,8 @@ namespace phi { +const static Kernel empty_kernel; // NOLINT + uint32_t KernelKey::Hash::operator()(const KernelKey& key) const { uint32_t hash_value = 0; // |----31-20------|---19-12---|---11-8----|---7-0---| @@ -37,15 +39,15 @@ KernelFactory& KernelFactory::Instance() { return g_op_kernel_factory; } -Kernel KernelFactory::SelectKernel(const std::string& kernel_name, - const KernelKey& kernel_key) const { +const Kernel& KernelFactory::SelectKernel(const std::string& kernel_name, + const KernelKey& kernel_key) const { auto iter = kernels_.find(kernel_name); if (iter == kernels_.end()) { - return Kernel(); + return empty_kernel; } auto kernel_iter = iter->second.find(kernel_key); if (kernel_iter == iter->second.end()) { - return Kernel(); + return empty_kernel; } return kernel_iter->second; } @@ -59,8 +61,8 @@ KernelKeyMap KernelFactory::SelectKernelMap( return iter->second; } -bool KernelFactory::IsSelectKernelValid(const std::string& kernel_name, - const KernelKey& kernel_key) const { +bool KernelFactory::HasKernel(const std::string& kernel_name, + const KernelKey& kernel_key) const { auto iter = kernels_.find(kernel_name); PADDLE_ENFORCE_NE( iter, @@ -128,6 +130,16 @@ const Kernel& KernelFactory::SelectKernelOrThrowError( KernelKey(backend, layout, dtype)); } +const KernelArgsDef& KernelFactory::GetFirstKernelArgsDef( + const std::string& kernel_name) const { + auto iter = kernels_.find(kernel_name); + PADDLE_ENFORCE_NE( + iter, + kernels_.end(), + phi::errors::NotFound("The kernel `%s` is not registered.", kernel_name)); + return iter->second.cbegin()->second.args_def(); +} + // print kernel info with json format: // { // "(CPU, Undefined(AnyLayout), complex64)": { diff --git a/paddle/phi/core/kernel_factory.h b/paddle/phi/core/kernel_factory.h index 8fd25b691bdeb419f53413b87fdf3c5bc2edf543..812b6222cb5e293ffdaa1051462dcf945052fef9 100644 --- a/paddle/phi/core/kernel_factory.h +++ b/paddle/phi/core/kernel_factory.h @@ -151,30 +151,38 @@ class 
KernelArgsDef { attribute_defs_.emplace_back(AttributeArgDef(type_index)); } - const paddle::SmallVector& input_defs() const { + const paddle::SmallVector& input_defs() + const { return input_defs_; } - const paddle::SmallVector& output_defs() const { + const paddle::SmallVector& output_defs() + const { return output_defs_; } - const paddle::SmallVector& attribute_defs() const { + const paddle::SmallVector& + attribute_defs() const { return attribute_defs_; } - paddle::SmallVector& input_defs() { return input_defs_; } + paddle::SmallVector& input_defs() { + return input_defs_; + } - paddle::SmallVector& output_defs() { return output_defs_; } + paddle::SmallVector& output_defs() { + return output_defs_; + } - paddle::SmallVector& attribute_defs() { + paddle::SmallVector& attribute_defs() { return attribute_defs_; } private: - paddle::SmallVector input_defs_{{}}; - paddle::SmallVector output_defs_{{}}; - paddle::SmallVector attribute_defs_{{}}; + paddle::SmallVector input_defs_{{}}; + paddle::SmallVector output_defs_{{}}; + paddle::SmallVector attribute_defs_{ + {}}; }; class Kernel { @@ -209,7 +217,7 @@ class Kernel { TensorArgDef& OutputAt(size_t idx) { return args_def_.output_defs().at(idx); } - bool IsValid() { return fn_ != nullptr; } + bool IsValid() const { return fn_ != nullptr; } private: KernelFn fn_{nullptr}; @@ -246,14 +254,17 @@ class KernelFactory { DataLayout layout, DataType dtype) const; - bool IsSelectKernelValid(const std::string& kernel_name, - const KernelKey& kernel_key) const; + bool HasKernel(const std::string& kernel_name, + const KernelKey& kernel_key) const; - Kernel SelectKernel(const std::string& kernel_name, - const KernelKey& kernel_key) const; + const Kernel& SelectKernel(const std::string& kernel_name, + const KernelKey& kernel_key) const; KernelKeyMap SelectKernelMap(const std::string& kernel_name) const; + const KernelArgsDef& GetFirstKernelArgsDef( + const std::string& kernel_name) const; + private: KernelFactory() = default; diff --git a/paddle/phi/core/meta_tensor.cc b/paddle/phi/core/meta_tensor.cc index 04dfbf96031c2eb4fb021dbbd985f0df42a6fe34..2178855aa0fee147da59995b9b67b9423292793e 100644 --- a/paddle/phi/core/meta_tensor.cc +++ b/paddle/phi/core/meta_tensor.cc @@ -148,4 +148,6 @@ void MetaTensor::share_dims(const MetaTensor& meta_tensor) { } } +bool MetaTensor::initialized() const { return tensor_ != nullptr; } + } // namespace phi diff --git a/paddle/phi/core/meta_tensor.h b/paddle/phi/core/meta_tensor.h index 10c3a7c1a3de376d21805a12ff0b2c98ab4fcbd3..3cdbfda61d69c0bc3007a2f6a1128b60e9c8b87d 100644 --- a/paddle/phi/core/meta_tensor.h +++ b/paddle/phi/core/meta_tensor.h @@ -45,10 +45,10 @@ class MetaTensor { : tensor_(const_cast(&tensor)) {} MetaTensor(TensorBase& tensor) : tensor_(&tensor) {} // NOLINT - MetaTensor(const MetaTensor&) = default; MetaTensor(MetaTensor&&) = default; - MetaTensor& operator=(const MetaTensor&) = delete; - MetaTensor& operator=(MetaTensor&&) = delete; + MetaTensor& operator=(MetaTensor&&) = default; + MetaTensor(const MetaTensor&) = default; + MetaTensor& operator=(const MetaTensor&) = default; virtual ~MetaTensor() = default; @@ -64,6 +64,8 @@ class MetaTensor { virtual void share_meta(const MetaTensor& meta_tensor); virtual void share_dims(const MetaTensor& meta_tensor); + virtual bool initialized() const; + private: // Because the lod in compiletime and runtime is different, // so `LoD` cannot in public methods diff --git a/paddle/phi/core/type_defs.h b/paddle/phi/core/type_defs.h index 
3c879267bb8444fc0448194bc7718b482e98f77e..a1e7836088389561bd3f5e1ed2bf36abbdb33caa 100644 --- a/paddle/phi/core/type_defs.h +++ b/paddle/phi/core/type_defs.h @@ -22,7 +22,7 @@ class Kernel; class KernelKey; class KernelArgsDef; class KernelContext; -class KernelSignature; +struct KernelSignature; class ArgumentMappingContext; class InferMetaContext; @@ -35,4 +35,9 @@ using ArgumentMappingFn = std::function; using InferMetaFn = void (*)(InferMetaContext* ctx); +// Global SmallVector size setting +constexpr size_t kInputSmallVectorSize = 10U; +constexpr size_t kAttrSmallVectorSize = 10U; +constexpr size_t kOutputSmallVectorSize = 5U; + } // namespace phi diff --git a/paddle/phi/infermeta/backward.cc b/paddle/phi/infermeta/backward.cc index 84db67978fc23ca0b8e49b5cab0fa8207393a0f0..567f39a915c02f2a8e7a6b4d33f2bc43fecdc360 100644 --- a/paddle/phi/infermeta/backward.cc +++ b/paddle/phi/infermeta/backward.cc @@ -315,8 +315,8 @@ void MaxPoolWithIndexGradInferMeta(const MetaTensor& x, dx->share_meta(x); } -void MeshgridGradInferMeta(const std::vector& inputs, - const std::vector& outputs_grad, +void MeshgridGradInferMeta(const std::vector& inputs, + const std::vector& outputs_grad, std::vector inputs_grad) { PADDLE_ENFORCE_GT(outputs_grad.size(), 1, @@ -329,7 +329,7 @@ void MeshgridGradInferMeta(const std::vector& inputs, } } -void MultiDotGradInferMeta(const std::vector& x, +void MultiDotGradInferMeta(const std::vector& x, const MetaTensor& out_grad, std::vector x_grad) { PADDLE_ENFORCE_EQ( diff --git a/paddle/phi/infermeta/backward.h b/paddle/phi/infermeta/backward.h index c51708bb5439488c63797b98927ad83d10f842f6..6807438ebbb75350f9d03a92a62d019d2c4e8733 100644 --- a/paddle/phi/infermeta/backward.h +++ b/paddle/phi/infermeta/backward.h @@ -151,11 +151,11 @@ void MaxPoolWithIndexGradInferMeta(const MetaTensor& x, bool adaptive, MetaTensor* dx); -void MeshgridGradInferMeta(const std::vector& inputs, - const std::vector& outputs_grad, +void MeshgridGradInferMeta(const std::vector& inputs, + const std::vector& outputs_grad, std::vector inputs_grad); -void MultiDotGradInferMeta(const std::vector& x, +void MultiDotGradInferMeta(const std::vector& x, const MetaTensor& out_grad, std::vector x_grad); diff --git a/paddle/phi/infermeta/multiary.cc b/paddle/phi/infermeta/multiary.cc index 3ce24139fe18aba96e3b8c0b96bafec77a237d7d..152e04b74b0a9cf5fab013125a448d7395d0a25a 100644 --- a/paddle/phi/infermeta/multiary.cc +++ b/paddle/phi/infermeta/multiary.cc @@ -21,7 +21,8 @@ limitations under the License. 
*/ #include "paddle/phi/kernels/funcs/concat_funcs.h" namespace phi { -std::vector GetMetaTensorsDim(const std::vector& tensors) { +std::vector GetMetaTensorsDim( + const std::vector& tensors) { std::vector dims; dims.reserve(tensors.size()); for (const MetaTensor* tensor : tensors) { @@ -148,7 +149,7 @@ void AdamaxInferMeta(const MetaTensor& param, inf_norm_out->set_dtype(inf_norm.dtype()); } -void AddNInferMeta(const std::vector& x, +void AddNInferMeta(const std::vector& x, MetaTensor* out, MetaConfig config) { auto N = x.size(); @@ -511,7 +512,7 @@ void BilinearTensorProductInferMeta(const MetaTensor& x, out->set_dtype(x.dtype()); } -void BroadcastTensorsInferMeta(const std::vector& x, +void BroadcastTensorsInferMeta(const std::vector& x, std::vector out) { int target_rank = 0; const auto& input_dims = GetMetaTensorsDim(x); @@ -565,7 +566,7 @@ void BroadcastTensorsInferMeta(const std::vector& x, } } -void ConcatInferMeta(const std::vector& x, +void ConcatInferMeta(const std::vector& x, const Scalar& axis_scalar, MetaTensor* out, MetaConfig config) { @@ -1357,7 +1358,7 @@ void InterpolateInferMeta( } } -void MeshgridInferMeta(const std::vector& inputs, +void MeshgridInferMeta(const std::vector& inputs, std::vector outputs) { const size_t inputs_num = inputs.size(); @@ -1420,7 +1421,8 @@ void MomentumInferMeta(const MetaTensor& param, } } -void MultiDotInferMeta(const std::vector& x, MetaTensor* out) { +void MultiDotInferMeta(const std::vector& x, + MetaTensor* out) { auto inputs_dims = GetMetaTensorsDim(x); const size_t inputs_num = inputs_dims.size(); @@ -1493,7 +1495,7 @@ void MultiDotInferMeta(const std::vector& x, MetaTensor* out) { out->share_lod(*x.at(0)); } -void MultiplexInferMeta(const std::vector& ins, +void MultiplexInferMeta(const std::vector& ins, const MetaTensor& ids, MetaTensor* out) { PADDLE_ENFORCE_NE( @@ -1672,8 +1674,8 @@ void RmspropInferMeta(const MetaTensor& param, } void RnnInferMeta(const MetaTensor& x, - const std::vector& pre_state, - const std::vector& weight_list, + const std::vector& pre_state, + const std::vector& weight_list, paddle::optional sequence_length, float dropout_prob, bool is_bidirec, @@ -1779,7 +1781,7 @@ void SGDInferMeta(const MetaTensor& param, param_out->set_dtype(param.dtype()); } -void StackInferMeta(const std::vector& x, +void StackInferMeta(const std::vector& x, int axis, MetaTensor* out) { PADDLE_ENFORCE_GT(x.size(), @@ -1825,7 +1827,7 @@ void StackInferMeta(const std::vector& x, out->share_lod(*x.at(0)); } -void UnchangedMultiInferMeta(const std::vector& x, +void UnchangedMultiInferMeta(const std::vector& x, std::vector out) { for (size_t i = 0; i < x.size(); ++i) { out[i]->share_meta(*x[i]); diff --git a/paddle/phi/infermeta/multiary.h b/paddle/phi/infermeta/multiary.h index 7db4480ffb2e69024bfa39d7d88003e2da775de3..bf3e1d8af6e6f0d6d58c4482c449b756ff347bca 100644 --- a/paddle/phi/infermeta/multiary.h +++ b/paddle/phi/infermeta/multiary.h @@ -35,7 +35,8 @@ namespace phi { // // NOTE: The InferMeta Functions in this file are arranged in alphabetic order. 
-std::vector GetMetaTensorsDim(const std::vector& tensors); +std::vector GetMetaTensorsDim( + const std::vector& tensors); void AdadeltaInferMeta(const MetaTensor& param, const MetaTensor& grad, @@ -68,7 +69,7 @@ void AdamaxInferMeta(const MetaTensor& param, MetaTensor* moment_out, MetaTensor* inf_norm_out); -void AddNInferMeta(const std::vector& x, +void AddNInferMeta(const std::vector& x, MetaTensor* out, MetaConfig config = MetaConfig()); @@ -124,10 +125,10 @@ void BilinearTensorProductInferMeta(const MetaTensor& x, MetaTensor* out, MetaConfig config = MetaConfig()); -void BroadcastTensorsInferMeta(const std::vector& x, +void BroadcastTensorsInferMeta(const std::vector& x, std::vector out); -void ConcatInferMeta(const std::vector& x, +void ConcatInferMeta(const std::vector& x, const Scalar& axis_scalar, MetaTensor* out, MetaConfig config = MetaConfig()); @@ -178,7 +179,7 @@ void InterpolateInferMeta( MetaTensor* output, MetaConfig config = MetaConfig()); -void MeshgridInferMeta(const std::vector& inputs, +void MeshgridInferMeta(const std::vector& inputs, std::vector outputs); void MomentumInferMeta(const MetaTensor& param, @@ -196,9 +197,10 @@ void MomentumInferMeta(const MetaTensor& param, MetaTensor* velocity_out, MetaTensor* master_param_out); -void MultiDotInferMeta(const std::vector& x, MetaTensor* out); +void MultiDotInferMeta(const std::vector& x, + MetaTensor* out); -void MultiplexInferMeta(const std::vector& ins, +void MultiplexInferMeta(const std::vector& ins, const MetaTensor& ids, MetaTensor* out); @@ -227,8 +229,8 @@ void RmspropInferMeta(const MetaTensor& param, MetaTensor* mean_grad_out); void RnnInferMeta(const MetaTensor& x, - const std::vector& pre_state, - const std::vector& weight_list, + const std::vector& pre_state, + const std::vector& weight_list, paddle::optional sequence_length, float dropout_prob, bool is_bidirec, @@ -251,11 +253,11 @@ void SGDInferMeta(const MetaTensor& param, MetaTensor* param_out, MetaTensor* master_param_out); -void StackInferMeta(const std::vector& x, +void StackInferMeta(const std::vector& x, int axis, MetaTensor* out); -void UnchangedMultiInferMeta(const std::vector& x, +void UnchangedMultiInferMeta(const std::vector& x, std::vector out); void WarpctcInferMeta(const MetaTensor& logits, diff --git a/paddle/phi/kernels/concat_kernel.h b/paddle/phi/kernels/concat_kernel.h index cf83ab9aaabe135573a2887a01166f4a7bd0d5e1..f5ac2d3cbb75e9d175d7fad342723a5aa1db1dea 100644 --- a/paddle/phi/kernels/concat_kernel.h +++ b/paddle/phi/kernels/concat_kernel.h @@ -32,7 +32,7 @@ DenseTensor Concat(const Context& dev_ctx, const Scalar& axis) { std::vector meta_x; meta_x.reserve(x.size()); - std::vector meta_x_ptr; + std::vector meta_x_ptr; for (const auto* t : x) { meta_x.emplace_back(*t); meta_x_ptr.push_back(&meta_x.back()); diff --git a/paddle/phi/ops/compat/abs_sig.cc b/paddle/phi/ops/compat/abs_sig.cc index b4b94457e6be9f15ffbecad64cd9189c3e2c3b08..92d29dd0189b59699b3b847bacf4a0b8c8e3e4d8 100644 --- a/paddle/phi/ops/compat/abs_sig.cc +++ b/paddle/phi/ops/compat/abs_sig.cc @@ -21,8 +21,7 @@ KernelSignature AbsOpArgumentMapping(const ArgumentMappingContext& ctx) { } KernelSignature AbsGradOpArgumentMapping(const ArgumentMappingContext& ctx) { - return KernelSignature( - "abs_grad", {"X", GradVarName("Out")}, {}, {GradVarName("X")}); + return KernelSignature("abs_grad", {"X", "Out@GRAD"}, {}, {"X@GRAD"}); } KernelSignature AbsDoubleGradOpArgumentMapping( diff --git a/paddle/phi/ops/compat/activation_sig.cc b/paddle/phi/ops/compat/activation_sig.cc index 
8add832c366cfdc6bdf9e4cfdbe5b025afcf9b13..5900b49946623e1943132f0a38a82d0b25d76998 100644 --- a/paddle/phi/ops/compat/activation_sig.cc +++ b/paddle/phi/ops/compat/activation_sig.cc @@ -19,26 +19,22 @@ namespace phi { #define DEFINE_ACT_GRAD_DEPX_OP_ARGMAP(func_name, op_name, attrs) \ KernelSignature func_name##GradOpArgumentMapping( \ const ArgumentMappingContext& ctx) { \ - return KernelSignature(op_name "_grad", \ - {"X", GradVarName("Out")}, \ - {attrs}, \ - {GradVarName("X")}); \ + return KernelSignature( \ + op_name "_grad", {"X", "Out@GRAD"}, {attrs}, {"X@GRAD"}); \ } #define DEFINE_ACT_GRAD_DEPOUT_OP_ARGMAP(func_name, op_name, attrs) \ KernelSignature func_name##GradOpArgumentMapping( \ const ArgumentMappingContext& ctx) { \ - return KernelSignature(op_name "_grad", \ - {"Out", GradVarName("Out")}, \ - {attrs}, \ - {GradVarName("X")}); \ + return KernelSignature( \ + op_name "_grad", {"Out", "Out@GRAD"}, {attrs}, {"X@GRAD"}); \ } -#define DEFINE_ACT_GRAD_NODEP_OP_ARGMAP(func_name, op_name, attrs) \ - KernelSignature func_name##GradOpArgumentMapping( \ - const ArgumentMappingContext& ctx) { \ - return KernelSignature( \ - op_name "_grad", {GradVarName("Out")}, {attrs}, {GradVarName("X")}); \ +#define DEFINE_ACT_GRAD_NODEP_OP_ARGMAP(func_name, op_name, attrs) \ + KernelSignature func_name##GradOpArgumentMapping( \ + const ArgumentMappingContext& ctx) { \ + return KernelSignature( \ + op_name "_grad", {"Out@GRAD"}, {attrs}, {"X@GRAD"}); \ } #define comma , @@ -165,15 +161,12 @@ KernelSignature EluOpArgumentMapping(const ArgumentMappingContext& ctx) { } KernelSignature LogitGradOpArgumentMapping(const ArgumentMappingContext& ctx) { - return KernelSignature( - "logit_grad", {"X", GradVarName("Out")}, {"eps"}, {GradVarName("X")}); + return KernelSignature("logit_grad", {"X", "Out@GRAD"}, {"eps"}, {"X@GRAD"}); } KernelSignature EluGradOpArgumentMapping(const ArgumentMappingContext& ctx) { - return KernelSignature("elu_grad", - {"X", "Out", GradVarName("Out")}, - {"alpha"}, - {GradVarName("X")}); + return KernelSignature( + "elu_grad", {"X", "Out", "Out@GRAD"}, {"alpha"}, {"X@GRAD"}); } KernelSignature EluDoubleGradOpArgumentMapping( @@ -198,13 +191,11 @@ KernelSignature PowOpArgumentMapping(const ArgumentMappingContext& ctx) { KernelSignature PowGradOpArgumentMapping(const ArgumentMappingContext& ctx) { if (ctx.HasInput("FactorTensor")) { - return KernelSignature("pow_grad", - {"X", GradVarName("Out")}, - {"FactorTensor"}, - {GradVarName("X")}); + return KernelSignature( + "pow_grad", {"X", "Out@GRAD"}, {"FactorTensor"}, {"X@GRAD"}); } else { return KernelSignature( - "pow_grad", {"X", GradVarName("Out")}, {"factor"}, {GradVarName("X")}); + "pow_grad", {"X", "Out@GRAD"}, {"factor"}, {"X@GRAD"}); } } diff --git a/paddle/phi/ops/compat/addmm_sig.cc b/paddle/phi/ops/compat/addmm_sig.cc index b3bc0bb23a71e25aafe1c2e5038a60fdcf865a12..3919c875f56062b3991f866e38e659beccb162a2 100644 --- a/paddle/phi/ops/compat/addmm_sig.cc +++ b/paddle/phi/ops/compat/addmm_sig.cc @@ -17,11 +17,10 @@ namespace phi { KernelSignature AddmmGradOpArgumentMapping(const ArgumentMappingContext& ctx) { - return KernelSignature( - "addmm_grad", - {"Input", "X", "Y", GradVarName("Out")}, - {"Alpha", "Beta"}, - {GradVarName("Input"), GradVarName("X"), GradVarName("Y")}); + return KernelSignature("addmm_grad", + {"Input", "X", "Y", "Out@GRAD"}, + {"Alpha", "Beta"}, + {"Input@GRAD", "X@GRAD", "Y@GRAD"}); } } // namespace phi diff --git a/paddle/phi/ops/compat/argsort_sig.cc b/paddle/phi/ops/compat/argsort_sig.cc index 
62133a441ff126af9c1d65cf7c8af6f6571d8b32..70531f16916dd7f373131a87e2869e82603f2d31 100644 --- a/paddle/phi/ops/compat/argsort_sig.cc +++ b/paddle/phi/ops/compat/argsort_sig.cc @@ -19,9 +19,9 @@ namespace phi { KernelSignature ArgsortGradOpArgumentMapping( const ArgumentMappingContext& ctx) { return KernelSignature("argsort_grad", - {"Indices", "X", GradVarName("Out")}, + {"Indices", "X", "Out@GRAD"}, {"axis", "descending"}, - {GradVarName("X")}); + {"X@GRAD"}); } } // namespace phi diff --git a/paddle/phi/ops/compat/atan2_sig.cc b/paddle/phi/ops/compat/atan2_sig.cc index 8a6049e67b668e4cd97e928414bbca10bf29c0c4..9fef8560df979853fb61033ab15fd730bc4c74d3 100644 --- a/paddle/phi/ops/compat/atan2_sig.cc +++ b/paddle/phi/ops/compat/atan2_sig.cc @@ -17,10 +17,8 @@ namespace phi { KernelSignature Atan2GradOpArgumentMapping(const ArgumentMappingContext& ctx) { - return KernelSignature("atan2_grad", - {"X1", "X2", GradVarName("Out")}, - {}, - {GradVarName("X1"), GradVarName("X2")}); + return KernelSignature( + "atan2_grad", {"X1", "X2", "Out@GRAD"}, {}, {"X1@GRAD", "X2@GRAD"}); } } // namespace phi diff --git a/paddle/phi/ops/compat/batch_norm_sig.cc b/paddle/phi/ops/compat/batch_norm_sig.cc index cfd9f4def933a70ef8e75750a8663a828df767b0..14affe60b9d55e5797f5be7d861ae36543201ba7 100644 --- a/paddle/phi/ops/compat/batch_norm_sig.cc +++ b/paddle/phi/ops/compat/batch_norm_sig.cc @@ -57,27 +57,26 @@ KernelSignature BatchNormOpArgumentMapping(const ArgumentMappingContext& ctx) { KernelSignature BatchNormGradOpArgumentMapping( const ArgumentMappingContext& ctx) { - return KernelSignature( - "batch_norm_grad", - { - "X", - "Scale", - "Bias", - "Mean", - "Variance", - "SavedMean", - "SavedVariance", - "ReserveSpace", - GradVarName("Y"), - }, - {"momentum", - "epsilon", - "data_layout", - "is_test", - "use_global_stats", - "trainable_statistics", - "fuse_with_relu"}, - {GradVarName("X"), GradVarName("Scale"), GradVarName("Bias")}); + return KernelSignature("batch_norm_grad", + { + "X", + "Scale", + "Bias", + "Mean", + "Variance", + "SavedMean", + "SavedVariance", + "ReserveSpace", + "Y@GRAD", + }, + {"momentum", + "epsilon", + "data_layout", + "is_test", + "use_global_stats", + "trainable_statistics", + "fuse_with_relu"}, + {"X@GRAD", "Scale@GRAD", "Bias@GRAD"}); } KernelSignature BatchNormGradGradOpArgumentMapping( diff --git a/paddle/phi/ops/compat/bce_loss_sig.cc b/paddle/phi/ops/compat/bce_loss_sig.cc index 17f76067d22db57970d86165c9a1a204a3c34bda..5575fa277eb7feffd771fbc6e5dc931d1fc5e487 100644 --- a/paddle/phi/ops/compat/bce_loss_sig.cc +++ b/paddle/phi/ops/compat/bce_loss_sig.cc @@ -18,10 +18,8 @@ namespace phi { KernelSignature BCELossGradOpArgumentMapping( const ArgumentMappingContext& ctx) { - return KernelSignature("bce_loss_grad", - {"X", "Label", GradVarName("Out")}, - {}, - {GradVarName("X")}); + return KernelSignature( + "bce_loss_grad", {"X", "Label", "Out@GRAD"}, {}, {"X@GRAD"}); } } // namespace phi diff --git a/paddle/phi/ops/compat/bilinear_tensor_product_sig.cc b/paddle/phi/ops/compat/bilinear_tensor_product_sig.cc index 570bf7ce943d6de8693639bacf50c5883b2ec4e2..95a867fd3f7412eace8a9fd77c9179b4f339afb8 100644 --- a/paddle/phi/ops/compat/bilinear_tensor_product_sig.cc +++ b/paddle/phi/ops/compat/bilinear_tensor_product_sig.cc @@ -25,12 +25,9 @@ KernelSignature BilinearTensorProductOpArgumentMapping( KernelSignature BilinearTensorProductGradOpArgumentMapping( const ArgumentMappingContext& ctx) { return KernelSignature("bilinear_tensor_product_grad", - {"X", "Y", "Weight", 
GradVarName("Out")}, + {"X", "Y", "Weight", "Out@GRAD"}, {}, - {GradVarName("X"), - GradVarName("Y"), - GradVarName("Weight"), - GradVarName("Bias")}); + {"X@GRAD", "Y@GRAD", "Weight@GRAD", "Bias@GRAD"}); } } // namespace phi diff --git a/paddle/phi/ops/compat/broadcast_tensors_sig.cc b/paddle/phi/ops/compat/broadcast_tensors_sig.cc index 2c979c4aedcc88c3b6bc6664de9ae3175272eec6..d0fcbb33be2a752f16a1de99928935180cd9d3da 100644 --- a/paddle/phi/ops/compat/broadcast_tensors_sig.cc +++ b/paddle/phi/ops/compat/broadcast_tensors_sig.cc @@ -19,7 +19,7 @@ namespace phi { KernelSignature BroadcastTensorsGradOpArgumentMapping( const ArgumentMappingContext& ctx) { return KernelSignature( - "broadcast_tensors_grad", {GradVarName("Out")}, {}, {GradVarName("X")}); + "broadcast_tensors_grad", {"Out@GRAD"}, {}, {"X@GRAD"}); } } // namespace phi diff --git a/paddle/phi/ops/compat/cholesky_sig.cc b/paddle/phi/ops/compat/cholesky_sig.cc index 8c7ca75704669bf3af3c3b698deb8f61a6501693..9a26ea5c0c57bec5ed045b62dd1a1abedf1d5045 100644 --- a/paddle/phi/ops/compat/cholesky_sig.cc +++ b/paddle/phi/ops/compat/cholesky_sig.cc @@ -18,10 +18,8 @@ namespace phi { KernelSignature CholeskyGradOpArgumentMapping( const ArgumentMappingContext& ctx) { - return KernelSignature("cholesky_grad", - {"Out", GradVarName("Out")}, - {"upper"}, - {GradVarName("X")}); + return KernelSignature( + "cholesky_grad", {"Out", "Out@GRAD"}, {"upper"}, {"X@GRAD"}); } } // namespace phi diff --git a/paddle/phi/ops/compat/cholesky_solve_sig.cc b/paddle/phi/ops/compat/cholesky_solve_sig.cc index 6a9759f8352a0e073fcd282c6ec40f73adea9e7f..2696d80a49f43d7f09e2fd87b07715b02c08538b 100644 --- a/paddle/phi/ops/compat/cholesky_solve_sig.cc +++ b/paddle/phi/ops/compat/cholesky_solve_sig.cc @@ -19,9 +19,9 @@ namespace phi { KernelSignature CholeskySolveGradOpArgumentMapping( const ArgumentMappingContext& ctx) { return KernelSignature("cholesky_solve_grad", - {"X", "Y", "Out", GradVarName("Out")}, + {"X", "Y", "Out", "Out@GRAD"}, {"upper"}, - {GradVarName("X"), GradVarName("Y")}); + {"X@GRAD", "Y@GRAD"}); } } // namespace phi diff --git a/paddle/phi/ops/compat/clip_sig.cc b/paddle/phi/ops/compat/clip_sig.cc index 78fa6c36a51492d9339958621c95dac8457e97bd..25a34f2b9c89f82c2cc27128b0bf10dad6f7ea78 100644 --- a/paddle/phi/ops/compat/clip_sig.cc +++ b/paddle/phi/ops/compat/clip_sig.cc @@ -18,7 +18,7 @@ namespace phi { KernelSignature ClipOpArgumentMapping(const ArgumentMappingContext& ctx) { - paddle::SmallVector attr_names; + paddle::SmallVector attr_names; attr_names.emplace_back(ctx.HasInput("Min") ? "Min" : "min"); attr_names.emplace_back(ctx.HasInput("Max") ? 
"Max" : "max"); if (ctx.IsDenseTensorInput("X")) { @@ -57,27 +57,19 @@ KernelSignature ClipOpArgumentMapping(const ArgumentMappingContext& ctx) { KernelSignature ClipGradOpArgumentMapping(const ArgumentMappingContext& ctx) { if (ctx.HasInput("Min")) { if (ctx.HasInput("Max")) { - return KernelSignature("clip_grad", - {"X", GradVarName("Out")}, - {"Min", "Max"}, - {GradVarName("X")}); + return KernelSignature( + "clip_grad", {"X", "Out@GRAD"}, {"Min", "Max"}, {"X@GRAD"}); } else { - return KernelSignature("clip_grad", - {"X", GradVarName("Out")}, - {"Min", "max"}, - {GradVarName("X")}); + return KernelSignature( + "clip_grad", {"X", "Out@GRAD"}, {"Min", "max"}, {"X@GRAD"}); } } else { if (ctx.HasInput("Max")) { - return KernelSignature("clip_grad", - {"X", GradVarName("Out")}, - {"min", "Max"}, - {GradVarName("X")}); + return KernelSignature( + "clip_grad", {"X", "Out@GRAD"}, {"min", "Max"}, {"X@GRAD"}); } else { - return KernelSignature("clip_grad", - {"X", GradVarName("Out")}, - {"min", "max"}, - {GradVarName("X")}); + return KernelSignature( + "clip_grad", {"X", "Out@GRAD"}, {"min", "max"}, {"X@GRAD"}); } } } diff --git a/paddle/phi/ops/compat/complex_sig.cc b/paddle/phi/ops/compat/complex_sig.cc index b9f59c97fb50f1b5baaf907f3308f94e7e624424..88156677d34df82dd7fd68a910efbf9a8ac459d3 100644 --- a/paddle/phi/ops/compat/complex_sig.cc +++ b/paddle/phi/ops/compat/complex_sig.cc @@ -17,13 +17,11 @@ namespace phi { KernelSignature RealGradOpArgumentMapping(const ArgumentMappingContext& ctx) { - return KernelSignature( - "real_grad", {GradVarName("Out")}, {}, {GradVarName("X")}); + return KernelSignature("real_grad", {"Out@GRAD"}, {}, {"X@GRAD"}); } KernelSignature ImagGradOpArgumentMapping(const ArgumentMappingContext& ctx) { - return KernelSignature( - "imag_grad", {GradVarName("Out")}, {}, {GradVarName("X")}); + return KernelSignature("imag_grad", {"Out@GRAD"}, {}, {"X@GRAD"}); } } // namespace phi diff --git a/paddle/phi/ops/compat/concat_sig.cc b/paddle/phi/ops/compat/concat_sig.cc index d443f521c6146bfd21c57c73bd11eb0f2eec85cf..d53bb5793bc3a458aacfd633794a6a878e53f30e 100644 --- a/paddle/phi/ops/compat/concat_sig.cc +++ b/paddle/phi/ops/compat/concat_sig.cc @@ -25,15 +25,11 @@ KernelSignature ConcatOpArgumentMapping(const ArgumentMappingContext& ctx) { KernelSignature ConcatGradOpArgumentMapping(const ArgumentMappingContext& ctx) { if (ctx.HasInput("AxisTensor")) { - return KernelSignature("concat_grad", - {"X", {GradVarName("Out")}}, - {"AxisTensor"}, - {{GradVarName("X")}}); + return KernelSignature( + "concat_grad", {"X", {"Out@GRAD"}}, {"AxisTensor"}, {{"X@GRAD"}}); } - return KernelSignature("concat_grad", - {"X", {GradVarName("Out")}}, - {"axis"}, - {{GradVarName("X")}}); + return KernelSignature( + "concat_grad", {"X", {"Out@GRAD"}}, {"axis"}, {{"X@GRAD"}}); } } // namespace phi diff --git a/paddle/phi/ops/compat/conv2d_sig.cc b/paddle/phi/ops/compat/conv2d_sig.cc index 7cc0d6ad17535e5b4a62a1d0e5384d6c98cb3ca6..617c6e289bf2b21df624654026c0bede089b6d6c 100644 --- a/paddle/phi/ops/compat/conv2d_sig.cc +++ b/paddle/phi/ops/compat/conv2d_sig.cc @@ -46,7 +46,7 @@ KernelSignature Conv2dOpArgumentMapping(const ArgumentMappingContext& ctx) { KernelSignature Conv2dGradOpArgumentMapping(const ArgumentMappingContext& ctx) { return KernelSignature("conv2d_grad", - {"Input", "Filter", GradVarName("Output")}, + {"Input", "Filter", "Output@GRAD"}, {"strides", "paddings", "padding_algorithm", @@ -56,7 +56,7 @@ KernelSignature Conv2dGradOpArgumentMapping(const ArgumentMappingContext& ctx) { 
"use_addto", "workspace_size_MB", "exhaustive_search"}, - {GradVarName("Input"), GradVarName("Filter")}); + {"Input@GRAD", "Filter@GRAD"}); } KernelSignature Conv2dDoubleGradOpArgumentMapping( diff --git a/paddle/phi/ops/compat/conv3d_sig.cc b/paddle/phi/ops/compat/conv3d_sig.cc index b24c08b60c9507e2af9f3d423f9f44c0a39bd5dc..c6aae1bf5bb54e5557e1d925fa6b7c4aa5d1e500 100644 --- a/paddle/phi/ops/compat/conv3d_sig.cc +++ b/paddle/phi/ops/compat/conv3d_sig.cc @@ -33,7 +33,7 @@ KernelSignature Conv3dOpArgumentMapping(const ArgumentMappingContext& ctx) { KernelSignature Conv3dGradOpArgumentMapping(const ArgumentMappingContext& ctx) { return KernelSignature("conv2d_grad", - {"Input", "Filter", GradVarName("Output")}, + {"Input", "Filter", "Output@GRAD"}, {"strides", "paddings", "padding_algorithm", @@ -43,7 +43,7 @@ KernelSignature Conv3dGradOpArgumentMapping(const ArgumentMappingContext& ctx) { "use_addto", "workspace_size_MB", "exhaustive_search"}, - {GradVarName("Input"), GradVarName("Filter")}); + {"Input@GRAD", "Filter@GRAD"}); } KernelSignature Conv3dDoubleGradOpArgumentMapping( diff --git a/paddle/phi/ops/compat/conv_transpose_sig.cc b/paddle/phi/ops/compat/conv_transpose_sig.cc index 8697168b8274736ef0eb2db58135283928d3611c..a040bce6f78ee8581a2e5cd990883af97a303925 100644 --- a/paddle/phi/ops/compat/conv_transpose_sig.cc +++ b/paddle/phi/ops/compat/conv_transpose_sig.cc @@ -34,7 +34,7 @@ KernelSignature Conv2dTransposeOpArgumentMapping( KernelSignature Conv2dTransposeGradOpArgumentMapping( const ArgumentMappingContext& ctx) { return KernelSignature("conv2d_transpose_grad", - {"Input", "Filter", GradVarName("Output")}, + {"Input", "Filter", "Output@GRAD"}, {"strides", "paddings", "output_padding", @@ -43,7 +43,7 @@ KernelSignature Conv2dTransposeGradOpArgumentMapping( "groups", "dilations", "data_format"}, - {GradVarName("Input"), GradVarName("Filter")}); + {"Input@GRAD", "Filter@GRAD"}); } KernelSignature Conv2dTransposeDoubleGradOpArgumentMapping( @@ -79,7 +79,7 @@ KernelSignature Conv3dTransposeOpArgumentMapping( KernelSignature Conv3dTransposeGradOpArgumentMapping( const ArgumentMappingContext& ctx) { return KernelSignature("conv3d_transpose_grad", - {"Input", "Filter", GradVarName("Output")}, + {"Input", "Filter", "Output@GRAD"}, {"strides", "paddings", "output_padding", @@ -88,7 +88,7 @@ KernelSignature Conv3dTransposeGradOpArgumentMapping( "groups", "dilations", "data_format"}, - {GradVarName("Input"), GradVarName("Filter")}); + {"Input@GRAD", "Filter@GRAD"}); } KernelSignature DepthwiseConv2dTransposeOpArgumentMapping( @@ -109,7 +109,7 @@ KernelSignature DepthwiseConv2dTransposeOpArgumentMapping( KernelSignature DepthwiseConv2dTransposeGradOpArgumentMapping( const ArgumentMappingContext& ctx) { return KernelSignature("depthwise_conv2d_transpose_grad", - {"Input", "Filter", GradVarName("Output")}, + {"Input", "Filter", "Output@GRAD"}, {"strides", "paddings", "output_padding", @@ -118,7 +118,7 @@ KernelSignature DepthwiseConv2dTransposeGradOpArgumentMapping( "groups", "dilations", "data_format"}, - {GradVarName("Input"), GradVarName("Filter")}); + {"Input@GRAD", "Filter@GRAD"}); } } // namespace phi diff --git a/paddle/phi/ops/compat/cross_sig.cc b/paddle/phi/ops/compat/cross_sig.cc index 307c2ac5164b5daf24ae95ce3e6de53d9a7bfad0..2a8a46678cd280bf76a8118c8c06c1c6104b8733 100644 --- a/paddle/phi/ops/compat/cross_sig.cc +++ b/paddle/phi/ops/compat/cross_sig.cc @@ -21,10 +21,8 @@ KernelSignature CrossOpArgumentMapping(const ArgumentMappingContext& ctx) { } KernelSignature 
CrossGradOpArgumentMapping(const ArgumentMappingContext& ctx) { - return KernelSignature("cross_grad", - {"X", "Y", GradVarName("Out")}, - {"dim"}, - {GradVarName("X"), GradVarName("Y")}); + return KernelSignature( + "cross_grad", {"X", "Y", "Out@GRAD"}, {"dim"}, {"X@GRAD", "Y@GRAD"}); } } // namespace phi diff --git a/paddle/phi/ops/compat/cumprod_sig.cc b/paddle/phi/ops/compat/cumprod_sig.cc index 01084e764ed9e41ffb1e67cda26051f5a61fdeeb..ffe0ba75bb9df2472746874b8e2c5829cae15311 100644 --- a/paddle/phi/ops/compat/cumprod_sig.cc +++ b/paddle/phi/ops/compat/cumprod_sig.cc @@ -18,10 +18,8 @@ namespace phi { KernelSignature CumprodGradGradOpArgumentMapping( const ArgumentMappingContext& ctx) { - return KernelSignature("cumprod_grad", - {"X", "Out", GradVarName("Out")}, - {"dim"}, - {GradVarName("X")}); + return KernelSignature( + "cumprod_grad", {"X", "Out", "Out@GRAD"}, {"dim"}, {"X@GRAD"}); } } // namespace phi diff --git a/paddle/phi/ops/compat/deformable_conv_sig.cc b/paddle/phi/ops/compat/deformable_conv_sig.cc index a84a0840090873ea00b9384de7a5af80e0abd7d8..aa2537aa10e138554f36d40cd4fbfdeb8b8f6b45 100644 --- a/paddle/phi/ops/compat/deformable_conv_sig.cc +++ b/paddle/phi/ops/compat/deformable_conv_sig.cc @@ -33,17 +33,14 @@ KernelSignature DeformableConvGradOpArgumentMapping( const ArgumentMappingContext& ctx) { return KernelSignature( "deformable_conv_grad", - {"Input", "Offset", "Filter", "Mask", GradVarName("Output")}, + {"Input", "Offset", "Filter", "Mask", "Output@GRAD"}, {"strides", "paddings", "dilations", "deformable_groups", "groups", "im2col_step"}, - {GradVarName("Input"), - GradVarName("Offset"), - GradVarName("Filter"), - GradVarName("Mask")}); + {"Input@GRAD", "Offset@GRAD", "Filter@GRAD", "Mask@GRAD"}); } } // namespace phi diff --git a/paddle/phi/ops/compat/depthwise_conv2d_sig.cc b/paddle/phi/ops/compat/depthwise_conv2d_sig.cc index d2d7451ecafcecf84c6d345dfb5bde8cf38d4aac..1014d45e70a3fd4024c941fbfb55723ee7c3881a 100644 --- a/paddle/phi/ops/compat/depthwise_conv2d_sig.cc +++ b/paddle/phi/ops/compat/depthwise_conv2d_sig.cc @@ -36,7 +36,7 @@ KernelSignature DepthwiseConv2dOpArgumentMapping( KernelSignature DepthwiseConv2dGradOpArgumentMapping( const ArgumentMappingContext& ctx) { return KernelSignature("depthwise_conv2d_grad", - {"Input", "Filter", GradVarName("Output")}, + {"Input", "Filter", "Output@GRAD"}, {"strides", "paddings", "padding_algorithm", @@ -47,7 +47,7 @@ KernelSignature DepthwiseConv2dGradOpArgumentMapping( "workspace_size_MB", "exhaustive_search", "fuse_relu_before_depthwise_conv"}, - {GradVarName("Input"), GradVarName("Filter")}); + {"Input@GRAD", "Filter@GRAD"}); } KernelSignature DepthwiseConv2dDoubleGradOpArgumentMapping( diff --git a/paddle/phi/ops/compat/determinant_sig.cc b/paddle/phi/ops/compat/determinant_sig.cc index 7bcd30ec5d79b9e137c3dc3fa38f0498e9fe01de..ee1d53704c123f490f81aad6d6548a0a9fb85a4b 100644 --- a/paddle/phi/ops/compat/determinant_sig.cc +++ b/paddle/phi/ops/compat/determinant_sig.cc @@ -18,10 +18,8 @@ namespace phi { KernelSignature DeterminantGradOpArgumentMapping( const ArgumentMappingContext& ctx) { - return KernelSignature("determinant_grad", - {"Input", "Out", GradVarName("Out")}, - {}, - {GradVarName("Input")}); + return KernelSignature( + "determinant_grad", {"Input", "Out", "Out@GRAD"}, {}, {"Input@GRAD"}); } } // namespace phi diff --git a/paddle/phi/ops/compat/diag_sig.cc b/paddle/phi/ops/compat/diag_sig.cc index f3245b922c0d913a87b58f813bd0ca142ecb6287..b232c714c97100bbcc2e4d7633d6a4185360d15d 100644 --- 
a/paddle/phi/ops/compat/diag_sig.cc +++ b/paddle/phi/ops/compat/diag_sig.cc @@ -22,7 +22,7 @@ KernelSignature DiagOpArgumentMapping(const ArgumentMappingContext& ctx) { KernelSignature DiagGradOpArgumentMapping(const ArgumentMappingContext& ctx) { return KernelSignature( - "diag_grad", {"X", GradVarName("Out")}, {"offset"}, {GradVarName("X")}); + "diag_grad", {"X", "Out@GRAD"}, {"offset"}, {"X@GRAD"}); } } // namespace phi diff --git a/paddle/phi/ops/compat/diagonal_sig.cc b/paddle/phi/ops/compat/diagonal_sig.cc index b4a424ec06bf2b018de5a0aea4d268f669685fe9..94cecc3042a546f7ce21c4f5d68533b95f56c4d3 100644 --- a/paddle/phi/ops/compat/diagonal_sig.cc +++ b/paddle/phi/ops/compat/diagonal_sig.cc @@ -19,9 +19,9 @@ namespace phi { KernelSignature DiagonalGradOpArgumentMapping( const ArgumentMappingContext& ctx) { return KernelSignature("diagonal_grad", - {"Input", GradVarName("Out")}, + {"Input", "Out@GRAD"}, {"offset", "axis1", "axis2"}, - {GradVarName("Input")}); + {"Input@GRAD"}); } } // namespace phi diff --git a/paddle/phi/ops/compat/digamma_sig.cc b/paddle/phi/ops/compat/digamma_sig.cc index 12ef3056f1e680398e7ded901e72ed201d2f4a17..6c14dd9bf17449ef8aa38d3c92d96a5078acd267 100644 --- a/paddle/phi/ops/compat/digamma_sig.cc +++ b/paddle/phi/ops/compat/digamma_sig.cc @@ -18,8 +18,7 @@ namespace phi { KernelSignature DigammaGradOpArgumentMapping( const ArgumentMappingContext& ctx) { - return KernelSignature( - "digamma_grad", {"X", GradVarName("Out")}, {}, {GradVarName("X")}); + return KernelSignature("digamma_grad", {"X", "Out@GRAD"}, {}, {"X@GRAD"}); } } // namespace phi diff --git a/paddle/phi/ops/compat/dist_sig.cc b/paddle/phi/ops/compat/dist_sig.cc index 18a30b9b840483d7df3b3b009d079aea35a7d6bc..cc702fefbc940fc6c3bfb4949cba239e9f3b0f88 100644 --- a/paddle/phi/ops/compat/dist_sig.cc +++ b/paddle/phi/ops/compat/dist_sig.cc @@ -17,10 +17,8 @@ limitations under the License. */ namespace phi { KernelSignature DistGradOpArgumentMapping(const ArgumentMappingContext& ctx) { - return KernelSignature("dist_grad", - {"X", "Y", "Out", GradVarName("Out")}, - {"p"}, - {GradVarName("X"), GradVarName("Y")}); + return KernelSignature( + "dist_grad", {"X", "Y", "Out", "Out@GRAD"}, {"p"}, {"X@GRAD", "Y@GRAD"}); } } // namespace phi diff --git a/paddle/phi/ops/compat/dot_sig.cc b/paddle/phi/ops/compat/dot_sig.cc index 2437ecc1ca76720007f68ddb94439f03cb291a9a..2187a7eb4fca08c095ab3e6b7c58d5239abcce09 100644 --- a/paddle/phi/ops/compat/dot_sig.cc +++ b/paddle/phi/ops/compat/dot_sig.cc @@ -17,10 +17,8 @@ limitations under the License. 
*/ namespace phi { KernelSignature DotGradOpArgumentMapping(const ArgumentMappingContext& ctx) { - return KernelSignature("dot_grad", - {"X", "Y", GradVarName("Out")}, - {}, - {GradVarName("X"), GradVarName("Y")}); + return KernelSignature( + "dot_grad", {"X", "Y", "Out@GRAD"}, {}, {"X@GRAD", "Y@GRAD"}); } } // namespace phi diff --git a/paddle/phi/ops/compat/dropout_sig.cc b/paddle/phi/ops/compat/dropout_sig.cc index 6bf229c98bd07face5a5ba7778318cf1662f29a9..712c5cbb0d634bef52deaa1a04105484d6d7aad7 100644 --- a/paddle/phi/ops/compat/dropout_sig.cc +++ b/paddle/phi/ops/compat/dropout_sig.cc @@ -27,9 +27,9 @@ KernelSignature DropoutOpArgumentMapping(const ArgumentMappingContext& ctx) { KernelSignature DropoutGradOpArgumentMapping( const ArgumentMappingContext& ctx) { return KernelSignature("dropout_grad", - {"Mask", GradVarName("Out")}, + {"Mask", "Out@GRAD"}, {"dropout_prob", "is_test", "dropout_implementation"}, - {GradVarName("X")}); + {"X@GRAD"}); } } // namespace phi diff --git a/paddle/phi/ops/compat/eigh_sig.cc b/paddle/phi/ops/compat/eigh_sig.cc index e50a9a5a12a56493b8b81a1eacdb12051a3c362f..58718b6e32c66309482007d9a479263add84c36a 100644 --- a/paddle/phi/ops/compat/eigh_sig.cc +++ b/paddle/phi/ops/compat/eigh_sig.cc @@ -17,13 +17,11 @@ namespace phi { KernelSignature EighGradOpArgumentMapping(const ArgumentMappingContext& ctx) { - return KernelSignature("eigh_grad", - {"Eigenvalues", - "Eigenvectors", - GradVarName("Eigenvalues"), - GradVarName("Eigenvectors")}, - {}, - {GradVarName("X")}); + return KernelSignature( + "eigh_grad", + {"Eigenvalues", "Eigenvectors", "Eigenvalues@GRAD", "Eigenvectors@GRAD"}, + {}, + {"X@GRAD"}); } } // namespace phi diff --git a/paddle/phi/ops/compat/elementwise_sig.cc b/paddle/phi/ops/compat/elementwise_sig.cc index 0a58d86b05b06be6da363b0b274c8efdaedfe06a..19110eb0e0ab8c2d5b291f49873ac3f4d8c906e6 100644 --- a/paddle/phi/ops/compat/elementwise_sig.cc +++ b/paddle/phi/ops/compat/elementwise_sig.cc @@ -106,10 +106,8 @@ KernelSignature ElementwisePowOpArgumentMapping( KernelSignature ElementwiseAddGradOpArgumentMapping( const ArgumentMappingContext& ctx) { - return KernelSignature("add_grad", - {"X", "Y", GradVarName("Out")}, - {"axis"}, - {GradVarName("X"), GradVarName("Y")}); + return KernelSignature( + "add_grad", {"X", "Y", "Out@GRAD"}, {"axis"}, {"X@GRAD", "Y@GRAD"}); } KernelSignature ElementwiseAddDoubleGradOpArgumentMapping( @@ -128,10 +126,8 @@ KernelSignature ElementwiseAddTripleGradOpArgumentMapping( KernelSignature ElementwiseSubGradOpArgumentMapping( const ArgumentMappingContext& ctx) { - return KernelSignature("subtract_grad", - {"X", "Y", GradVarName("Out")}, - {"axis"}, - {GradVarName("X"), GradVarName("Y")}); + return KernelSignature( + "subtract_grad", {"X", "Y", "Out@GRAD"}, {"axis"}, {"X@GRAD", "Y@GRAD"}); } KernelSignature ElementwiseSubDoubleGradOpArgumentMapping( @@ -143,17 +139,15 @@ KernelSignature ElementwiseSubDoubleGradOpArgumentMapping( KernelSignature ElementwiseDivGradOpArgumentMapping( const ArgumentMappingContext& ctx) { return KernelSignature("divide_grad", - {"X", "Y", "Out", GradVarName("Out")}, + {"X", "Y", "Out", "Out@GRAD"}, {"axis"}, - {GradVarName("X"), GradVarName("Y")}); + {"X@GRAD", "Y@GRAD"}); } KernelSignature ElementwiseFMinGradOpArgumentMapping( const ArgumentMappingContext& ctx) { - return KernelSignature("fmin_grad", - {"X", "Y", GradVarName("Out")}, - {"axis"}, - {GradVarName("X"), GradVarName("Y")}); + return KernelSignature( + "fmin_grad", {"X", "Y", "Out@GRAD"}, {"axis"}, {"X@GRAD", "Y@GRAD"}); } 
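// --- Illustrative note (commentary, not part of the patch) -----------------
// The sig-file edits above and below all follow the same pattern: gradient
// variable names are written as plain string literals with the "@GRAD" suffix
// instead of being built at runtime via GradVarName(). Sketch of the resulting
// shape of a mapping, using a hypothetical "my_op" operator as the example:
//
//   KernelSignature MyOpGradOpArgumentMapping(const ArgumentMappingContext& ctx) {
//     // "Out@GRAD" / "X@GRAD" are the literal fluid variable names; the
//     // fixed suffix replaces the old GradVarName("Out") call.
//     return KernelSignature("my_op_grad", {"X", "Out@GRAD"}, {"axis"}, {"X@GRAD"});
//   }
// ----------------------------------------------------------------------------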
KernelSignature ElementwiseDivDoubleGradOpArgumentMapping( @@ -161,15 +155,13 @@ KernelSignature ElementwiseDivDoubleGradOpArgumentMapping( return KernelSignature("divide_double_grad", {"Y", "Out", "DX", "DDX", "DDY"}, {"axis"}, - {GradVarName("Y"), "DOut", "DDOut"}); + {"Y@GRAD", "DOut", "DDOut"}); } KernelSignature ElementwiseMulGradOpArgumentMapping( const ArgumentMappingContext& ctx) { - return KernelSignature("multiply_grad", - {"X", "Y", GradVarName("Out")}, - {"axis"}, - {GradVarName("X"), GradVarName("Y")}); + return KernelSignature( + "multiply_grad", {"X", "Y", "Out@GRAD"}, {"axis"}, {"X@GRAD", "Y@GRAD"}); } KernelSignature ElementwiseFMaxOpArgumentMapping( @@ -184,10 +176,8 @@ KernelSignature ElementwiseFMinOpArgumentMapping( KernelSignature ElementwiseFMaxGradOpArgumentMapping( const ArgumentMappingContext& ctx) { - return KernelSignature("fmax_grad", - {"X", "Y", GradVarName("Out")}, - {"axis"}, - {GradVarName("X"), GradVarName("Y")}); + return KernelSignature( + "fmax_grad", {"X", "Y", "Out@GRAD"}, {"axis"}, {"X@GRAD", "Y@GRAD"}); } KernelSignature ElementwiseMulDoubleGradOpArgumentMapping( @@ -195,7 +185,7 @@ KernelSignature ElementwiseMulDoubleGradOpArgumentMapping( return KernelSignature("multiply_double_grad", {"X", "Y", "DOut", "DDX", "DDY"}, {"axis"}, - {GradVarName("X"), GradVarName("Y"), "DDOut"}); + {"X@GRAD", "Y@GRAD", "DDOut"}); } KernelSignature ElementwiseMulTripleGradOpArgumentMapping( @@ -209,25 +199,21 @@ KernelSignature ElementwiseMulTripleGradOpArgumentMapping( KernelSignature ElementwiseMaxGradOpArgumentMapping( const ArgumentMappingContext& ctx) { - return KernelSignature("maximum_grad", - {"X", "Y", GradVarName("Out")}, - {"axis"}, - {GradVarName("X"), GradVarName("Y")}); + return KernelSignature( + "maximum_grad", {"X", "Y", "Out@GRAD"}, {"axis"}, {"X@GRAD", "Y@GRAD"}); } KernelSignature ElementwiseMinGradOpArgumentMapping( const ArgumentMappingContext& ctx) { - return KernelSignature("minimum_grad", - {"X", "Y", GradVarName("Out")}, - {"axis"}, - {GradVarName("X"), GradVarName("Y")}); + return KernelSignature( + "minimum_grad", {"X", "Y", "Out@GRAD"}, {"axis"}, {"X@GRAD", "Y@GRAD"}); } KernelSignature ElementwisePowGradOpArgumentMapping( const ArgumentMappingContext& ctx) { return KernelSignature("elementwise_pow_grad", - {"X", "Y", GradVarName("Out")}, + {"X", "Y", "Out@GRAD"}, {"axis"}, - {GradVarName("X"), GradVarName("Y")}); + {"X@GRAD", "Y@GRAD"}); } } // namespace phi diff --git a/paddle/phi/ops/compat/embedding_sig.cc b/paddle/phi/ops/compat/embedding_sig.cc index b79a381dcecc7943d0e82dbf122ece783cc33791..48debcafaf23564b7b841cda5effa5c597b057fa 100644 --- a/paddle/phi/ops/compat/embedding_sig.cc +++ b/paddle/phi/ops/compat/embedding_sig.cc @@ -30,26 +30,26 @@ KernelSignature EmbeddingGradOpArgumentMapping( if (ctx.IsDenseTensorInput("W")) { if ((paddle::any_cast<bool>(ctx.Attr("is_sparse"))) == true) { return KernelSignature("embedding_sparse_grad", - {"Ids", "W", GradVarName("Out")}, + {"Ids", "W", "Out@GRAD"}, {"padding_idx"}, - {GradVarName("W")}); + {"W@GRAD"}); } else { return KernelSignature("embedding_grad", - {"Ids", "W", GradVarName("Out")}, + {"Ids", "W", "Out@GRAD"}, {"padding_idx"}, - {GradVarName("W")}); + {"W@GRAD"}); } } else { if ((paddle::any_cast<bool>(ctx.Attr("is_sparse"))) == true) { return KernelSignature("sparse_weight_embedding_sparse_grad", - {"Ids", "W", GradVarName("Out")}, + {"Ids", "W", "Out@GRAD"}, {"padding_idx"}, - {GradVarName("W")}); + {"W@GRAD"}); } else { return KernelSignature("sparse_weight_embedding_grad", - {"Ids",
"W", GradVarName("Out")}, + {"Ids", "W", "Out@GRAD"}, {"padding_idx"}, - {GradVarName("W")}); + {"W@GRAD"}); } } } diff --git a/paddle/phi/ops/compat/erf_sig.cc b/paddle/phi/ops/compat/erf_sig.cc index 784727a98042db820b4f83bac84014ebd0e1302e..6cd94e46c3ec3bcb8a805052c657626c25e9fc39 100644 --- a/paddle/phi/ops/compat/erf_sig.cc +++ b/paddle/phi/ops/compat/erf_sig.cc @@ -17,8 +17,7 @@ namespace phi { KernelSignature ErfGradOpArgumentMapping(const ArgumentMappingContext& ctx) { - return KernelSignature( - "erf_grad", {"X", GradVarName("Out")}, {}, {GradVarName("X")}); + return KernelSignature("erf_grad", {"X", "Out@GRAD"}, {}, {"X@GRAD"}); } } // namespace phi diff --git a/paddle/phi/ops/compat/erfinv_sig.cc b/paddle/phi/ops/compat/erfinv_sig.cc index 490573191533f506bce082b264a9cf0520125d67..37d30aaaeb685681910df6e7a3bd5c1f7c98d6cc 100644 --- a/paddle/phi/ops/compat/erfinv_sig.cc +++ b/paddle/phi/ops/compat/erfinv_sig.cc @@ -17,8 +17,7 @@ namespace phi { KernelSignature ErfinvGradOpArgumentMapping(const ArgumentMappingContext& ctx) { - return KernelSignature( - "erfinv_grad", {"Out", GradVarName("Out")}, {}, {GradVarName("X")}); + return KernelSignature("erfinv_grad", {"Out", "Out@GRAD"}, {}, {"X@GRAD"}); } } // namespace phi diff --git a/paddle/phi/ops/compat/expand_as_sig.cc b/paddle/phi/ops/compat/expand_as_sig.cc index a616b63c10b3c6ef9bca8c906655da99e8912244..03b308f4a8b1dbaaefe7068fc3c0feba58346ccf 100644 --- a/paddle/phi/ops/compat/expand_as_sig.cc +++ b/paddle/phi/ops/compat/expand_as_sig.cc @@ -22,10 +22,8 @@ KernelSignature ExpandAsOpArgumentMapping(const ArgumentMappingContext& ctx) { KernelSignature ExpandAsGradOpArgumentMapping( const ArgumentMappingContext& ctx) { - return KernelSignature("expand_as_grad", - {"X", GradVarName("Out")}, - {"target_shape"}, - {GradVarName("X")}); + return KernelSignature( + "expand_as_grad", {"X", "Out@GRAD"}, {"target_shape"}, {"X@GRAD"}); } } // namespace phi diff --git a/paddle/phi/ops/compat/expand_sig.cc b/paddle/phi/ops/compat/expand_sig.cc index 9b0a1f5ab7df4aad1eec5deafc4203a8b1116399..b0f4ff79b4c5c268b4d2aac840bcab9529a2e8e1 100644 --- a/paddle/phi/ops/compat/expand_sig.cc +++ b/paddle/phi/ops/compat/expand_sig.cc @@ -39,20 +39,14 @@ KernelSignature ExpandGradOpArgumentMapping(const ArgumentMappingContext& ctx) { "expand_grad", {"X", "Out@GRAD"}, {"shape"}, {"X@GRAD"}); } if (ctx.HasInput("Shape")) { - return KernelSignature("expand_grad", - {"X", GradVarName("Out")}, - {"Shape"}, - {GradVarName("X")}); + return KernelSignature( + "expand_grad", {"X", "Out@GRAD"}, {"Shape"}, {"X@GRAD"}); } else if (ctx.InputSize("expand_shapes_tensor") > 0) { - return KernelSignature("expand_grad", - {"X", GradVarName("Out")}, - {"expand_shapes_tensor"}, - {GradVarName("X")}); + return KernelSignature( + "expand_grad", {"X", "Out@GRAD"}, {"expand_shapes_tensor"}, {"X@GRAD"}); } else { - return KernelSignature("expand_grad", - {"X", GradVarName("Out")}, - {"shape"}, - {GradVarName("X")}); + return KernelSignature( + "expand_grad", {"X", "Out@GRAD"}, {"shape"}, {"X@GRAD"}); } } diff --git a/paddle/phi/ops/compat/flatten_sig.cc b/paddle/phi/ops/compat/flatten_sig.cc index 3e8119c38cf510f2b134ae19e975b9e38cb0357f..122e0efa22b7c32ded4f9c1b5c88b84455053bba 100644 --- a/paddle/phi/ops/compat/flatten_sig.cc +++ b/paddle/phi/ops/compat/flatten_sig.cc @@ -31,7 +31,7 @@ KernelSignature FlattenOpArgumentMapping(const ArgumentMappingContext& ctx) { KernelSignature FlattenGradOpArgumentMapping( const ArgumentMappingContext& ctx) { return KernelSignature( - 
"flatten_grad", {"XShape", GradVarName("Out")}, {}, {GradVarName("X")}); + "flatten_grad", {"XShape", "Out@GRAD"}, {}, {"X@GRAD"}); } } // namespace phi diff --git a/paddle/phi/ops/compat/frobenius_norm_sig.cc b/paddle/phi/ops/compat/frobenius_norm_sig.cc index 8fddee5edb1d8a25f39786fb5d294e87b60c57bf..1fb53c36cafb2615b699fd53217b3924be998016 100644 --- a/paddle/phi/ops/compat/frobenius_norm_sig.cc +++ b/paddle/phi/ops/compat/frobenius_norm_sig.cc @@ -25,9 +25,9 @@ KernelSignature FrobeniusNormOpArgumentMapping( KernelSignature FrobeniusNormGradOpArgumentMapping( const ArgumentMappingContext& ctx) { return KernelSignature("frobenius_norm_grad", - {"X", "Out", GradVarName("Out")}, + {"X", "Out", "Out@GRAD"}, {"dim", "keep_dim", "reduce_all"}, - {GradVarName("X")}); + {"X@GRAD"}); } } // namespace phi diff --git a/paddle/phi/ops/compat/gather_scatter_sig.cc b/paddle/phi/ops/compat/gather_scatter_sig.cc index f71e30f85b09df041b02fbd4f34b69c0e85f92da..a942ebb44086f5e0199fa5c9434f40cf2eb8c0b8 100644 --- a/paddle/phi/ops/compat/gather_scatter_sig.cc +++ b/paddle/phi/ops/compat/gather_scatter_sig.cc @@ -17,25 +17,23 @@ namespace phi { KernelSignature GatherNdGradArgumentMapping(const ArgumentMappingContext& ctx) { - return KernelSignature("gather_nd_grad", - {"X", "Index", GradVarName("Out")}, - {}, - {GradVarName("X")}); + return KernelSignature( + "gather_nd_grad", {"X", "Index", "Out@GRAD"}, {}, {"X@GRAD"}); } KernelSignature ScatterGradArgumentMapping(const ArgumentMappingContext& ctx) { return KernelSignature("scatter_grad", - {"Ids", "Updates", GradVarName("Out")}, + {"Ids", "Updates", "Out@GRAD"}, {"overwrite"}, - {GradVarName("X"), GradVarName("Updates")}); + {"X@GRAD", "Updates@GRAD"}); } KernelSignature ScatterNdAddGradArgumentMapping( const ArgumentMappingContext& ctx) { return KernelSignature("scatter_nd_add_grad", - {"Index", "Updates", GradVarName("Out")}, + {"Index", "Updates", "Out@GRAD"}, {}, - {GradVarName("X"), GradVarName("Updates")}); + {"X@GRAD", "Updates@GRAD"}); } } // namespace phi diff --git a/paddle/phi/ops/compat/gather_sig.cc b/paddle/phi/ops/compat/gather_sig.cc index 6c47bbe48b8ee18527cfef41fad3488bef6c1dd9..af9e50638ce7026000e53e73c68794c2c9b01cda 100644 --- a/paddle/phi/ops/compat/gather_sig.cc +++ b/paddle/phi/ops/compat/gather_sig.cc @@ -27,14 +27,14 @@ KernelSignature GatherOpArgumentMapping(const ArgumentMappingContext& ctx) { KernelSignature GatherGradOpArgumentMapping(const ArgumentMappingContext& ctx) { if (ctx.HasInput("Axis")) { return KernelSignature("gather_grad", - {"X", "Index", GradVarName("Out")}, + {"X", "Index", "Out@GRAD"}, {"Axis", "overwrite"}, - {GradVarName("X")}); + {"X@GRAD"}); } else { return KernelSignature("gather_grad", - {"X", "Index", GradVarName("Out")}, + {"X", "Index", "Out@GRAD"}, {"axis", "overwrite"}, - {GradVarName("X")}); + {"X@GRAD"}); } } diff --git a/paddle/phi/ops/compat/gelu_sig.cc b/paddle/phi/ops/compat/gelu_sig.cc index bf4b47bcf5fa9c1fb9d03f6b332c0c867211f5ac..45a0ecea713f9cc53e2eb560f0b17db4d725235c 100644 --- a/paddle/phi/ops/compat/gelu_sig.cc +++ b/paddle/phi/ops/compat/gelu_sig.cc @@ -21,10 +21,8 @@ KernelSignature GeluOpArgumentMapping(const ArgumentMappingContext& ctx) { } KernelSignature GeluGradOpArgumentMapping(const ArgumentMappingContext& ctx) { - return KernelSignature("gelu_grad", - {"X", GradVarName("Out")}, - {"approximate"}, - {GradVarName("X")}); + return KernelSignature( + "gelu_grad", {"X", "Out@GRAD"}, {"approximate"}, {"X@GRAD"}); } } // namespace phi diff --git 
a/paddle/phi/ops/compat/graph_send_recv_sig.cc b/paddle/phi/ops/compat/graph_send_recv_sig.cc index cf36b9baa2d03e04ca883fed0ce5f4f80cd8bba7..9df2cf4d0fe9180bffe78bde3c0940cad34c86cd 100644 --- a/paddle/phi/ops/compat/graph_send_recv_sig.cc +++ b/paddle/phi/ops/compat/graph_send_recv_sig.cc @@ -28,9 +28,9 @@ KernelSignature GraphSendRecvGradOpArgumentMapping( const ArgumentMappingContext& ctx) { return KernelSignature( "graph_send_recv_grad", - {"X", "Src_index", "Dst_index", "Out", "Dst_count", GradVarName("Out")}, + {"X", "Src_index", "Dst_index", "Out", "Dst_count", "Out@GRAD"}, {"pool_type"}, - {GradVarName("X")}); + {"X@GRAD"}); } } // namespace phi diff --git a/paddle/phi/ops/compat/grid_sampler_sig.cc b/paddle/phi/ops/compat/grid_sampler_sig.cc index b76a9770d4dede5ea604f69858201c2fb035070d..486d5230ee7a619bf2ee12e53ed05594cf81c753 100644 --- a/paddle/phi/ops/compat/grid_sampler_sig.cc +++ b/paddle/phi/ops/compat/grid_sampler_sig.cc @@ -27,9 +27,9 @@ KernelSignature GridSamplerOpArgumentMapping( KernelSignature GridSamplerGradOpArgumentMapping( const ArgumentMappingContext& ctx) { return KernelSignature("grid_sample_grad", - {"X", "Grid", GradVarName("Output")}, + {"X", "Grid", "Output@GRAD"}, {"mode", "padding_mode", "align_corners"}, - {GradVarName("X"), GradVarName("Grid")}); + {"X@GRAD", "Grid@GRAD"}); } } // namespace phi diff --git a/paddle/phi/ops/compat/gumbel_softmax_sig.cc b/paddle/phi/ops/compat/gumbel_softmax_sig.cc index c7585a4e5f39acc2d7793526f6a5ca7948c370f3..65537f8c8948a80711566b523c1932c481b9c66d 100644 --- a/paddle/phi/ops/compat/gumbel_softmax_sig.cc +++ b/paddle/phi/ops/compat/gumbel_softmax_sig.cc @@ -18,10 +18,8 @@ namespace phi { KernelSignature GumbelSoftmaxGradOpArgumentMapping( const ArgumentMappingContext& ctx) { - return KernelSignature("gumbel_softmax_grad", - {"Out", GradVarName("Out")}, - {"axis"}, - {GradVarName("X")}); + return KernelSignature( + "gumbel_softmax_grad", {"Out", "Out@GRAD"}, {"axis"}, {"X@GRAD"}); } } // namespace phi diff --git a/paddle/phi/ops/compat/hierarchical_sigmoid_sig.cc b/paddle/phi/ops/compat/hierarchical_sigmoid_sig.cc index 58c190fb657bb1d1952a16843dfb4a013d0a3365..5393439901b915771f0d56d3b90a09d6c800c067 100644 --- a/paddle/phi/ops/compat/hierarchical_sigmoid_sig.cc +++ b/paddle/phi/ops/compat/hierarchical_sigmoid_sig.cc @@ -32,44 +32,42 @@ KernelSignature HierarchicalSigmoidOpArgumentMapping( KernelSignature HierarchicalSigmoidGradOpArgumentMapping( const ArgumentMappingContext& ctx) { - if (ctx.IsDenseTensorOutput(GradVarName("W"))) { - return KernelSignature( - "hierarchical_sigmoid_grad", - {"X", - "W", - "Label", - "PathTable", - "PathCode", - "Bias", - "PreOut", - GradVarName("Out")}, - {"num_classes", - "remote_prefetch", - "trainer_id", - "height_sections", - "epmap", - "table_names", - "is_sparse"}, - {GradVarName("X"), GradVarName("W"), GradVarName("Bias")}); - } else if (ctx.IsSelectedRowsOutput(GradVarName("W"))) { - return KernelSignature( - "hierarchical_sigmoid_grad_sr", - {"X", - "W", - "Label", - "PathTable", - "PathCode", - "Bias", - "PreOut", - GradVarName("Out")}, - {"num_classes", - "remote_prefetch", - "trainer_id", - "height_sections", - "epmap", - "table_names", - "is_sparse"}, - {GradVarName("X"), GradVarName("W"), GradVarName("Bias")}); + if (ctx.IsDenseTensorOutput("W@GRAD")) { + return KernelSignature("hierarchical_sigmoid_grad", + {"X", + "W", + "Label", + "PathTable", + "PathCode", + "Bias", + "PreOut", + "Out@GRAD"}, + {"num_classes", + "remote_prefetch", + "trainer_id", + 
"height_sections", + "epmap", + "table_names", + "is_sparse"}, + {"X@GRAD", "W@GRAD", "Bias@GRAD"}); + } else if (ctx.IsSelectedRowsOutput("W@GRAD")) { + return KernelSignature("hierarchical_sigmoid_grad_sr", + {"X", + "W", + "Label", + "PathTable", + "PathCode", + "Bias", + "PreOut", + "Out@GRAD"}, + {"num_classes", + "remote_prefetch", + "trainer_id", + "height_sections", + "epmap", + "table_names", + "is_sparse"}, + {"X@GRAD", "W@GRAD", "Bias@GRAD"}); } else { return KernelSignature("unregistered", {}, {}, {}); } diff --git a/paddle/phi/ops/compat/huber_loss_sig.cc b/paddle/phi/ops/compat/huber_loss_sig.cc index 6f669a4a8b697a1df83429773b257014d709756c..b7bf143fd404158067cd530599f64d4c3b86827a 100644 --- a/paddle/phi/ops/compat/huber_loss_sig.cc +++ b/paddle/phi/ops/compat/huber_loss_sig.cc @@ -24,9 +24,9 @@ KernelSignature HuberLossOpArgumentMapping(const ArgumentMappingContext& ctx) { KernelSignature HuberLossGradOpArgumentMapping( const ArgumentMappingContext& ctx) { return KernelSignature("huber_loss_grad", - {"Residual", GradVarName("Out")}, + {"Residual", "Out@GRAD"}, {"delta"}, - {GradVarName("X"), GradVarName("Y")}); + {"X@GRAD", "Y@GRAD"}); } } // namespace phi diff --git a/paddle/phi/ops/compat/index_sample_sig.cc b/paddle/phi/ops/compat/index_sample_sig.cc index 3b7e3f063d6c108d8a366c68720317c8188aa5a4..9c1b7e27f04ec701e02c66ddf3735ef0b02b7f40 100644 --- a/paddle/phi/ops/compat/index_sample_sig.cc +++ b/paddle/phi/ops/compat/index_sample_sig.cc @@ -18,10 +18,8 @@ namespace phi { KernelSignature IndexSampleGradOpArgumentMapping( const ArgumentMappingContext& ctx) { - return KernelSignature("index_sample_grad", - {"X", "Index", GradVarName("Out")}, - {}, - {GradVarName("X")}); + return KernelSignature( + "index_sample_grad", {"X", "Index", "Out@GRAD"}, {}, {"X@GRAD"}); } } // namespace phi diff --git a/paddle/phi/ops/compat/index_select_sig.cc b/paddle/phi/ops/compat/index_select_sig.cc index 53eff1bbcd7ed5269299ccfe41631a699e3d0a32..096ad2332c9ab187ecd352a1527fd0ccb6cdf156 100644 --- a/paddle/phi/ops/compat/index_select_sig.cc +++ b/paddle/phi/ops/compat/index_select_sig.cc @@ -18,10 +18,8 @@ namespace phi { KernelSignature IndexSelectGradOpArgumentMapping( const ArgumentMappingContext& ctx) { - return KernelSignature("index_select_grad", - {"X", "Index", GradVarName("Out")}, - {"dim"}, - {GradVarName("X")}); + return KernelSignature( + "index_select_grad", {"X", "Index", "Out@GRAD"}, {"dim"}, {"X@GRAD"}); } } // namespace phi diff --git a/paddle/phi/ops/compat/interpolate_sig.cc b/paddle/phi/ops/compat/interpolate_sig.cc index ba0e971e4ab009506bb5504a40e6b21d866061d0..61b022407306076c0852322b9a38ebdf7e14f75f 100644 --- a/paddle/phi/ops/compat/interpolate_sig.cc +++ b/paddle/phi/ops/compat/interpolate_sig.cc @@ -92,81 +92,76 @@ KernelSignature BicubicInterpOpArgumentMapping( KernelSignature BilinearInterpGradOpArgumentMapping( const ArgumentMappingContext& ctx) { - return KernelSignature( - "bilinear_interp_v2_grad", - {"X", "OutSize", "SizeTensor", "Scale", GradVarName("Out")}, - {"data_layout", - "out_d", - "out_h", - "out_w", - "scale", - "interp_method", - "align_corners", - "align_mode"}, - {GradVarName("X")}); + return KernelSignature("bilinear_interp_v2_grad", + {"X", "OutSize", "SizeTensor", "Scale", "Out@GRAD"}, + {"data_layout", + "out_d", + "out_h", + "out_w", + "scale", + "interp_method", + "align_corners", + "align_mode"}, + {"X@GRAD"}); } KernelSignature NearestInterpGradOpArgumentMapping( const ArgumentMappingContext& ctx) { - return KernelSignature( - 
"nearest_interp_v2_grad", - {"X", "OutSize", "SizeTensor", "Scale", GradVarName("Out")}, - {"data_layout", - "out_d", - "out_h", - "out_w", - "scale", - "interp_method", - "align_corners", - "align_mode"}, - {GradVarName("X")}); + return KernelSignature("nearest_interp_v2_grad", + {"X", "OutSize", "SizeTensor", "Scale", "Out@GRAD"}, + {"data_layout", + "out_d", + "out_h", + "out_w", + "scale", + "interp_method", + "align_corners", + "align_mode"}, + {"X@GRAD"}); } KernelSignature TrilinearInterpGradOpArgumentMapping( const ArgumentMappingContext& ctx) { - return KernelSignature( - "trilinear_interp_v2_grad", - {"X", "OutSize", "SizeTensor", "Scale", GradVarName("Out")}, - {"data_layout", - "out_d", - "out_h", - "out_w", - "scale", - "interp_method", - "align_corners", - "align_mode"}, - {GradVarName("X")}); + return KernelSignature("trilinear_interp_v2_grad", + {"X", "OutSize", "SizeTensor", "Scale", "Out@GRAD"}, + {"data_layout", + "out_d", + "out_h", + "out_w", + "scale", + "interp_method", + "align_corners", + "align_mode"}, + {"X@GRAD"}); } KernelSignature LinearInterpGradOpArgumentMapping( const ArgumentMappingContext& ctx) { - return KernelSignature( - "linear_interp_v2_grad", - {"X", "OutSize", "SizeTensor", "Scale", GradVarName("Out")}, - {"data_layout", - "out_d", - "out_h", - "out_w", - "scale", - "interp_method", - "align_corners", - "align_mode"}, - {GradVarName("X")}); + return KernelSignature("linear_interp_v2_grad", + {"X", "OutSize", "SizeTensor", "Scale", "Out@GRAD"}, + {"data_layout", + "out_d", + "out_h", + "out_w", + "scale", + "interp_method", + "align_corners", + "align_mode"}, + {"X@GRAD"}); } KernelSignature BicubicInterpGradOpArgumentMapping( const ArgumentMappingContext& ctx) { - return KernelSignature( - "bicubic_interp_v2_grad", - {"X", "OutSize", "SizeTensor", "Scale", GradVarName("Out")}, - {"data_layout", - "out_d", - "out_h", - "out_w", - "scale", - "interp_method", - "align_corners", - "align_mode"}, - {GradVarName("X")}); + return KernelSignature("bicubic_interp_v2_grad", + {"X", "OutSize", "SizeTensor", "Scale", "Out@GRAD"}, + {"data_layout", + "out_d", + "out_h", + "out_w", + "scale", + "interp_method", + "align_corners", + "align_mode"}, + {"X@GRAD"}); } } // namespace phi diff --git a/paddle/phi/ops/compat/kldiv_loss_sig.cc b/paddle/phi/ops/compat/kldiv_loss_sig.cc index 22d2f074e9f13c7ba65c6bcbb4b5542881d4128c..8af0edd316487479a8b6d725c9b32eebf93eef10 100644 --- a/paddle/phi/ops/compat/kldiv_loss_sig.cc +++ b/paddle/phi/ops/compat/kldiv_loss_sig.cc @@ -20,9 +20,9 @@ namespace phi { KernelSignature KLDivLossGradOpArgumentMapping( const ArgumentMappingContext& ctx) { return KernelSignature("kldiv_loss_grad", - {"X", "Target", GradVarName("Loss")}, + {"X", "Target", "Loss@GRAD"}, {"reduction"}, - {GradVarName("X")}); + {"X@GRAD"}); } } // namespace phi diff --git a/paddle/phi/ops/compat/kron_sig.cc b/paddle/phi/ops/compat/kron_sig.cc index 06b6545f58e7c12964f82fd8b6199270c519c16a..e2ba41dcadd9d705e9d8c14b4c8430d4ec216a89 100644 --- a/paddle/phi/ops/compat/kron_sig.cc +++ b/paddle/phi/ops/compat/kron_sig.cc @@ -17,10 +17,8 @@ namespace phi { KernelSignature KronGradOpArgumentMapping(const ArgumentMappingContext& ctx) { - return KernelSignature("kron_grad", - {"X", "Y", GradVarName("Out")}, - {}, - {GradVarName("X"), GradVarName("Y")}); + return KernelSignature( + "kron_grad", {"X", "Y", "Out@GRAD"}, {}, {"X@GRAD", "Y@GRAD"}); } } // namespace phi diff --git a/paddle/phi/ops/compat/kthvalue_sig.cc b/paddle/phi/ops/compat/kthvalue_sig.cc index 
3b1a6a45f9a0ab6a0bd298a47432e26cd191265c..b04726ec3b3a19f4c503c9ad5fb652d15cee0859 100644 --- a/paddle/phi/ops/compat/kthvalue_sig.cc +++ b/paddle/phi/ops/compat/kthvalue_sig.cc @@ -20,9 +20,9 @@ namespace phi { KernelSignature KthvalueGradOpArgumentMapping( const ArgumentMappingContext& ctx) { return KernelSignature("kthvalue_grad", - {"X", "Indices", GradVarName("Out")}, + {"X", "Indices", "Out@GRAD"}, {"k", "axis", "keepdim"}, - {GradVarName("X")}); + {"X@GRAD"}); } } // namespace phi diff --git a/paddle/phi/ops/compat/label_smooth_sig.cc b/paddle/phi/ops/compat/label_smooth_sig.cc index 4fb62a8ca2675e08896cbd40ea44b5225e5d02a5..7607af2b61b7c856da05dc2c3dcd20193db4585b 100644 --- a/paddle/phi/ops/compat/label_smooth_sig.cc +++ b/paddle/phi/ops/compat/label_smooth_sig.cc @@ -24,10 +24,8 @@ KernelSignature LabelSmoothOpArgumentMapping( KernelSignature LabelSmoothGradOpArgumentMapping( const ArgumentMappingContext& ctx) { - return KernelSignature("label_smooth_grad", - {GradVarName("Out")}, - {"epsilon"}, - {GradVarName("X")}); + return KernelSignature( + "label_smooth_grad", {"Out@GRAD"}, {"epsilon"}, {"X@GRAD"}); } } // namespace phi diff --git a/paddle/phi/ops/compat/layer_norm_sig.cc b/paddle/phi/ops/compat/layer_norm_sig.cc index 17a81e9ec012f2c116762ff2d653bb96f0e1c4f4..eb47c516ab392fc5e352890dc50c87061a000606 100644 --- a/paddle/phi/ops/compat/layer_norm_sig.cc +++ b/paddle/phi/ops/compat/layer_norm_sig.cc @@ -25,11 +25,10 @@ KernelSignature LayerNormOpArgumentMapping(const ArgumentMappingContext& ctx) { KernelSignature LayerNormGradOpArgumentMapping( const ArgumentMappingContext& ctx) { - return KernelSignature( - "layer_norm_grad", - {"X", "Mean", "Variance", "Scale", "Bias", GradVarName("Y")}, - {"epsilon", "begin_norm_axis", "is_test"}, - {GradVarName("X"), GradVarName("Scale"), GradVarName("Bias")}); + return KernelSignature("layer_norm_grad", + {"X", "Mean", "Variance", "Scale", "Bias", "Y@GRAD"}, + {"epsilon", "begin_norm_axis", "is_test"}, + {"X@GRAD", "Scale@GRAD", "Bias@GRAD"}); } } // namespace phi diff --git a/paddle/phi/ops/compat/lerp_sig.cc b/paddle/phi/ops/compat/lerp_sig.cc index 3a8b23ca4c4a4a87f1b157679fd4e2d769deeb29..154424468d6605615f415ea7db7741119a3c6fb0 100644 --- a/paddle/phi/ops/compat/lerp_sig.cc +++ b/paddle/phi/ops/compat/lerp_sig.cc @@ -22,9 +22,9 @@ KernelSignature LerpOpArgumentMapping(const ArgumentMappingContext& ctx) { KernelSignature LerpGradOpArgumentMapping(const ArgumentMappingContext& ctx) { return KernelSignature("lerp_grad", - {"X", "Y", "Weight", "Out", GradVarName("Out")}, + {"X", "Y", "Weight", "Out", "Out@GRAD"}, {}, - {GradVarName("X"), GradVarName("Y")}); + {"X@GRAD", "Y@GRAD"}); } } // namespace phi diff --git a/paddle/phi/ops/compat/lgamma_sig.cc b/paddle/phi/ops/compat/lgamma_sig.cc index 452ba5e2b45a1577955e83afb55d7604d063fcbb..192754cc846dc99046e82c2335ba99a4656ac7e2 100644 --- a/paddle/phi/ops/compat/lgamma_sig.cc +++ b/paddle/phi/ops/compat/lgamma_sig.cc @@ -17,8 +17,7 @@ namespace phi { KernelSignature LgammaGradOpArgumentMapping(const ArgumentMappingContext& ctx) { - return KernelSignature( - "lgamma_grad", {"X", GradVarName("Out")}, {}, {GradVarName("X")}); + return KernelSignature("lgamma_grad", {"X", "Out@GRAD"}, {}, {"X@GRAD"}); } } // namespace phi diff --git a/paddle/phi/ops/compat/log_loss_sig.cc b/paddle/phi/ops/compat/log_loss_sig.cc index c4ae746e975a7ef7fe3de26cbe16aa221bca8164..adf40bac000e3f10a81370f199ac6e0ebecf6965 100644 --- a/paddle/phi/ops/compat/log_loss_sig.cc +++ b/paddle/phi/ops/compat/log_loss_sig.cc 
@@ -19,9 +19,9 @@ namespace phi { KernelSignature LogLossGradOpArgumentMapping( const ArgumentMappingContext& ctx) { return KernelSignature("log_loss_grad", - {"Predicted", "Labels", GradVarName("Loss")}, + {"Predicted", "Labels", "Loss@GRAD"}, {"epsilon"}, - {GradVarName("Predicted")}); + {"Predicted@GRAD"}); } } // namespace phi diff --git a/paddle/phi/ops/compat/log_softmax_sig.cc b/paddle/phi/ops/compat/log_softmax_sig.cc index b1ecc6d56768f069c208a0230722929200f1dfe0..20635c89875f89dbfcf07b404fa57a654ecd8bd8 100644 --- a/paddle/phi/ops/compat/log_softmax_sig.cc +++ b/paddle/phi/ops/compat/log_softmax_sig.cc @@ -18,10 +18,8 @@ namespace phi { KernelSignature LogSoftmaxGradOpArgumentMapping( const ArgumentMappingContext& ctx) { - return KernelSignature("log_softmax_grad", - {"Out", GradVarName("Out")}, - {"axis"}, - {GradVarName("X")}); + return KernelSignature( + "log_softmax_grad", {"Out", "Out@GRAD"}, {"axis"}, {"X@GRAD"}); } } // namespace phi diff --git a/paddle/phi/ops/compat/logsumexp_sig.cc b/paddle/phi/ops/compat/logsumexp_sig.cc index ca7345dbe704999183a784489f13bea05e30fdc0..6d988c71880cb9cc57fbda3639d9a274eb73b1e8 100644 --- a/paddle/phi/ops/compat/logsumexp_sig.cc +++ b/paddle/phi/ops/compat/logsumexp_sig.cc @@ -19,9 +19,9 @@ namespace phi { KernelSignature LogsumexpGradOpArgumentMapping( const ArgumentMappingContext& ctx) { return KernelSignature("logsumexp_grad", - {"X", "Out", GradVarName("Out")}, + {"X", "Out", "Out@GRAD"}, {"axis", "keepdim", "reduce_all"}, - {GradVarName("X")}); + {"X@GRAD"}); } } // namespace phi diff --git a/paddle/phi/ops/compat/masked_select_sig.cc b/paddle/phi/ops/compat/masked_select_sig.cc index ec0eb90315bc1b43533e15fa54e52de4cdb0b17b..47b4f2fac31556c644ea70d6647fe288ea4d2fda 100644 --- a/paddle/phi/ops/compat/masked_select_sig.cc +++ b/paddle/phi/ops/compat/masked_select_sig.cc @@ -23,10 +23,8 @@ KernelSignature MaskedSelectOpArgumentMapping( KernelSignature MaskedSelectGradOpArgumentMapping( const ArgumentMappingContext& ctx) { - return KernelSignature("masked_select_grad", - {"X", "Mask", GradVarName("Y")}, - {}, - {GradVarName("X")}); + return KernelSignature( + "masked_select_grad", {"X", "Mask", "Y@GRAD"}, {}, {"X@GRAD"}); } } // namespace phi diff --git a/paddle/phi/ops/compat/matmul_sig.cc b/paddle/phi/ops/compat/matmul_sig.cc index 771a7c3acc39dfade48c210d9937fbf719ad911a..4e125f0dbea27146644b63a2084e703a34aba27e 100644 --- a/paddle/phi/ops/compat/matmul_sig.cc +++ b/paddle/phi/ops/compat/matmul_sig.cc @@ -19,14 +19,14 @@ namespace phi { KernelSignature MatmulGradOpArgumentMapping(const ArgumentMappingContext& ctx) { if (ctx.HasAttr("use_addto")) { return KernelSignature("addto_matmul_grad", - {"X", "Y", GradVarName("Out")}, + {"X", "Y", "Out@GRAD"}, {"trans_x", "trans_y", "use_addto"}, - {GradVarName("X"), GradVarName("Y")}); + {"X@GRAD", "Y@GRAD"}); } else { return KernelSignature("matmul_grad", - {"X", "Y", GradVarName("Out")}, + {"X", "Y", "Out@GRAD"}, {"trans_x", "trans_y"}, - {GradVarName("X"), GradVarName("Y")}); + {"X@GRAD", "Y@GRAD"}); } } diff --git a/paddle/phi/ops/compat/matrix_power_sig.cc b/paddle/phi/ops/compat/matrix_power_sig.cc index 4c9ad4e74ab460c905f5c9e11f64cf8fa332dad0..00cb1f82b8047c8a8a2901887ae4121e5af9b0e5 100644 --- a/paddle/phi/ops/compat/matrix_power_sig.cc +++ b/paddle/phi/ops/compat/matrix_power_sig.cc @@ -18,10 +18,8 @@ namespace phi { KernelSignature MatrixPowerGradOpArgumentMapping( const ArgumentMappingContext& ctx) { - return KernelSignature("matrix_power_grad", - {"X", "Out", GradVarName("Out")}, 
- {"n"}, - {GradVarName("X")}); + return KernelSignature( + "matrix_power_grad", {"X", "Out", "Out@GRAD"}, {"n"}, {"X@GRAD"}); } } // namespace phi diff --git a/paddle/phi/ops/compat/maxout_sig.cc b/paddle/phi/ops/compat/maxout_sig.cc index d16dd1c8617fe224c3e3fb7ab8dfc6cb5b2d2d63..9e028bc81fbc3c72e07248cc28d263b05100fd38 100644 --- a/paddle/phi/ops/compat/maxout_sig.cc +++ b/paddle/phi/ops/compat/maxout_sig.cc @@ -21,10 +21,8 @@ KernelSignature MaxoutArgumentMapping(const ArgumentMappingContext& ctx) { } KernelSignature MaxoutGradArgumentMapping(const ArgumentMappingContext& ctx) { - return KernelSignature("maxout_grad", - {"X", "Out", GradVarName("Out")}, - {"groups", "axis"}, - {GradVarName("X")}); + return KernelSignature( + "maxout_grad", {"X", "Out", "Out@GRAD"}, {"groups", "axis"}, {"X@GRAD"}); } } // namespace phi diff --git a/paddle/phi/ops/compat/mean_sig.cc b/paddle/phi/ops/compat/mean_sig.cc index 6decd0da0b08698f942ccef1b25f070098f7a501..461d6ab32cec4cb3580a37e1b86ef557d31a1b72 100644 --- a/paddle/phi/ops/compat/mean_sig.cc +++ b/paddle/phi/ops/compat/mean_sig.cc @@ -22,8 +22,7 @@ KernelSignature MeanOpArgumentMapping(const ArgumentMappingContext& ctx) { KernelSignature MeanGradOpGradArgumentMapping( const ArgumentMappingContext& ctx) { - return KernelSignature( - "mean_all_grad", {"X", GradVarName("Out")}, {}, {GradVarName("X")}); + return KernelSignature("mean_all_grad", {"X", "Out@GRAD"}, {}, {"X@GRAD"}); } } // namespace phi diff --git a/paddle/phi/ops/compat/meshgrid_sig.cc b/paddle/phi/ops/compat/meshgrid_sig.cc index 44671c84e7afb5ec781cf1c103c18b0c3886c8be..f0c8cc7ea6234ce9b0ed7d98ba77b43bad9080c4 100644 --- a/paddle/phi/ops/compat/meshgrid_sig.cc +++ b/paddle/phi/ops/compat/meshgrid_sig.cc @@ -22,8 +22,7 @@ KernelSignature MeshgridOpArgumentMapping(const ArgumentMappingContext& ctx) { KernelSignature MeshgridGradOpArgumentMapping( const ArgumentMappingContext& ctx) { - return KernelSignature( - "meshgrid_grad", {"X", GradVarName("Out")}, {}, {GradVarName("X")}); + return KernelSignature("meshgrid_grad", {"X", "Out@GRAD"}, {}, {"X@GRAD"}); } } // namespace phi diff --git a/paddle/phi/ops/compat/mode_sig.cc b/paddle/phi/ops/compat/mode_sig.cc index 20994c08aa73c33328568e334d258c44eef68171..e21cd69bf60a154e5989eadabaa38d5a072a8624 100644 --- a/paddle/phi/ops/compat/mode_sig.cc +++ b/paddle/phi/ops/compat/mode_sig.cc @@ -23,9 +23,9 @@ KernelSignature ModeOpArgumentMapping(const ArgumentMappingContext& ctx) { KernelSignature ModeGradOpArgumentMapping(const ArgumentMappingContext& ctx) { return KernelSignature("mode_grad", - {"X", "Indices", GradVarName("Out")}, + {"X", "Indices", "Out@GRAD"}, {"axis", "keepdim"}, - {GradVarName("X")}); + {"X@GRAD"}); } } // namespace phi diff --git a/paddle/phi/ops/compat/mul_sig.cc b/paddle/phi/ops/compat/mul_sig.cc index 8770db1039eb6d38ca36d0cd7d5ac1711eb12f21..4afff4aa1d7a563d64440a7b68a240a6e8138dd3 100644 --- a/paddle/phi/ops/compat/mul_sig.cc +++ b/paddle/phi/ops/compat/mul_sig.cc @@ -18,9 +18,9 @@ namespace phi { KernelSignature MulGradOpArgumentMapping(const ArgumentMappingContext& ctx) { return KernelSignature("matmul_with_flatten_grad", - {"X", "Y", GradVarName("Out")}, + {"X", "Y", "Out@GRAD"}, {"x_num_col_dims", "y_num_col_dims"}, - {GradVarName("X"), GradVarName("Y")}); + {"X@GRAD", "Y@GRAD"}); } KernelSignature MulDoubleGradOpArgumentMapping( diff --git a/paddle/phi/ops/compat/multi_dot_sig.cc b/paddle/phi/ops/compat/multi_dot_sig.cc index 2e05bd6d1557acc2a18b5a2c31ecb928bbb50ec3..29af82c9d1d16b5a69db7afc3e79252f854e28b2 
100644 --- a/paddle/phi/ops/compat/multi_dot_sig.cc +++ b/paddle/phi/ops/compat/multi_dot_sig.cc @@ -18,8 +18,7 @@ namespace phi { KernelSignature MultiDotGradOpArgumentMapping( const ArgumentMappingContext& ctx) { - return KernelSignature( - "multi_dot_grad", {"X", GradVarName("Out")}, {}, {GradVarName("X")}); + return KernelSignature("multi_dot_grad", {"X", "Out@GRAD"}, {}, {"X@GRAD"}); } } // namespace phi diff --git a/paddle/phi/ops/compat/multiplex_sig.cc b/paddle/phi/ops/compat/multiplex_sig.cc index 9dab4655d172312a7389d0bb243e31ee39ef5981..538b1c13dda58a8d50c2a9cb40b8493ca1d0cfd8 100644 --- a/paddle/phi/ops/compat/multiplex_sig.cc +++ b/paddle/phi/ops/compat/multiplex_sig.cc @@ -22,8 +22,7 @@ KernelSignature MultiplexOpArgumentMapping(const ArgumentMappingContext& ctx) { KernelSignature MultiplexGradOpArgumentMapping( const ArgumentMappingContext& ctx) { - return KernelSignature( - "multiplex_grad", {"Ids", GradVarName("Out")}, {}, {GradVarName("X")}); + return KernelSignature("multiplex_grad", {"Ids", "Out@GRAD"}, {}, {"X@GRAD"}); } } // namespace phi diff --git a/paddle/phi/ops/compat/mv_sig.cc b/paddle/phi/ops/compat/mv_sig.cc index 0012f8e1ccb41169175b9f539b839850bd901b82..e965ddbb72657e781472fb86408378b56bece085 100644 --- a/paddle/phi/ops/compat/mv_sig.cc +++ b/paddle/phi/ops/compat/mv_sig.cc @@ -17,10 +17,8 @@ namespace phi { KernelSignature MvGradOpArgumentMapping(const ArgumentMappingContext& ctx) { - return KernelSignature("mv_grad", - {"X", "Vec", GradVarName("Out")}, - {}, - {GradVarName("X"), GradVarName("Vec")}); + return KernelSignature( + "mv_grad", {"X", "Vec", "Out@GRAD"}, {}, {"X@GRAD", "Vec@GRAD"}); } } // namespace phi diff --git a/paddle/phi/ops/compat/nll_loss_sig.cc b/paddle/phi/ops/compat/nll_loss_sig.cc index 87a060ce7a672f401ee28cb1b123f13178c52815..f3f9c531781923fd06818fac6ba3f2f6d3d4b54b 100644 --- a/paddle/phi/ops/compat/nll_loss_sig.cc +++ b/paddle/phi/ops/compat/nll_loss_sig.cc @@ -27,11 +27,10 @@ KernelSignature NllLossOpArgumentMapping(const ArgumentMappingContext& ctx) { KernelSignature NllLossGradOpArgumentMapping( const ArgumentMappingContext& ctx) { - return KernelSignature( - "nll_loss_grad", - {"X", "Label", "Weight", "Total_weight", GradVarName("Out")}, - {"ignore_index", "reduction"}, - {GradVarName("X")}); + return KernelSignature("nll_loss_grad", + {"X", "Label", "Weight", "Total_weight", "Out@GRAD"}, + {"ignore_index", "reduction"}, + {"X@GRAD"}); } } // namespace phi diff --git a/paddle/phi/ops/compat/norm_sig.cc b/paddle/phi/ops/compat/norm_sig.cc index a74db9b5686c8d79c4a59bb55a33264443ddf886..b9e56f3d166d4f818fab8e6c204fbebe52c6dccf 100644 --- a/paddle/phi/ops/compat/norm_sig.cc +++ b/paddle/phi/ops/compat/norm_sig.cc @@ -23,9 +23,9 @@ KernelSignature NormOpArgumentMapping(const ArgumentMappingContext& ctx) { KernelSignature NormGradOpArgumentMapping(const ArgumentMappingContext& ctx) { return KernelSignature("norm_grad", - {"X", "Norm", GradVarName("Out")}, + {"X", "Norm", "Out@GRAD"}, {"axis", "epsilon", "is_test"}, - {GradVarName("X")}); + {"X@GRAD"}); } } // namespace phi diff --git a/paddle/phi/ops/compat/p_norm_sig.cc b/paddle/phi/ops/compat/p_norm_sig.cc index d3bff55346c4520ebabe249dcbe9c34fab3a340a..82b88aa09ff2f51525e76374157d63d29134940b 100644 --- a/paddle/phi/ops/compat/p_norm_sig.cc +++ b/paddle/phi/ops/compat/p_norm_sig.cc @@ -17,9 +17,9 @@ namespace phi { KernelSignature PNormGradOpArgumentMapping(const ArgumentMappingContext& ctx) { return KernelSignature("p_norm_grad", - {"X", "Out", GradVarName("Out")}, + {"X", 
"Out", "Out@GRAD"}, {"porder", "axis", "epsilon", "keepdim", "asvector"}, - {GradVarName("X")}); + {"X@GRAD"}); } } // namespace phi diff --git a/paddle/phi/ops/compat/pad3d_sig.cc b/paddle/phi/ops/compat/pad3d_sig.cc index c43b98fa27e6baef55ad1dcbc11cb764ba9cb944..dd8a37d24b75f4d29da6ef67ebc179d27a36c9bb 100644 --- a/paddle/phi/ops/compat/pad3d_sig.cc +++ b/paddle/phi/ops/compat/pad3d_sig.cc @@ -29,14 +29,14 @@ KernelSignature Pad3dOpArgumentMapping(const ArgumentMappingContext& ctx) { KernelSignature Pad3dGradOpArgumentMapping(const ArgumentMappingContext& ctx) { if (ctx.HasInput("Paddings")) { return KernelSignature("pad3d_grad", - {"X", GradVarName("Out")}, + {"X", "Out@GRAD"}, {"Paddings", "mode", "value", "data_format"}, - {GradVarName("X")}); + {"X@GRAD"}); } return KernelSignature("pad3d_grad", - {"X", GradVarName("Out")}, + {"X", "Out@GRAD"}, {"paddings", "mode", "value", "data_format"}, - {GradVarName("X")}); + {"X@GRAD"}); } } // namespace phi diff --git a/paddle/phi/ops/compat/pad_sig.cc b/paddle/phi/ops/compat/pad_sig.cc index 4eadbfa98beded121c4e6738384487a9ec10be42..bb870eb256c9e07f82a5a03d0ad3904a3a340f0f 100644 --- a/paddle/phi/ops/compat/pad_sig.cc +++ b/paddle/phi/ops/compat/pad_sig.cc @@ -18,10 +18,8 @@ namespace phi { KernelSignature PadGradOpArgumentMapping(const ArgumentMappingContext& ctx) { - return KernelSignature("pad_grad", - {GradVarName("Out")}, - {"paddings", "pad_value"}, - {GradVarName("X")}); + return KernelSignature( + "pad_grad", {"Out@GRAD"}, {"paddings", "pad_value"}, {"X@GRAD"}); } } // namespace phi diff --git a/paddle/phi/ops/compat/pixel_shuffle_sig.cc b/paddle/phi/ops/compat/pixel_shuffle_sig.cc index 641288cf12ae2e44147f6bd35434a6661727e9cd..96cb01a38fc50e96c8ebad8cc687cd61911c80dd 100644 --- a/paddle/phi/ops/compat/pixel_shuffle_sig.cc +++ b/paddle/phi/ops/compat/pixel_shuffle_sig.cc @@ -25,9 +25,9 @@ KernelSignature PixelShuffleOpArgumentMapping( KernelSignature PixelShuffleGradOpArgumentMapping( const ArgumentMappingContext& ctx) { return KernelSignature("pixel_shuffle_grad", - {GradVarName("Out")}, + {"Out@GRAD"}, {"upscale_factor", "data_format"}, - {GradVarName("X")}); + {"X@GRAD"}); } } // namespace phi diff --git a/paddle/phi/ops/compat/poisson_sig.cc b/paddle/phi/ops/compat/poisson_sig.cc index e45640c11b6ee97a626552f30eec10d0bc083b80..6022c3b608dfb0c4df50e3084a91ddc359781fcd 100644 --- a/paddle/phi/ops/compat/poisson_sig.cc +++ b/paddle/phi/ops/compat/poisson_sig.cc @@ -18,8 +18,7 @@ namespace phi { KernelSignature PoissonGradOpArgumentMapping( const ArgumentMappingContext& ctx) { - return KernelSignature( - "poisson_grad", {GradVarName("Out")}, {}, {GradVarName("X")}); + return KernelSignature("poisson_grad", {"Out@GRAD"}, {}, {"X@GRAD"}); } } // namespace phi diff --git a/paddle/phi/ops/compat/pool_sig.cc b/paddle/phi/ops/compat/pool_sig.cc index 390d3db5e785ba7642213e9b7a8db2b718ff19f0..b807b21a1c0b1c2dfd6d60208a5b263a653fc47b 100644 --- a/paddle/phi/ops/compat/pool_sig.cc +++ b/paddle/phi/ops/compat/pool_sig.cc @@ -34,7 +34,7 @@ KernelSignature Pool2dOpArgumentMapping(const ArgumentMappingContext& ctx) { KernelSignature Pool2dGradOpArgumentMapping(const ArgumentMappingContext& ctx) { return KernelSignature("pool2d_grad", - {"X", "Out", GradVarName("Out")}, + {"X", "Out", "Out@GRAD"}, {"ksize", "strides", "paddings", @@ -45,7 +45,7 @@ KernelSignature Pool2dGradOpArgumentMapping(const ArgumentMappingContext& ctx) { "global_pooling", "adaptive", "padding_algorithm"}, - {GradVarName("X")}); + {"X@GRAD"}); } KernelSignature 
Pool2dDoubleGradOpArgumentMapping( @@ -78,9 +78,9 @@ KernelSignature MaxPool2dWithIndexGradOpArgumentMapping( const ArgumentMappingContext& ctx) { return KernelSignature( "max_pool2d_with_index_grad", - {"X", "Mask", GradVarName("Out")}, + {"X", "Mask", "Out@GRAD"}, {"ksize", "strides", "paddings", "global_pooling", "adaptive"}, - {GradVarName("X")}); + {"X@GRAD"}); } KernelSignature Pool3dOpArgumentMapping(const ArgumentMappingContext& ctx) { @@ -101,7 +101,7 @@ KernelSignature Pool3dOpArgumentMapping(const ArgumentMappingContext& ctx) { KernelSignature Pool3dGradOpArgumentMapping(const ArgumentMappingContext& ctx) { return KernelSignature("pool3d_grad", - {"X", "Out", GradVarName("Out")}, + {"X", "Out", "Out@GRAD"}, {"ksize", "strides", "paddings", @@ -112,7 +112,7 @@ KernelSignature Pool3dGradOpArgumentMapping(const ArgumentMappingContext& ctx) { "global_pooling", "adaptive", "padding_algorithm"}, - {GradVarName("X")}); + {"X@GRAD"}); } KernelSignature MaxPool3dWithIndexOpArgumentMapping( @@ -128,9 +128,9 @@ KernelSignature MaxPool3dWithIndexGradOpArgumentMapping( const ArgumentMappingContext& ctx) { return KernelSignature( "max_pool3d_with_index_grad", - {"X", "Mask", GradVarName("Out")}, + {"X", "Mask", "Out@GRAD"}, {"ksize", "strides", "paddings", "global_pooling", "adaptive"}, - {GradVarName("X")}); + {"X@GRAD"}); } } // namespace phi diff --git a/paddle/phi/ops/compat/prelu_sig.cc b/paddle/phi/ops/compat/prelu_sig.cc index 43e5f20a92676df24eba85c53e00f5ef66592d8f..6e25e1d9f754bf5ca0419ce179660a09b8d00d9e 100644 --- a/paddle/phi/ops/compat/prelu_sig.cc +++ b/paddle/phi/ops/compat/prelu_sig.cc @@ -23,9 +23,9 @@ KernelSignature PReluOpArgumentMapping(const ArgumentMappingContext& ctx) { KernelSignature PReluGradOpArgumentMapping(const ArgumentMappingContext& ctx) { return KernelSignature("prelu_grad", - {"X", "Alpha", GradVarName("Out")}, + {"X", "Alpha", "Out@GRAD"}, {"data_format", "mode"}, - {GradVarName("X"), GradVarName("Alpha")}); + {"X@GRAD", "Alpha@GRAD"}); } } // namespace phi diff --git a/paddle/phi/ops/compat/psroi_pool_sig.cc b/paddle/phi/ops/compat/psroi_pool_sig.cc index 4d694d9a7759d9e3cdf0c385164a367260f2a020..df1dc1113cc184dc4785c4ae8b9347a18f407bb3 100644 --- a/paddle/phi/ops/compat/psroi_pool_sig.cc +++ b/paddle/phi/ops/compat/psroi_pool_sig.cc @@ -28,9 +28,9 @@ KernelSignature PsroiPoolGradOpArgumentMapping( const ArgumentMappingContext& ctx) { return KernelSignature( "psroi_pool_grad", - {"X", "ROIs", "RoisNum", GradVarName("Out")}, + {"X", "ROIs", "RoisNum", "Out@GRAD"}, {"pooled_height", "pooled_width", "output_channels", "spatial_scale"}, - {GradVarName("X")}); + {"X@GRAD"}); } } // namespace phi diff --git a/paddle/phi/ops/compat/put_along_axis_sig.cc b/paddle/phi/ops/compat/put_along_axis_sig.cc index 5f8dc1cf4cd711c1d39c0730a2a8b4ab86c57bea..83f0e5f65a0c511d4e78f3865f4eb40360bc5a10 100644 --- a/paddle/phi/ops/compat/put_along_axis_sig.cc +++ b/paddle/phi/ops/compat/put_along_axis_sig.cc @@ -26,9 +26,9 @@ KernelSignature PutAlongAxisArgumentMapping(const ArgumentMappingContext& ctx) { KernelSignature PutAlongAxisGradArgumentMapping( const ArgumentMappingContext& ctx) { return KernelSignature("put_along_axis_grad", - {"Input", "Index", GradVarName("Result")}, + {"Input", "Index", "Result@GRAD"}, {"Axis", "Reduce"}, - {GradVarName("Input"), GradVarName("Value")}); + {"Input@GRAD", "Value@GRAD"}); } } // namespace phi diff --git a/paddle/phi/ops/compat/reduce_sig.cc b/paddle/phi/ops/compat/reduce_sig.cc index 
cf2edf9f20fc219163039e990b8a2b1ef71f9438..a0ba07f5e8e2cf4c9b21819681092a70c1357a7c 100644 --- a/paddle/phi/ops/compat/reduce_sig.cc +++ b/paddle/phi/ops/compat/reduce_sig.cc @@ -130,41 +130,41 @@ KernelSignature ReduceAllOpArgumentMapping(const ArgumentMappingContext& ctx) { KernelSignature ReduceSumGradOpArgumentMapping( const ArgumentMappingContext& ctx) { return KernelSignature("sum_grad", - {"X", GradVarName("Out")}, + {"X", "Out@GRAD"}, {"dim", "keep_dim", "reduce_all"}, - {GradVarName("X")}); + {"X@GRAD"}); } KernelSignature ReduceMeanGradOpArgumentMapping( const ArgumentMappingContext& ctx) { return KernelSignature("mean_grad", - {"X", GradVarName("Out")}, + {"X", "Out@GRAD"}, {"dim", "keep_dim", "reduce_all"}, - {GradVarName("X")}); + {"X@GRAD"}); } KernelSignature ReduceMaxGradOpArgumentMapping( const ArgumentMappingContext& ctx) { return KernelSignature("max_grad", - {"X", "Out", GradVarName("Out")}, + {"X", "Out", "Out@GRAD"}, {"dim", "keep_dim", "reduce_all"}, - {GradVarName("X")}); + {"X@GRAD"}); } KernelSignature ReduceMinGradOpArgumentMapping( const ArgumentMappingContext& ctx) { return KernelSignature("min_grad", - {"X", "Out", GradVarName("Out")}, + {"X", "Out", "Out@GRAD"}, {"dim", "keep_dim", "reduce_all"}, - {GradVarName("X")}); + {"X@GRAD"}); } KernelSignature ReduceProdGradOpArgumentMapping( const ArgumentMappingContext& ctx) { return KernelSignature("prod_grad", - {"X", "Out", GradVarName("Out")}, + {"X", "Out", "Out@GRAD"}, {"dim", "keep_dim", "reduce_all"}, - {GradVarName("X")}); + {"X@GRAD"}); } } // namespace phi diff --git a/paddle/phi/ops/compat/reshape_sig.cc b/paddle/phi/ops/compat/reshape_sig.cc index 04f64e403527313213f97d4f3dab260423039440..a01f2a98c9bdf7d5614649d731b4e0c06a46cf73 100644 --- a/paddle/phi/ops/compat/reshape_sig.cc +++ b/paddle/phi/ops/compat/reshape_sig.cc @@ -41,8 +41,7 @@ KernelSignature ReshapeOpArgumentMapping(const ArgumentMappingContext& ctx) { KernelSignature ReshapeGradOpArgumentMapping( const ArgumentMappingContext& ctx) { - return KernelSignature( - "reshape_grad", {GradVarName("Out")}, {}, {GradVarName("X")}); + return KernelSignature("reshape_grad", {"Out@GRAD"}, {}, {"X@GRAD"}); } KernelSignature ReshapeDoubleGradOpArgumentMapping( diff --git a/paddle/phi/ops/compat/rnn_sig.cc b/paddle/phi/ops/compat/rnn_sig.cc index 352510d5b2e6e3f0d8f998f92051fbc7c6abd386..87c99ac13aa07efcedfa55e505a717d6f515b690 100644 --- a/paddle/phi/ops/compat/rnn_sig.cc +++ b/paddle/phi/ops/compat/rnn_sig.cc @@ -39,8 +39,8 @@ KernelSignature RnnGradOpArgumentMapping(const ArgumentMappingContext& ctx) { "Out", "DropoutState", "Reserve", - GradVarName("Out"), - GradVarName("State")}, + "Out@GRAD", + "State@GRAD"}, {"dropout_prob", "is_bidirec", "input_size", @@ -49,9 +49,7 @@ KernelSignature RnnGradOpArgumentMapping(const ArgumentMappingContext& ctx) { "mode", "seed", "is_test"}, - {GradVarName("Input"), - GradVarName("PreState"), - GradVarName("WeightList")}); + {"Input@GRAD", "PreState@GRAD", "WeightList@GRAD"}); } } // namespace phi diff --git a/paddle/phi/ops/compat/roi_align_sig.cc b/paddle/phi/ops/compat/roi_align_sig.cc index 1717ec8f788091fc5eae59c40a32a30c355760e8..7279e82139bbc369ecb24a59d5617c06a0de4474 100644 --- a/paddle/phi/ops/compat/roi_align_sig.cc +++ b/paddle/phi/ops/compat/roi_align_sig.cc @@ -30,13 +30,13 @@ KernelSignature RoiAlignOpArgumentMapping(const ArgumentMappingContext& ctx) { KernelSignature RoiAlignGradOpArgumentMapping( const ArgumentMappingContext& ctx) { return KernelSignature("roi_align_grad", - {"X", "ROIs", "RoisNum", 
GradVarName("Out")}, + {"X", "ROIs", "RoisNum", "Out@GRAD"}, {"pooled_height", "pooled_width", "spatial_scale", "sampling_ratio", "aligned"}, - {GradVarName("X")}); + {"X@GRAD"}); } } // namespace phi diff --git a/paddle/phi/ops/compat/roi_pool_sig.cc b/paddle/phi/ops/compat/roi_pool_sig.cc index d04c645f183c6e1ac91e4bf6003427008a24fe42..971b4b9d5bf3208d7af5870b675bb8c70655449e 100644 --- a/paddle/phi/ops/compat/roi_pool_sig.cc +++ b/paddle/phi/ops/compat/roi_pool_sig.cc @@ -26,9 +26,9 @@ KernelSignature RoiPoolOpArgumentMapping(const ArgumentMappingContext& ctx) { KernelSignature RoiPoolOpGradArgumentMapping( const ArgumentMappingContext& ctx) { return KernelSignature("roi_pool_grad", - {"X", "ROIs", "RoisNum", "Argmax", GradVarName("Out")}, + {"X", "ROIs", "RoisNum", "Argmax", "Out@GRAD"}, {"pooled_height", "pooled_width", "spatial_scale"}, - {GradVarName("X")}); + {"X@GRAD"}); } } // namespace phi diff --git a/paddle/phi/ops/compat/roll_sig.cc b/paddle/phi/ops/compat/roll_sig.cc index a144f0e8e8a90eee0bf0a8a80455b1e19611880c..e6817555bc4b92a053ec2beb7aaf3aadbdaa3875 100644 --- a/paddle/phi/ops/compat/roll_sig.cc +++ b/paddle/phi/ops/compat/roll_sig.cc @@ -24,10 +24,8 @@ KernelSignature RollOpArgumentMapping(const ArgumentMappingContext& ctx) { } KernelSignature RollGradOpArgumentMapping(const ArgumentMappingContext& ctx) { - return KernelSignature("roll_grad", - {"X", GradVarName("Out")}, - {"shifts", "axis"}, - {GradVarName("X")}); + return KernelSignature( + "roll_grad", {"X", "Out@GRAD"}, {"shifts", "axis"}, {"X@GRAD"}); } } // namespace phi diff --git a/paddle/phi/ops/compat/segment_pool_sig.cc b/paddle/phi/ops/compat/segment_pool_sig.cc index 97646a2ac31d33fe8b0bd09c1a205122b4f3fd6c..db07343f9ad84d3fd68edf45ff779fe32cca4f7b 100644 --- a/paddle/phi/ops/compat/segment_pool_sig.cc +++ b/paddle/phi/ops/compat/segment_pool_sig.cc @@ -18,13 +18,12 @@ namespace phi { KernelSignature SegmentPoolGradOpArgumentMapping( const ArgumentMappingContext& ctx) { - return KernelSignature( - "segment_pool_grad", - { - "X", "SegmentIds", "Out", "SummedIds", GradVarName("Out"), - }, - {"pooltype"}, - {GradVarName("X")}); + return KernelSignature("segment_pool_grad", + { + "X", "SegmentIds", "Out", "SummedIds", "Out@GRAD", + }, + {"pooltype"}, + {"X@GRAD"}); } } // namespace phi diff --git a/paddle/phi/ops/compat/selu_sig.cc b/paddle/phi/ops/compat/selu_sig.cc index 23f5cc34515b4aba482e2cfe3d6e0d148e2d97b2..08087584a1094559788c9032ab5b8c724af4d349 100644 --- a/paddle/phi/ops/compat/selu_sig.cc +++ b/paddle/phi/ops/compat/selu_sig.cc @@ -19,10 +19,8 @@ namespace phi { KernelSignature SeluGradGradOpArgumentMapping( const ArgumentMappingContext& ctx) { - return KernelSignature("selu_grad", - {"Out", GradVarName("Out")}, - {"scale", "alpha"}, - {GradVarName("X")}); + return KernelSignature( + "selu_grad", {"Out", "Out@GRAD"}, {"scale", "alpha"}, {"X@GRAD"}); } } // namespace phi PD_REGISTER_ARG_MAPPING_FN(selu_grad, phi::SeluGradGradOpArgumentMapping); diff --git a/paddle/phi/ops/compat/set_value_sig.cc b/paddle/phi/ops/compat/set_value_sig.cc index 5feff54b028ba437125d65e4a6709254704164d8..6ff94a6e263f454988381e39599657eaa154833f 100644 --- a/paddle/phi/ops/compat/set_value_sig.cc +++ b/paddle/phi/ops/compat/set_value_sig.cc @@ -737,96 +737,89 @@ KernelSignature SetValueGradOpArgumentMapping( if (ctx.InputSize("StartsTensorList") > 0) { if (ctx.InputSize("EndsTensorList") > 0) { if (ctx.InputSize("StepsTensorList") > 0) { - return KernelSignature( - "set_value_grad", - {GradVarName("Out")}, - 
{"StartsTensorList", - "EndsTensorList", - "StepsTensorList", - "axes", - "decrease_axes", - "none_axes"}, - {GradVarName("Input"), GradVarName("ValueTensor")}); + return KernelSignature("set_value_grad", + {"Out@GRAD"}, + {"StartsTensorList", + "EndsTensorList", + "StepsTensorList", + "axes", + "decrease_axes", + "none_axes"}, + {"Input@GRAD", "ValueTensor@GRAD"}); } else { - return KernelSignature( - "set_value_grad", - {GradVarName("Out")}, - {"StartsTensorList", - "EndsTensorList", - "steps", - "axes", - "decrease_axes", - "none_axes"}, - {GradVarName("Input"), GradVarName("ValueTensor")}); + return KernelSignature("set_value_grad", + {"Out@GRAD"}, + {"StartsTensorList", + "EndsTensorList", + "steps", + "axes", + "decrease_axes", + "none_axes"}, + {"Input@GRAD", "ValueTensor@GRAD"}); } } else { if (ctx.InputSize("StepsTensorList") > 0) { - return KernelSignature( - "set_value_grad", - {GradVarName("Out")}, - {"StartsTensorList", - "ends", - "StepsTensorList", - "axes", - "decrease_axes", - "none_axes"}, - {GradVarName("Input"), GradVarName("ValueTensor")}); + return KernelSignature("set_value_grad", + {"Out@GRAD"}, + {"StartsTensorList", + "ends", + "StepsTensorList", + "axes", + "decrease_axes", + "none_axes"}, + {"Input@GRAD", "ValueTensor@GRAD"}); } else { - return KernelSignature( - "set_value_grad", - {GradVarName("Out")}, - {"StartsTensorList", - "ends", - "steps", - "axes", - "decrease_axes", - "none_axes"}, - {GradVarName("Input"), GradVarName("ValueTensor")}); + return KernelSignature("set_value_grad", + {"Out@GRAD"}, + {"StartsTensorList", + "ends", + "steps", + "axes", + "decrease_axes", + "none_axes"}, + {"Input@GRAD", "ValueTensor@GRAD"}); } } } else { if (ctx.InputSize("EndsTensorList") > 0) { if (ctx.InputSize("StepsTensorList") > 0) { - return KernelSignature( - "set_value_grad", - {GradVarName("Out")}, - {"starts", - "EndsTensorList", - "StepsTensorList", - "axes", - "decrease_axes", - "none_axes"}, - {GradVarName("Input"), GradVarName("ValueTensor")}); + return KernelSignature("set_value_grad", + {"Out@GRAD"}, + {"starts", + "EndsTensorList", + "StepsTensorList", + "axes", + "decrease_axes", + "none_axes"}, + {"Input@GRAD", "ValueTensor@GRAD"}); } else { - return KernelSignature( - "set_value_grad", - {GradVarName("Out")}, - {"starts", - "EndsTensorList", - "steps", - "axes", - "decrease_axes", - "none_axes"}, - {GradVarName("Input"), GradVarName("ValueTensor")}); + return KernelSignature("set_value_grad", + {"Out@GRAD"}, + {"starts", + "EndsTensorList", + "steps", + "axes", + "decrease_axes", + "none_axes"}, + {"Input@GRAD", "ValueTensor@GRAD"}); } } else { if (ctx.InputSize("StepsTensorList") > 0) { - return KernelSignature( - "set_value_grad", - {GradVarName("Out")}, - {"starts", - "ends", - "StepsTensorList", - "axes", - "decrease_axes", - "none_axes"}, - {GradVarName("Input"), GradVarName("ValueTensor")}); + return KernelSignature("set_value_grad", + {"Out@GRAD"}, + {"starts", + "ends", + "StepsTensorList", + "axes", + "decrease_axes", + "none_axes"}, + {"Input@GRAD", "ValueTensor@GRAD"}); } else { return KernelSignature( "set_value_grad", - {GradVarName("Out")}, + {"Out@GRAD"}, {"starts", "ends", "steps", "axes", "decrease_axes", "none_axes"}, - {GradVarName("Input"), GradVarName("ValueTensor")}); + {"Input@GRAD", "ValueTensor@GRAD"}); } } } diff --git a/paddle/phi/ops/compat/sigmoid_cross_entropy_with_logits_sig.cc b/paddle/phi/ops/compat/sigmoid_cross_entropy_with_logits_sig.cc index 
61ad9627a9612d62318939af8efda3a541cfa606..795e287d53debad8b096f1ae6e7fad7f4154ad96 100644
--- a/paddle/phi/ops/compat/sigmoid_cross_entropy_with_logits_sig.cc
+++ b/paddle/phi/ops/compat/sigmoid_cross_entropy_with_logits_sig.cc
@@ -19,9 +19,9 @@ namespace phi {
 KernelSignature SigmoidCrossEntropyWithLogitsKernelGradOpArgumentMapping(
     const ArgumentMappingContext& ctx) {
   return KernelSignature("sigmoid_cross_entropy_with_logits_grad",
-                         {"X", "Label", GradVarName("Out")},
+                         {"X", "Label", "Out@GRAD"},
                          {"normalize", "ignore_index"},
-                         {GradVarName("X")});
+                         {"X@GRAD"});
 }
 
 }  // namespace phi
diff --git a/paddle/phi/ops/compat/slice_sig.cc b/paddle/phi/ops/compat/slice_sig.cc
index ba3bafdaa51c739d828312e8b4d296e05de45ce6..607d0b31310b6403779404c975d4b8456f598aca 100644
--- a/paddle/phi/ops/compat/slice_sig.cc
+++ b/paddle/phi/ops/compat/slice_sig.cc
@@ -105,74 +105,74 @@ KernelSignature SliceGradOpArgumentMapping(const ArgumentMappingContext& ctx) {
   if (ctx.HasInput("StartsTensor")) {
     if (ctx.HasInput("EndsTensor")) {
       return KernelSignature("slice_grad",
-                             {"Input", GradVarName("Out")},
+                             {"Input", "Out@GRAD"},
                              {"axes",
                               "StartsTensor",
                               "EndsTensor",
                               "infer_flags",
                               "decrease_axis"},
-                             {GradVarName("Input")});
+                             {"Input@GRAD"});
     } else if (ctx.InputSize("EndsTensorList") > 0) {
       return KernelSignature("slice_grad",
-                             {"Input", GradVarName("Out")},
+                             {"Input", "Out@GRAD"},
                              {"axes",
                               "StartsTensor",
                               "EndsTensorList",
                               "infer_flags",
                               "decrease_axis"},
-                             {GradVarName("Input")});
+                             {"Input@GRAD"});
     } else {
       return KernelSignature(
           "slice_grad",
-          {"Input", GradVarName("Out")},
+          {"Input", "Out@GRAD"},
          {"axes", "StartsTensor", "ends", "infer_flags", "decrease_axis"},
-          {GradVarName("Input")});
+          {"Input@GRAD"});
     }
   } else if (ctx.InputSize("StartsTensorList") > 0) {
     if (ctx.HasInput("EndsTensor")) {
       return KernelSignature("slice_grad",
-                             {"Input", GradVarName("Out")},
+                             {"Input", "Out@GRAD"},
                              {"axes",
                               "StartsTensorList",
                               "EndsTensor",
                               "infer_flags",
                               "decrease_axis"},
-                             {GradVarName("Input")});
+                             {"Input@GRAD"});
     } else if (ctx.InputSize("EndsTensorList") > 0) {
       return KernelSignature("slice_grad",
-                             {"Input", GradVarName("Out")},
+                             {"Input", "Out@GRAD"},
                              {"axes",
                               "StartsTensorList",
                               "EndsTensorList",
                               "infer_flags",
                               "decrease_axis"},
-                             {GradVarName("Input")});
+                             {"Input@GRAD"});
     } else {
       return KernelSignature(
           "slice_grad",
-          {"Input", GradVarName("Out")},
+          {"Input", "Out@GRAD"},
          {"axes", "StartsTensorList", "ends", "infer_flags", "decrease_axis"},
-          {GradVarName("Input")});
+          {"Input@GRAD"});
     }
   } else {
     if (ctx.HasInput("EndsTensor")) {
       return KernelSignature(
           "slice_grad",
-          {"Input", GradVarName("Out")},
+          {"Input", "Out@GRAD"},
          {"axes", "starts", "EndsTensor", "infer_flags", "decrease_axis"},
-          {GradVarName("Input")});
+          {"Input@GRAD"});
     } else if (ctx.InputSize("EndsTensorList") > 0) {
       return KernelSignature(
           "slice_grad",
-          {"Input", GradVarName("Out")},
+          {"Input", "Out@GRAD"},
          {"axes", "starts", "EndsTensorList", "infer_flags", "decrease_axis"},
-          {GradVarName("Input")});
+          {"Input@GRAD"});
     } else {
       return KernelSignature(
           "slice_grad",
-          {"Input", GradVarName("Out")},
+          {"Input", "Out@GRAD"},
          {"axes", "starts", "ends", "infer_flags", "decrease_axis"},
-          {GradVarName("Input")});
+          {"Input@GRAD"});
     }
   }
 }
diff --git a/paddle/phi/ops/compat/softmax_sig.cc b/paddle/phi/ops/compat/softmax_sig.cc
index 65a915b51d08a85acf16d4206faa765dc6434d8c..a30a2a2b06fd5ca9c0a45c325fc486ad0fdd4e50 100644
--- a/paddle/phi/ops/compat/softmax_sig.cc
+++ b/paddle/phi/ops/compat/softmax_sig.cc
@@ -22,10 +22,8 @@ KernelSignature SoftmaxOpArgumentMapping(const ArgumentMappingContext& ctx) {
 
 KernelSignature SoftmaxGradOpArgumentMapping(
     const ArgumentMappingContext& ctx) {
-  return KernelSignature("softmax_grad",
-                         {"Out", GradVarName("Out")},
-                         {"axis"},
-                         {GradVarName("X")});
+  return KernelSignature(
+      "softmax_grad", {"Out", "Out@GRAD"}, {"axis"}, {"X@GRAD"});
 }
 
 }  // namespace phi
diff --git a/paddle/phi/ops/compat/softmax_with_cross_entropy_sig.cc b/paddle/phi/ops/compat/softmax_with_cross_entropy_sig.cc
index 9cfc5ded90a49a1572a136f4de609b8ff4b742af..c75d4f711dc0fd3456e5b67c5bfa4b59cee77ce3 100644
--- a/paddle/phi/ops/compat/softmax_with_cross_entropy_sig.cc
+++ b/paddle/phi/ops/compat/softmax_with_cross_entropy_sig.cc
@@ -31,13 +31,13 @@ KernelSignature SoftmaxWithCrossEntropyOpArgumentMapping(
 KernelSignature SoftmaxWithCrossEntropyGradOpArgumentMapping(
     const ArgumentMappingContext& ctx) {
   return KernelSignature("cross_entropy_with_softmax_grad",
-                         {"Label", "Softmax", GradVarName("Loss")},
+                         {"Label", "Softmax", "Loss@GRAD"},
                          {"soft_label",
                           "use_softmax",
                           "numeric_stable_mode",
                           "ignore_index",
                           "axis"},
-                         {GradVarName("Logits")});
+                         {"Logits@GRAD"});
 }
 
 }  // namespace phi
diff --git a/paddle/phi/ops/compat/squeeze_sig.cc b/paddle/phi/ops/compat/squeeze_sig.cc
index 276246533e89e29f1a5d38fd4f3d831a044b5535..c65d77df9808e1b85ef15d890c75757a2177c2f4 100644
--- a/paddle/phi/ops/compat/squeeze_sig.cc
+++ b/paddle/phi/ops/compat/squeeze_sig.cc
@@ -23,10 +23,8 @@ KernelSignature SqueezeOpArgumentMapping(const ArgumentMappingContext& ctx) {
 
 KernelSignature SqueezeGradOpArgumentMapping(
     const ArgumentMappingContext& ctx) {
-  return KernelSignature("squeeze_grad",
-                         {"XShape", GradVarName("Out")},
-                         {"axes"},
-                         {GradVarName("X")});
+  return KernelSignature(
+      "squeeze_grad", {"XShape", "Out@GRAD"}, {"axes"}, {"X@GRAD"});
 }
 
 }  // namespace phi
diff --git a/paddle/phi/ops/compat/stack_sig.cc b/paddle/phi/ops/compat/stack_sig.cc
index 97768eb89026e98dd855276b34b4e4667bc3b0a1..334fdb29e5f25ea8e6974c2fadab0f8668159cb4 100644
--- a/paddle/phi/ops/compat/stack_sig.cc
+++ b/paddle/phi/ops/compat/stack_sig.cc
@@ -14,8 +14,7 @@ limitations under the License. */
 namespace phi {
 
 KernelSignature StackGradOpArgumentMapping(const ArgumentMappingContext& ctx) {
-  return KernelSignature(
-      "stack_grad", {GradVarName("Y")}, {"axis"}, {GradVarName("X")});
+  return KernelSignature("stack_grad", {"Y@GRAD"}, {"axis"}, {"X@GRAD"});
 }
 
 }  // namespace phi
diff --git a/paddle/phi/ops/compat/strided_slice_sig.cc b/paddle/phi/ops/compat/strided_slice_sig.cc
index 9fb70af0dea515f73dd755a2e2619f6981de455a..5421fcd616ce739863b8d6fe113dd48ff29d2e2a 100644
--- a/paddle/phi/ops/compat/strided_slice_sig.cc
+++ b/paddle/phi/ops/compat/strided_slice_sig.cc
@@ -29,35 +29,35 @@ KernelSignature StridedSliceOpArgumentMapping(
   bool use_attr_ends = !ctx.IsRuntime() && !ends.empty();
   bool use_attr_strides = !ctx.IsRuntime() && !strides.empty();
 
-  std::string starts_key =
+  const char* starts_key =
       ctx.HasInput("StartsTensor")
           ? "StartsTensor"
          : (ctx.InputSize("StartsTensorList") > 0
                 ? (use_attr_starts ? "starts" : "StartsTensorList")
                 : "starts");
-  std::string ends_key =
+  const char* ends_key =
       ctx.HasInput("EndsTensor")
          ? "EndsTensor"
          : (ctx.InputSize("EndsTensorList") > 0
                 ? (use_attr_ends ? "ends" : "EndsTensorList")
                 : "ends");
-  std::string strides_key =
+  const char* strides_key =
      ctx.HasInput("StridesTensor")
          ? "StridesTensor"
          : (ctx.InputSize("StridesTensorList") > 0
                 ? (use_attr_strides ? "strides" : "StridesTensorList")
                 : "strides");
 
-  paddle::SmallVector<std::string> inputs = {"Input"};
-  paddle::SmallVector<std::string> attrs = {"axes",
+  paddle::SmallVector<const char*> inputs = {"Input"};
+  paddle::SmallVector<const char*> attrs = {"axes",
                                             starts_key,
                                             ends_key,
                                             strides_key,
                                             "infer_flags",
                                             "decrease_axis"};
-  paddle::SmallVector<std::string> outputs = {"Out"};
+  paddle::SmallVector<const char*> outputs = {"Out"};
 
-  std::string kernel_name;
+  const char* kernel_name;
   if (ctx.IsDenseTensorVectorInput("Input")) {
     kernel_name = "strided_slice_array";
   } else {
@@ -78,35 +78,35 @@ KernelSignature StridedSliceGradOpArgumentMapping(
   bool use_attr_ends = !ctx.IsRuntime() && !ends.empty();
   bool use_attr_strides = !ctx.IsRuntime() && !strides.empty();
 
-  std::string starts_key =
+  const char* starts_key =
      ctx.HasInput("StartsTensor")
          ? "StartsTensor"
          : (ctx.InputSize("StartsTensorList") > 0
                 ? (use_attr_starts ? "starts" : "StartsTensorList")
                 : "starts");
-  std::string ends_key =
+  const char* ends_key =
      ctx.HasInput("EndsTensor")
          ? "EndsTensor"
          : (ctx.InputSize("EndsTensorList") > 0
                 ? (use_attr_ends ? "ends" : "EndsTensorList")
                 : "ends");
-  std::string strides_key =
+  const char* strides_key =
      ctx.HasInput("StridesTensor")
          ? "StridesTensor"
          : (ctx.InputSize("StridesTensorList") > 0
                 ? (use_attr_strides ? "strides" : "StridesTensorList")
                 : "strides");
 
-  paddle::SmallVector<std::string> inputs = {"Input", GradVarName("Out")};
-  paddle::SmallVector<std::string> attrs = {"axes",
+  paddle::SmallVector<const char*> inputs = {"Input", "Out@GRAD"};
+  paddle::SmallVector<const char*> attrs = {"axes",
                                             starts_key,
                                             ends_key,
                                             strides_key,
                                             "infer_flags",
                                             "decrease_axis"};
-  paddle::SmallVector<std::string> outputs = {GradVarName("Input")};
+  paddle::SmallVector<const char*> outputs = {"Input@GRAD"};
 
-  std::string kernel_name;
+  const char* kernel_name;
   if (ctx.IsDenseTensorVectorInput("Input")) {
     kernel_name = "strided_slice_array_grad";
   } else {
diff --git a/paddle/phi/ops/compat/take_along_axis_sig.cc b/paddle/phi/ops/compat/take_along_axis_sig.cc
index 27a996a270ddf4a36ad694836f45cb304f9a8f4c..a35c1c2db44800917ef90af31f40b64a0e27d18e 100644
--- a/paddle/phi/ops/compat/take_along_axis_sig.cc
+++ b/paddle/phi/ops/compat/take_along_axis_sig.cc
@@ -25,9 +25,9 @@ KernelSignature TakeAlongAxisArgumentMapping(
 KernelSignature TakeAlongAxisGradArgumentMapping(
     const ArgumentMappingContext& ctx) {
   return KernelSignature("take_along_axis_grad",
-                         {"Input", "Index", GradVarName("Result")},
+                         {"Input", "Index", "Result@GRAD"},
                          {"Axis"},
-                         {GradVarName("Input")});
+                         {"Input@GRAD"});
 }
 
 }  // namespace phi
diff --git a/paddle/phi/ops/compat/temporal_shift_sig.cc b/paddle/phi/ops/compat/temporal_shift_sig.cc
index a686c37ff7e65176e4cd14c448316699a9edd704..a6eed22716ca7c9504e4994b8c4c2e82ab853e05 100644
--- a/paddle/phi/ops/compat/temporal_shift_sig.cc
+++ b/paddle/phi/ops/compat/temporal_shift_sig.cc
@@ -27,9 +27,9 @@ KernelSignature TemporalShiftOpArgumentMapping(
 KernelSignature TemporalShiftGradOpArgumentMapping(
     const ArgumentMappingContext& ctx) {
   return KernelSignature("temporal_shift_grad",
-                         {GradVarName("Out")},
+                         {"Out@GRAD"},
                          {"seg_num", "shift_ratio", "data_format"},
-                         {GradVarName("X")});
+                         {"X@GRAD"});
 }
 
 }  // namespace phi
diff --git a/paddle/phi/ops/compat/tile_sig.cc b/paddle/phi/ops/compat/tile_sig.cc
index ca3fa5fe1f86ac13252c04c05c0508c47feded42..be401e40c4974eddac6197b364a56eb13e30383f 100644
--- a/paddle/phi/ops/compat/tile_sig.cc
+++ b/paddle/phi/ops/compat/tile_sig.cc
@@ -33,20 +33,14 @@ KernelSignature TileOpArgumentMapping(const ArgumentMappingContext& ctx) {
 
 KernelSignature TileGradOpArgumentMapping(const ArgumentMappingContext& ctx) {
   if (ctx.HasInput("RepeatTimes")) {
-    return KernelSignature("tile_grad",
-                           {"X", GradVarName("Out")},
-                           {"RepeatTimes"},
-                           {GradVarName("X")});
+    return KernelSignature(
+        "tile_grad", {"X", "Out@GRAD"}, {"RepeatTimes"}, {"X@GRAD"});
   } else if (ctx.InputSize("repeat_times_tensor") > 0) {
-    return KernelSignature("tile_grad",
-                           {"X", GradVarName("Out")},
-                           {"repeat_times_tensor"},
-                           {GradVarName("X")});
+    return KernelSignature(
+        "tile_grad", {"X", "Out@GRAD"}, {"repeat_times_tensor"}, {"X@GRAD"});
   } else {
-    return KernelSignature("tile_grad",
-                           {"X", GradVarName("Out")},
-                           {"repeat_times"},
-                           {GradVarName("X")});
+    return KernelSignature(
+        "tile_grad", {"X", "Out@GRAD"}, {"repeat_times"}, {"X@GRAD"});
   }
 }
 
diff --git a/paddle/phi/ops/compat/top_k_sig.cc b/paddle/phi/ops/compat/top_k_sig.cc
index 8488a18e34ce10642929133a422ab3f2418f419f..c1073f9efdc6b561e703a45c90d8d6557c329270 100644
--- a/paddle/phi/ops/compat/top_k_sig.cc
+++ b/paddle/phi/ops/compat/top_k_sig.cc
@@ -29,9 +29,9 @@ KernelSignature TopkOpArgumentMapping(const ArgumentMappingContext& ctx) {
 
 KernelSignature TopkGradOpArgumentMapping(const ArgumentMappingContext& ctx) {
   return KernelSignature("top_k_grad",
-                         {"X", "Indices", GradVarName("Out")},
+                         {"X", "Indices", "Out@GRAD"},
                          {"k", "axis", "largest", "sorted"},
-                         {GradVarName("X")});
+                         {"X@GRAD"});
 }
 
 }  // namespace phi
diff --git a/paddle/phi/ops/compat/trace_sig.cc b/paddle/phi/ops/compat/trace_sig.cc
index c3f5d6d287551e0c8732f3c6a7fca9cfcf3276bb..2cb7d9a80bce5099d280b2021132142c36c4af5a 100644
--- a/paddle/phi/ops/compat/trace_sig.cc
+++ b/paddle/phi/ops/compat/trace_sig.cc
@@ -23,9 +23,9 @@ KernelSignature TraceOpArgumentMapping(const ArgumentMappingContext& ctx) {
 
 KernelSignature TraceGradOpArgumentMapping(const ArgumentMappingContext& ctx) {
   return KernelSignature("trace_grad",
-                         {"Input", GradVarName("Out")},
+                         {"Input", "Out@GRAD"},
                          {"offset", "axis1", "axis2"},
-                         {GradVarName("Input")});
+                         {"Input@GRAD"});
 }
 
 }  // namespace phi
diff --git a/paddle/phi/ops/compat/transpose_sig.cc b/paddle/phi/ops/compat/transpose_sig.cc
index 90961760cfc66822ea766080de725a787627682f..0f2a3108ec9e6943c723a6ee352ae7dd6abc0ce9 100644
--- a/paddle/phi/ops/compat/transpose_sig.cc
+++ b/paddle/phi/ops/compat/transpose_sig.cc
@@ -22,8 +22,7 @@ KernelSignature TransposeOpArgumentMapping(const ArgumentMappingContext& ctx) {
 
 KernelSignature TransposeGradOpArgumentMapping(
     const ArgumentMappingContext& ctx) {
-  return KernelSignature(
-      "transpose_grad", {GradVarName("Out")}, {"axis"}, {GradVarName("X")});
+  return KernelSignature("transpose_grad", {"Out@GRAD"}, {"axis"}, {"X@GRAD"});
 }
 
 }  // namespace phi
diff --git a/paddle/phi/ops/compat/triangular_solve_sig.cc b/paddle/phi/ops/compat/triangular_solve_sig.cc
index c56af3e21e53e9ded6d01ad7fdb9c0fb5609ea6c..851db32a032d65db0a4f5a724681861623ae894c 100644
--- a/paddle/phi/ops/compat/triangular_solve_sig.cc
+++ b/paddle/phi/ops/compat/triangular_solve_sig.cc
@@ -19,9 +19,9 @@ namespace phi {
 KernelSignature TriangularSolveGradOpArgumentMapping(
     const ArgumentMappingContext& ctx) {
   return KernelSignature("triangular_solve_grad",
-                         {"X", "Y", "Out", GradVarName("Out")},
+                         {"X", "Y", "Out", "Out@GRAD"},
                          {"upper", "transpose", "unitriangular"},
-                         {GradVarName("X"), GradVarName("Y")});
+                         {"X@GRAD", "Y@GRAD"});
 }
 
 }  // namespace phi
diff --git a/paddle/phi/ops/compat/tril_triu_sig.cc b/paddle/phi/ops/compat/tril_triu_sig.cc
index 4f79f8650decfc6556287be2caefa6d1074ecf7f..3c5fa15b41cae0a1f590e29312aefe34d2427f60 100644
--- a/paddle/phi/ops/compat/tril_triu_sig.cc
+++ b/paddle/phi/ops/compat/tril_triu_sig.cc
@@ -22,10 +22,8 @@ KernelSignature TrilTriuOpArgumentMapping(const ArgumentMappingContext& ctx) {
 
 KernelSignature TrilTriuGradOpArgumentMapping(
     const ArgumentMappingContext& ctx) {
-  return KernelSignature("tril_triu_grad",
-                         {GradVarName("Out")},
-                         {"diagonal", "lower"},
-                         {GradVarName("X")});
+  return KernelSignature(
+      "tril_triu_grad", {"Out@GRAD"}, {"diagonal", "lower"}, {"X@GRAD"});
 }
 
 }  // namespace phi
diff --git a/paddle/phi/ops/compat/trunc_sig.cc b/paddle/phi/ops/compat/trunc_sig.cc
index 2d35439216da522ecc3f279814226afeb3e24948..7b6a7771fbe89a2f69faa769825f3990b2dffcfb 100644
--- a/paddle/phi/ops/compat/trunc_sig.cc
+++ b/paddle/phi/ops/compat/trunc_sig.cc
@@ -21,8 +21,7 @@ KernelSignature TruncOpArgumentMapping(const ArgumentMappingContext& ctx) {
 }
 
 KernelSignature TruncGradOpArgumentMapping(const ArgumentMappingContext& ctx) {
-  return KernelSignature(
-      "trunc_grad", {GradVarName("Out")}, {}, {GradVarName("X")});
+  return KernelSignature("trunc_grad", {"Out@GRAD"}, {}, {"X@GRAD"});
 }
 
 }  // namespace phi
diff --git a/paddle/phi/ops/compat/unfold_sig.cc b/paddle/phi/ops/compat/unfold_sig.cc
index ddc3b1813cbef7b562369df8537260e9de6c017f..45415616f296905453ece95ea27dea7bdf240b82 100644
--- a/paddle/phi/ops/compat/unfold_sig.cc
+++ b/paddle/phi/ops/compat/unfold_sig.cc
@@ -18,9 +18,9 @@ namespace phi {
 KernelSignature UnfoldGradOpArgumentMapping(
     const ArgumentMappingContext& ctx) {
   return KernelSignature("unfold_grad",
-                         {"X", GradVarName("Y")},
+                         {"X", "Y@GRAD"},
                          {"kernel_sizes", "strides", "paddings", "dilations"},
-                         {GradVarName("X")});
+                         {"X@GRAD"});
 }
 
 }  // namespace phi
diff --git a/paddle/phi/ops/compat/unsqueeze_sig.cc b/paddle/phi/ops/compat/unsqueeze_sig.cc
index 20cd9701e83e5ecf3563eeccb5e1b4b2923bd65a..c802c2684b282f972cb27aaa577b7b9e0b79ddaf 100644
--- a/paddle/phi/ops/compat/unsqueeze_sig.cc
+++ b/paddle/phi/ops/compat/unsqueeze_sig.cc
@@ -35,7 +35,7 @@ KernelSignature UnsqueezeOpArgumentMapping(const ArgumentMappingContext& ctx) {
 KernelSignature UnsqueezeGradOpArgumentMapping(
     const ArgumentMappingContext& ctx) {
   return KernelSignature(
-      "unsqueeze_grad", {"XShape", GradVarName("Out")}, {}, {GradVarName("X")});
+      "unsqueeze_grad", {"XShape", "Out@GRAD"}, {}, {"X@GRAD"});
 }
 }  // namespace phi
 PD_REGISTER_BASE_KERNEL_NAME(unsqueeze2, unsqueeze);
diff --git a/paddle/phi/ops/compat/unstack_sig.cc b/paddle/phi/ops/compat/unstack_sig.cc
index 41d7fc120a9efd2eee7521b037fea24fc4843c81..d03499f94b6be617c7392b32256c9dc6334860ac 100644
--- a/paddle/phi/ops/compat/unstack_sig.cc
+++ b/paddle/phi/ops/compat/unstack_sig.cc
@@ -15,8 +15,7 @@ namespace phi {
 
 KernelSignature UnStackGradOpArgumentMapping(
     const ArgumentMappingContext& ctx) {
-  return KernelSignature(
-      "unstack_grad", {GradVarName("Y")}, {"axis"}, {GradVarName("X")});
+  return KernelSignature("unstack_grad", {"Y@GRAD"}, {"axis"}, {"X@GRAD"});
 }
 
 }  // namespace phi
diff --git a/paddle/phi/ops/compat/warpctc_sig.cc b/paddle/phi/ops/compat/warpctc_sig.cc
index 75f440de7f2dbbc8bb07f940dd48d2d902950297..ac3dc366ad8c624f7707f98249c0897a6b70e0c7 100644
--- a/paddle/phi/ops/compat/warpctc_sig.cc
+++ b/paddle/phi/ops/compat/warpctc_sig.cc
@@ -25,11 +25,10 @@ KernelSignature WarpctcOpArgumentMapping(const ArgumentMappingContext& ctx) {
 
 KernelSignature WarpctcGradOpArgumentMapping(
     const ArgumentMappingContext& ctx) {
-  return KernelSignature(
-      "warpctc_grad",
-      {"WarpCTCGrad", "Logits", GradVarName("Loss"), "LogitsLength"},
-      {"blank", "norm_by_times"},
-      {GradVarName("Logits")});
+  return KernelSignature("warpctc_grad",
+                         {"WarpCTCGrad", "Logits", "Loss@GRAD", "LogitsLength"},
+                         {"blank", "norm_by_times"},
+                         {"Logits@GRAD"});
 }
 
 }  // namespace phi
diff --git a/paddle/phi/ops/compat/where_grad_sig.cc b/paddle/phi/ops/compat/where_grad_sig.cc
index 71984a26d35afd841654d82480c263799bdbf181..e0c380672c895c5f68321ee971d17d598b328646 100644
--- a/paddle/phi/ops/compat/where_grad_sig.cc
+++ b/paddle/phi/ops/compat/where_grad_sig.cc
@@ -18,9 +18,9 @@ namespace phi {
 KernelSignature WhereGradOpArgumentMapping(
     const ArgumentMappingContext& ctx) {
   return KernelSignature("where_grad",
-                         {"Condition", "X", "Y", GradVarName("Out")},
+                         {"Condition", "X", "Y", "Out@GRAD"},
                          {},
-                         {GradVarName("X"), GradVarName("Y")});
+                         {"X@GRAD", "Y@GRAD"});
 }
 
 }  // namespace phi
diff --git a/paddle/phi/ops/compat/yolov3_loss_sig.cc b/paddle/phi/ops/compat/yolov3_loss_sig.cc
index bbdadfa93ba9636daefc27fe69de6f057d3a9931..8d5d82a9e72e3c92bd843a59ebd2e93f6cfc4e12 100644
--- a/paddle/phi/ops/compat/yolov3_loss_sig.cc
+++ b/paddle/phi/ops/compat/yolov3_loss_sig.cc
@@ -31,25 +31,23 @@ KernelSignature Yolov3LossOpArgumentMapping(const ArgumentMappingContext& ctx) {
 
 KernelSignature Yolov3LossGradOpArgumentMapping(
     const ArgumentMappingContext& ctx) {
-  return KernelSignature("yolov3_loss_grad",
-                         {"X",
-                          "GTBox",
-                          "GTLabel",
-                          "GTScore",
-                          GradVarName("Loss"),
-                          "ObjectnessMask",
-                          "GTMatchMask"},
-                         {"anchors",
-                          "anchor_mask",
-                          "class_num",
-                          "ignore_thresh",
-                          "downsample_ratio",
-                          "use_label_smooth",
-                          "scale_x_y"},
-                         {GradVarName("X"),
-                          GradVarName("GTBox"),
-                          GradVarName("GTLabel"),
-                          GradVarName("GTScore")});
+  return KernelSignature(
+      "yolov3_loss_grad",
+      {"X",
+       "GTBox",
+       "GTLabel",
+       "GTScore",
+       "Loss@GRAD",
+       "ObjectnessMask",
+       "GTMatchMask"},
+      {"anchors",
+       "anchor_mask",
+       "class_num",
+       "ignore_thresh",
+       "downsample_ratio",
+       "use_label_smooth",
+       "scale_x_y"},
+      {"X@GRAD", "GTBox@GRAD", "GTLabel@GRAD", "GTScore@GRAD"});
 }
 
 }  // namespace phi
diff --git a/paddle/phi/tests/core/test_meta_fn_utils.cc b/paddle/phi/tests/core/test_meta_fn_utils.cc
index c90e2f3dbcdedca876648883be902df7244821bc..028b9d23352c7f96740a98851d28bdd087a108fe 100644
--- a/paddle/phi/tests/core/test_meta_fn_utils.cc
+++ b/paddle/phi/tests/core/test_meta_fn_utils.cc
@@ -46,9 +46,9 @@ TEST(MetaFnFactory, InferMetaFnExists) {
   phi::MetaTensor meta_out(&dense_out1);
   phi::UnchangedInferMeta(meta_x, &meta_out);
 
-  auto shared_meat_x = std::make_shared<phi::MetaTensor>(&dense_x);
+  auto shared_meat_x = phi::MetaTensor(&dense_x);
   phi::DenseTensor dense_out2;
-  auto shared_meta_out = std::make_shared<phi::MetaTensor>(&dense_out2);
+  auto shared_meta_out = phi::MetaTensor(&dense_out2);
   phi::InferMetaContext ctx;
   ctx.EmplaceBackInput(shared_meat_x);
   ctx.EmplaceBackOutput(shared_meta_out);
@@ -69,9 +69,9 @@ TEST(MetaFnFactory, CopyInferMetaFn) {
   phi::MetaTensor meta_out(&dense_out1);
   phi::UnchangedInferMeta(meta_x, &meta_out);
 
-  auto shared_meat_x = std::make_shared<phi::MetaTensor>(&dense_x);
+  auto shared_meat_x = phi::MetaTensor(&dense_x);
   phi::DenseTensor dense_out2;
-  auto shared_meta_out = std::make_shared<phi::MetaTensor>(&dense_out2);
+  auto shared_meta_out = phi::MetaTensor(&dense_out2);
 
   phi::InferMetaContext ctx;
   ctx.EmplaceBackInput(shared_meat_x);
@@ -90,13 +90,13 @@ TEST(MetaFnFactory, SplitInferMetaFn) {
   phi::DenseTensor dense_x;
   dense_x.Resize({4, 10});
   phi::MetaTensor meta_x(&dense_x);
-  auto shared_meat_x = std::make_shared<phi::MetaTensor>(&dense_x);
+  auto shared_meat_x = phi::MetaTensor(&dense_x);
 
   phi::DenseTensor dense_out1;
   phi::DenseTensor dense_out2;
-  paddle::SmallVector<std::shared_ptr<phi::MetaTensor>> out;
-  out.push_back(std::make_shared<phi::MetaTensor>(&dense_out1));
-  out.push_back(std::make_shared<phi::MetaTensor>(&dense_out2));
+  paddle::SmallVector<phi::MetaTensor> out;
+  out.emplace_back(phi::MetaTensor(&dense_out1));
+  out.emplace_back(phi::MetaTensor(&dense_out2));
 
   phi::InferMetaContext ctx;
   ctx.EmplaceBackInput(shared_meat_x);
diff --git a/paddle/testing/CMakeLists.txt b/paddle/testing/CMakeLists.txt
index 0cc68bf31617c4894a27ece3749ed643b34a52a1..2c977e923b5b1c662f0c5823ca32e91eedfa8658 100644
--- a/paddle/testing/CMakeLists.txt
+++ b/paddle/testing/CMakeLists.txt
@@ -1,5 +1,5 @@
 # for paddle test case
 
 if(WITH_TESTING)
-  cc_library(paddle_gtest_main SRCS paddle_gtest_main.cc DEPS init device_context memory gtest gflags proto_desc)
+  cc_library(paddle_gtest_main SRCS paddle_gtest_main.cc DEPS init device_context memory gtest gflags proto_desc phi_utils)
 endif()
diff --git a/paddle/testing/paddle_gtest_main.cc b/paddle/testing/paddle_gtest_main.cc
index 0fb5412ff051fcb7b5eca1bad719d487f05b36c9..bb919f0e9110c162fbf7870b57e46fa3388189f8 100644
--- a/paddle/testing/paddle_gtest_main.cc
+++ b/paddle/testing/paddle_gtest_main.cc
@@ -14,6 +14,7 @@ limitations under the License. */
 
 #include "gflags/gflags.h"
 #include "gtest/gtest.h"
+#include "paddle/fluid/framework/phi_utils.h"
 #include "paddle/fluid/memory/allocation/allocator_strategy.h"
 #include "paddle/fluid/platform/device/npu/npu_info.h"
 #include "paddle/fluid/platform/flags.h"
@@ -85,6 +86,7 @@ int main(int argc, char** argv) {
   ::GFLAGS_NAMESPACE::ParseCommandLineFlags(
       &new_argc, &new_argv_address, false);
   paddle::framework::InitDevices();
+  paddle::framework::InitDefaultKernelSignatureMap();
 
   int ret = RUN_ALL_TESTS();
 
diff --git a/python/paddle/fluid/__init__.py b/python/paddle/fluid/__init__.py
index fb9e8d8ece100baa3ed7c65a8dc495aa12c254ff..13b964274fde270a86deeecff34d03a55e242104 100644
--- a/python/paddle/fluid/__init__.py
+++ b/python/paddle/fluid/__init__.py
@@ -212,6 +212,7 @@ def __bootstrap__():
     core.init_glog(sys.argv[0])
     # don't init_p2p when in unittest to save time.
     core.init_devices()
+    core.init_default_kernel_signatures()
 
     # TODO(panyx0718): Avoid doing complex initialization logic in __init__.py.
 
diff --git a/python/paddle/utils/code_gen/api.yaml b/python/paddle/utils/code_gen/api.yaml
index 4779750b5b44078976a0aac81a38128a500c7c67..a4c6fac836cfa0dbb062c0b6a2acc576b73d9951 100644
--- a/python/paddle/utils/code_gen/api.yaml
+++ b/python/paddle/utils/code_gen/api.yaml
@@ -1332,8 +1332,11 @@
 
 - api : meshgrid
   args : (Tensor[] inputs)
-  output : Tensor[]
-  invoke : meshgrid_impl(inputs)
+  output : Tensor[]{inputs.size()}
+  infer_meta :
+    func : MeshgridInferMeta
+  kernel :
+    func : meshgrid
   backward : meshgrid_grad
 
 - api : min
@@ -2103,8 +2106,11 @@
 
 - api : unbind
   args : (Tensor input, int axis)
-  output : Tensor[]
-  invoke : unbind_impl(input, axis)
+  output : Tensor[] {axis<0 ? input.dims()[input.dims().size()+axis]:input.dims()[axis]}
+  infer_meta :
+    func : UnbindInferMeta
+  kernel :
+    func : unbind
   backward : unbind_grad
 
 # unfold
diff --git a/python/paddle/utils/code_gen/api_base.py b/python/paddle/utils/code_gen/api_base.py
index e8d067483d8fcceaa2e9700baafe60fd5821c77d..378ead7ff20aa9d29e69a36129cd5dd011ca4ebb 100644
--- a/python/paddle/utils/code_gen/api_base.py
+++ b/python/paddle/utils/code_gen/api_base.py
@@ -31,6 +31,7 @@ class BaseAPI(object):
         # outputs:
         #     names : [], list of output names
         #     types : [], list of output types
+        #     out_size_expr : [], expression for getting size of vector
         #     return_type : Tensor, vector, ..., the return type of api
         # args_str:
         #     args_declare : "str" // str of function params with default value. Example: (..., bool flag=false)
@@ -67,11 +68,12 @@ class BaseAPI(object):
         ]
         inputs, attrs, args_str = self.parse_input_and_attr(
             api_name, api_item_yaml['args'], optional_vars)
-        output_type_list, output_names, return_type = self.parse_output(
+        output_type_list, output_names, out_size_expr, return_type = self.parse_output(
             api_name, api_item_yaml['output'])
 
         return inputs, attrs, {
             'names': output_names,
             'types': output_type_list,
+            'out_size_expr': out_size_expr,
             'return_type': return_type
         }, args_str, optional_vars
@@ -184,39 +186,36 @@ class BaseAPI(object):
                 'Tensor': 'Tensor',
                 'Tensor[]': 'std::vector<Tensor>'
             }
 
-            if re.search(r'\([a-zA-Z0-9_@]*\)', output_item):
-                result = re.search(
-                    r"(?P<out_type>[a-zA-Z0-9_[\]]+)\s*\((?P<name>[a-zA-Z0-9_@]+)\)",
-                    output_item)
-                out_type = result.group('out_type')
-                assert out_type in output_type_map, \
-                    f"{api_name} : Output type error: the output type only support Tensor and Tensor[], \
-                      but now is {out_type}."
-
-                return output_type_map[out_type], result.group('name')
-
-            else:
-                if output_item.strip() in output_type_map:
-                    return output_type_map[output_item.strip()], 'out'
-                else:
-                    raise ValueError(
-                        "{} : Output type error: the output type only support Tensor and Tensor[], \
-                          but now is {}.".format(api_name, output_item.strip()))
+            result = re.search(
+                r"(?P<out_type>[a-zA-Z0-9_[\]]+)\s*(?P<name>\([a-zA-Z0-9_@]+\))?\s*(?P<expr>\{[^\}]+\})?",
+                output_item)
+            assert result is not None, f"{api_name} : the output config parse error."
+            out_type = result.group('out_type')
+            assert out_type in output_type_map, \
+                f"{api_name} : Output type error: the output type only support Tensor and Tensor[], \
+                  but now is {out_type}."
+
+            out_name = 'out' if result.group('name') is None else result.group(
+                'name')[1:-1]
+            out_size_expr = None if result.group(
+                'expr') is None else result.group('expr')[1:-1]
+            return output_type_map[out_type], out_name, out_size_expr
 
         temp_list = output_config.split(',')
         if len(temp_list) == 1:
-            out_type, out_name = parse_output_item(temp_list[0])
-            return [out_type], [out_name], self.get_return_type([out_type])
+            out_type, out_name, size_expr = parse_output_item(temp_list[0])
+            return [out_type], [out_name], size_expr, self.get_return_type(
+                [out_type])
         else:
             out_type_list = []
             out_name_list = []
             for output_item in temp_list:
-                out_type, out_name = parse_output_item(output_item)
+                out_type, out_name, size_expr = parse_output_item(output_item)
                 out_type_list.append(out_type)
                 out_name_list.append(out_name)
-            return out_type_list, out_name_list, self.get_return_type(
+            return out_type_list, out_name_list, size_expr, self.get_return_type(
                 out_type_list)
 
     def parse_infer_meta(self, infer_meta_config):
@@ -462,9 +461,8 @@ PADDLE_API {self.gene_return_type_code()} {self.get_api_func_name() + '_'}({self
         attr_names = self.attrs['names']
         infer_meta = self.infer_meta
 
-        infer_meta_params = infer_meta[
-            'param'] + kernel_output_names if infer_meta[
-                'param'] is not None else input_names + attr_names + kernel_output_names
+        infer_meta_params = infer_meta['param'] if infer_meta[
+            'param'] is not None else input_names + attr_names
         # generate meta tensors
         meta_tensor_code = ""
         param_code = ""
@@ -476,7 +474,7 @@ PADDLE_API {self.gene_return_type_code()} {self.get_api_func_name() + '_'}({self
                     param] == "const std::vector<Tensor>&":
                 meta_tensor_code = meta_tensor_code + f"""
{code_indent}  auto {param}_meta_vec = MakeMetaTensor({PREFIX_TENSOR_NAME}{param});
-{code_indent}  std::vector {param}_metas({param}_meta_vec.size());
+{code_indent}  std::vector {param}_metas({param}_meta_vec.size());
{code_indent}  for (size_t i = 0; i < {param}_meta_vec.size(); ++i) {{
{code_indent}    {param}_metas[i] = &{param}_meta_vec[i];
{code_indent}  }}
@@ -500,11 +498,6 @@ PADDLE_API {self.gene_return_type_code()} {self.get_api_func_name() + '_'}({self
                 raise ValueError(
                     f"{self.api} : Param of infer_meta error : {self.inputs['input_info'][param]} type is not supported."
                 )
-            elif param in kernel_output_names:
-                meta_tensor_code = meta_tensor_code + code_indent + " phi::MetaTensor " + param.replace(
-                    'kernel_', PREFIX_META_TENSOR_NAME) + "(" + param + ");\n"
-                param_code = param_code + "&" + param.replace(
-                    'kernel_', PREFIX_META_TENSOR_NAME) + ", "
             elif param in attr_names:
                 param_code = param_code + param + ", "
             elif isinstance(param, str):
@@ -514,6 +507,23 @@ PADDLE_API {self.gene_return_type_code()} {self.get_api_func_name() + '_'}({self
             else:
                 param_code = param_code + str(param) + ", "
 
+        for i, out_name in enumerate(kernel_output_names):
+            if self.outputs['types'][i] == 'std::vector<Tensor>':
+                meta_tensor_code = meta_tensor_code + f"""
+{code_indent}  auto {out_name}_{PREFIX_META_TENSOR_NAME}vec = MakeMetaTensor({out_name});
+{code_indent}  std::vector {out_name}_metas({out_name}_{PREFIX_META_TENSOR_NAME}vec.size());
+{code_indent}  for (size_t i = 0; i < {out_name}_{PREFIX_META_TENSOR_NAME}vec.size(); ++i) {{
+{code_indent}    {out_name}_metas[i] = &{out_name}_{PREFIX_META_TENSOR_NAME}vec[i];
+{code_indent}  }}"""
+
+                param_code = param_code + out_name + '_metas, '
+            else:
+                meta_tensor_code = meta_tensor_code + code_indent + " phi::MetaTensor " + out_name.replace(
+                    'kernel_',
+                    PREFIX_META_TENSOR_NAME) + "(" + out_name + ");\n"
+                param_code = param_code + "&" + out_name.replace(
+                    'kernel_', PREFIX_META_TENSOR_NAME) + ", "
+
         param_code = param_code[:-2]
         return f"""{meta_tensor_code}
{code_indent}  phi::{infer_meta['func']}({param_code});
diff --git a/python/paddle/utils/code_gen/api_gen.py b/python/paddle/utils/code_gen/api_gen.py
index 4087b55b51324aba202cb0fc9bc74ceb77f84700..538958c2361bc74b466af6c96b4bddcdcf6e9001 100644
--- a/python/paddle/utils/code_gen/api_gen.py
+++ b/python/paddle/utils/code_gen/api_gen.py
@@ -91,7 +91,16 @@ class ForwardAPI(BaseAPI):
                 0]] if inplace_flag and self.inplace_map is not None and self.outputs[
                     'names'][0] in self.inplace_map else ""
             output_create = f"""
-{code_indent}  {self.outputs['return_type']} api_output{inplace_assign};
+{code_indent}  {self.outputs['return_type']} api_output{inplace_assign};"""
+
+            if self.outputs['return_type'] == 'std::vector<Tensor>':
+                assert self.outputs['out_size_expr'] is not None, \
+                    f"{api_name}: The out size expr : '{{expr}}' should be set when output has Tensor[]. You can refer 'split' api."
+                output_create = output_create + f"""
+{code_indent}  auto kernel_out = {set_out_func}({self.outputs['out_size_expr']}, kernel_backend, &api_output);"""
+
+            else:
+                output_create = output_create + f"""
 {code_indent}  auto kernel_out = {set_out_func}(kernel_backend, &api_output);"""
 
             if not inplace_flag and self.view_map is not None and self.outputs[
@@ -113,7 +122,14 @@ class ForwardAPI(BaseAPI):
                     output_create = output_create + f"""
{code_indent}  std::get<{i}>(api_output) = {self.inplace_map[self.outputs['names'][i]]};"""
 
-                output_create = output_create + f"""
+                if output_type_list[i] == 'std::vector<Tensor>':
+                    assert self.outputs['out_size_expr'][i] is not None, \
+                        f"{api_name}: The out size expr : '{{expr}}' should be set when output has Tensor[]. You can refer 'split' api."
+                    output_create = output_create + f"""
+{code_indent}  auto kernel_out_{i} = {set_out_func}({self.outputs['out_size_expr'][i]}, kernel_backend, &std::get<{i}>(api_output));"""
+
+                else:
+                    output_create = output_create + f"""
 {code_indent}  auto kernel_out_{i} = {set_out_func}(kernel_backend, &std::get<{i}>(api_output));"""
 
             if not inplace_flag and self.view_map is not None and self.outputs[
diff --git a/python/paddle/utils/code_gen/backward.yaml b/python/paddle/utils/code_gen/backward.yaml
index 55efb61e73f9f5879be59a9b76ef87321eaa5894..59ad29db61fddd1485cd58fadfd8ce00319f2ef4 100644
--- a/python/paddle/utils/code_gen/backward.yaml
+++ b/python/paddle/utils/code_gen/backward.yaml
@@ -57,7 +57,7 @@
 - backward_api : add_n_grad
   forward : add_n (Tensor[] x) -> Tensor(out)
   args : (Tensor[] x, Tensor out_grad)
-  output : Tensor[](x_grad)
+  output : Tensor[](x_grad){x.size()}
   invoke : add_n_grad_impl(x, out_grad)
   no_need_buffer : x
 
@@ -238,8 +238,12 @@
 - backward_api : concat_grad
   forward : concat (Tensor[] x, Scalar axis) -> Tensor(out)
   args : (Tensor[] x, Tensor out_grad, Scalar axis = 0)
-  output : Tensor[](x_grad)
-  invoke : concat_grad_impl(x, out_grad, axis)
+  output : Tensor[](x_grad){x.size()}
+  infer_meta :
+    func : UnchangedMultiInferMeta
+    param : [x]
+  kernel :
+    func : concat_grad
   no_need_buffer : x
 
 - backward_api : conj_grad
@@ -1018,8 +1022,11 @@
 - backward_api : meshgrid_grad
   forward : meshgrid (Tensor[] inputs) -> Tensor[](outputs)
   args : (Tensor[] inputs, Tensor[] outputs_grad)
-  output : Tensor[](inputs_grad)
-  invoke : meshgrid_grad_impl(inputs, outputs_grad)
+  output : Tensor[](inputs_grad){inputs.size()}
+  infer_meta :
+    func : MeshgridGradInferMeta
+  kernel :
+    func : meshgrid_grad
 
 - backward_api : min_grad
   forward: min (Tensor x, int64_t[] dims={}, bool keep_dim=false) -> Tensor(out)
@@ -1075,14 +1082,22 @@
 - backward_api : multi_dot_grad
   forward : multi_dot (Tensor[] x) -> Tensor(out)
   args : (Tensor[] x, Tensor out_grad)
-  output : Tensor[](x_grad)
-  invoke : multi_dot_grad_impl(x, out_grad)
+  output : Tensor[](x_grad) {x.size()}
+  infer_meta :
+    func : MultiDotGradInferMeta
+  kernel :
+    func : multi_dot_grad
 
 - backward_api : multiplex_grad
   forward : multiplex (Tensor[] ins, Tensor ids) -> Tensor(out)
   args : (Tensor[] ins, Tensor ids, Tensor out_grad)
-  output : Tensor[](ins_grad)
-  invoke : multiplex_grad_impl(ins, ids, out_grad)
+  output : Tensor[](ins_grad){ins.size()}
+  infer_meta :
+    func : MultiplexGradInferMeta
+    param : [ids, out_grad]
+  kernel :
+    func : multiplex_grad
+    param : [ids, out_grad]
 
 - backward_api : multiply_double_grad
   forward : multiply_grad (Tensor x, Tensor y, Tensor grad_out, int axis = -1) -> Tensor(grad_x), Tensor(grad_y)
@@ -1581,8 +1596,13 @@
 - backward_api : stack_grad
   forward : stack (Tensor[] x, int axis) -> Tensor(out)
   args : (Tensor[] x, Tensor out_grad, int axis)
-  output : Tensor[](x_grad)
-  invoke : stack_grad_impl(x, out_grad, axis)
+  output : Tensor[](x_grad){x.size()}
+  infer_meta :
+    func : StackGradInferMeta
+    param: [out_grad, axis]
+  kernel :
+    func : stack_grad
+    param : [out_grad, axis]
   no_need_buffer : x
 
 - backward_api : strided_slice_grad
diff --git a/python/paddle/utils/code_gen/backward_api_gen.py b/python/paddle/utils/code_gen/backward_api_gen.py
index 46aa3e7e23d514a15ec2a4af05c9654876815b65..a88339c607c555d35e953b051f153e0796c2f913 100644
--- a/python/paddle/utils/code_gen/backward_api_gen.py
+++ b/python/paddle/utils/code_gen/backward_api_gen.py
@@ -35,7 +35,7 @@ class BackwardAPI(BaseAPI):
             r"(?P<api>[a-z][a-z0-9_]+)\s*(?P<args>\([^\)]+\))\s*->\s*(?P<outputs>.+)",
             forward_config)
         api = result.group('api')
-        _, outputs, _ = self.parse_output(self.api, result.group('outputs'))
+        _, outputs, _, _ = self.parse_output(self.api, result.group('outputs'))
         outputs = [item.split('@')[0] for item in outputs]
         fw_inputs, fw_attrs, _, = self.parse_input_and_attr(
             api, result.group('args'))
@@ -110,7 +110,16 @@ class BackwardAPI(BaseAPI):
                 0]] if inplace_flag and self.inplace_map is not None and self.outputs[
                     'names'][0] in self.inplace_map else ""
             output_create = f"""
-{code_indent}  {self.outputs['return_type']} api_output{inplace_assign};
+{code_indent}  {self.outputs['return_type']} api_output{inplace_assign};"""
+
+            if output_type_list[0] == 'std::vector<Tensor>':
+                assert self.outputs['out_size_expr'] is not None, \
+                    f"{api_name}: The out size expr : '{{expr}}' should be set when output has Tensor[]. You can refer 'split' api."
+                output_create = output_create + f"""
+{code_indent}  auto kernel_out = {set_out_func}({self.outputs['out_size_expr']}, kernel_backend, &api_output);"""
+
+            else:
+                output_create = output_create + f"""
 {code_indent}  auto kernel_out = {set_out_func}(kernel_backend, &api_output);"""
 
         elif len(output_type_list) > 1:
@@ -121,7 +130,6 @@ class BackwardAPI(BaseAPI):
                 kernel_output = kernel_output + f'kernel_out_{i}, '
                 output_names.append(f'kernel_out_{i}')
                 if out_type_item == 'Tensor':
-                    get_out_code = f'&api_output[{i}][0]'
                     if inplace_flag and self.inplace_map is not None and self.outputs[
                             'names'][i] in self.inplace_map:
                         output_create = output_create + f"""
@@ -131,6 +139,9 @@ class BackwardAPI(BaseAPI):
                         output_create = output_create + f"""
{code_indent}  api_output[{i}].emplace_back();"""
+                    output_create = output_create + f"""
+{code_indent}  auto kernel_out_{i} = {set_out_func}(kernel_backend, &api_output[{i}][0]);"""
+
 
                 else:
                     get_out_code = f'&api_output[{i}]'
                     if inplace_flag and self.inplace_map is not None and self.outputs[
@@ -138,8 +149,10 @@ class BackwardAPI(BaseAPI):
                         output_create = output_create + f"""
{code_indent}  api_output[{i}] = {self.inplace_map[self.outputs['names'][i]]};"""
 
-                    output_create = output_create + f"""
-{code_indent}  auto kernel_out_{i} = {set_out_func}(kernel_backend, {get_out_code});"""
+                    assert self.outputs['out_size_expr'][i] is not None, \
+                        f"{api_name}: The out size expr : '{{expr}}' should be set when output has Tensor[]. You can refer 'split' api."
+                    output_create = output_create + f"""
+{code_indent}  auto kernel_out_{i} = {set_out_func}({self.outputs['out_size_expr'][i]}, kernel_backend, &api_output[{i}]);"""
 
                 kernel_output = kernel_output[:-2]
         else: