diff --git a/paddle/fluid/framework/data_transform.cc b/paddle/fluid/framework/data_transform.cc
index 7ac708ac9b0b1d87885f9cd2e90d3978ad7937a5..2e7a692145cb740a6793647d7ffc35c10fdcf696 100644
--- a/paddle/fluid/framework/data_transform.cc
+++ b/paddle/fluid/framework/data_transform.cc
@@ -161,7 +161,7 @@ phi::GetKernelTypeForVarContext BuildGetKernelTypeForVarContext(
     const AttributeMap &fluid_attrs,
     phi::AttributeMap *phi_attrs,
     bool has_infer_varkernel_fn) {
-  // According to "GetKernelTypeForVar" in some ops those have MKLDNN codes,
+  // According to "GetKernelTypeForVar" in some ops executed with oneDNN,
   // only the "string" members, such as "data_layout" and "data_format" of
   // AttributeMap, are useful. In the future the other args may be used. Because the
   // "phi" module should not depend on the "fluid" one, transform
diff --git a/paddle/fluid/operators/CMakeLists.txt b/paddle/fluid/operators/CMakeLists.txt
index 674c3e747c15aa8abb95a09f7c26abbe53406d97..39d3515899c6d4556a34b72e7817af043d43ff29 100644
--- a/paddle/fluid/operators/CMakeLists.txt
+++ b/paddle/fluid/operators/CMakeLists.txt
@@ -94,7 +94,7 @@ if(WITH_UNITY_BUILD)
   include(unity_build_rule.cmake)
 endif()
 
-set(OP_HEADER_DEPS ${OP_HEADER_DEPS} phi phi_utils backward_infermeta sparse_backward_infermeta static_prim_api)
+set(OP_HEADER_DEPS ${OP_HEADER_DEPS} phi phi_utils backward_infermeta sparse_backward_infermeta static_prim_api get_expected_kernel_func)
 
 register_operators(EXCLUDES py_func_op warpctc_op dgc_op generated_op1 generated_op2 generated_op3 generated_op4 load_combine_op lstm_op run_program_op eye_op quantize_linear_op recurrent_op save_combine_op sparse_attention_op sync_batch_norm_op ${OP_MKL_DEPS} DEPS ${OP_HEADER_DEPS})
diff --git a/paddle/fluid/operators/generator/CMakeLists.txt b/paddle/fluid/operators/generator/CMakeLists.txt
index bd30ce2fb10242f79c6d2da12a07e5ee25a9ae6d..9a98d03a654dfb32bddf8fc0a57756b9ba15b882 100644
--- a/paddle/fluid/operators/generator/CMakeLists.txt
+++ b/paddle/fluid/operators/generator/CMakeLists.txt
@@ -243,3 +243,9 @@ file(APPEND ${op_utils_header}
 register_op_utils(op_compat_infos DEPS op_utils)
 
 copy_if_different(${op_utils_header} ${op_utils_header_final})
+
+# add special GetExpectedKernelType
+cc_library(
+  get_expected_kernel_func
+  SRCS get_expected_kernel_func.cc
+  DEPS operator)
diff --git a/paddle/fluid/operators/generator/generate_op.py b/paddle/fluid/operators/generator/generate_op.py
index 2dbe91a7db44513c39d555c5c9963b7aa2871ab3..4aceb61a588310daa8082d8a88716d6274cd7555 100644
--- a/paddle/fluid/operators/generator/generate_op.py
+++ b/paddle/fluid/operators/generator/generate_op.py
@@ -101,18 +101,12 @@ def process_scalar(op_item, scalar_configs):
                     and scalar_config['support_tensor']
                     else False
                 )
-                if attr_item['is_support_tensor']:
-                    attr_item['typename'] = (
-                        scalar_config['data_type']
-                        if 'data_type' in scalar_config
-                        else scalar_map[attr_type]
-                    )
-                else:
-                    attr_item['data_type'] = (
-                        scalar_config['data_type']
-                        if 'data_type' in scalar_config
-                        else scalar_map[attr_type]
-                    )
+                attr_item['data_type'] = (
+                    scalar_config['data_type']
+                    if 'data_type' in scalar_config
+                    else scalar_map[attr_type]
+                )
+                if attr_item['is_support_tensor'] is False:
                     attr_item['tensor_name'] = scalar_config['tensor_name']
@@ -136,19 +130,12 @@ def process_int_array(op_item, int_array_configs):
                     and int_array_config['support_tensor']
                     else False
                 )
-                if attr_item['is_support_tensor']:
-                    attr_item['typename'] = (
-                        'int[]'
-                        if 'data_type' in int_array_config
-                        and int_array_config['data_type'] == 'int'
-                        else 'int64_t[]'
-                    )
-                else:
-                    attr_item['data_type'] = (
-                        data_type_map[int_array_config['data_type']]
-                        if 'data_type' in int_array_config
-                        else 'std::vector<int64_t>'
-                    )
+                attr_item['data_type'] = (
+                    data_type_map[int_array_config['data_type']]
+                    if 'data_type' in int_array_config
+                    else 'std::vector<int64_t>'
+                )
+                if attr_item['is_support_tensor'] is False:
                     attr_item['manual_flag'] = True
                     if 'tensor_name' in int_array_config:
                         attr_item['tensor_name'] = int_array_config[
                             'tensor_name'
                         ]
@@ -460,16 +447,16 @@ def process_invoke_op(forward_op_dict, backward_op_dict):
 
 def parse_drop_empty_grad(op_fluid_list: list, bw_op_dict: dict):
-    for op_op in op_fluid_list:
-        if 'drop_empty_grad' in op_op:
+    for op_comp_map in op_fluid_list:
+        if 'drop_empty_grad' in op_comp_map:
             bw_names = [
                 bw_name.split('(')[0].strip()
-                for bw_name in op_op['backward'].split(',')
+                for bw_name in op_comp_map['backward'].split(',')
             ]
             for bw_name in bw_names:
                 # static_ops.yaml and ops.yaml use the common op_compat.yaml
                 if bw_name in bw_op_dict:
-                    for out_grad in op_op['drop_empty_grad']:
+                    for out_grad in op_comp_map['drop_empty_grad']:
                         assert (
                             out_grad in bw_op_dict[bw_name]['output_dict']
                         ), f'''
@@ -479,6 +466,45 @@ def parse_drop_empty_grad(op_fluid_list: list, bw_op_dict: dict):
                         ] = False
 
 
+def parse_get_expected_kerneltype(
+    op_fluid_list: list, fw_op_dict: dict, bw_op_dict: dict
+):
+    for op_comp_map in op_fluid_list:
+        if 'get_expected_kernel_type' in op_comp_map:
+            fw_name = op_comp_map['op'].split('(')[0].strip()
+            if fw_name in op_comp_map['get_expected_kernel_type']:
+                # static_ops.yaml and ops.yaml use the common op_compat.yaml
+                if fw_name in fw_op_dict:
+                    fw_op_dict[fw_name][
+                        "get_expected_kernel_type"
+                    ] = op_comp_map['get_expected_kernel_type'][fw_name]
+            bw_names = [
+                bw_name.split('(')[0].strip()
+                for bw_name in op_comp_map['backward'].split(',')
+            ]
+            for bw_name in bw_names:
+                # static_ops.yaml and ops.yaml use the common op_compat.yaml
+                if (
+                    bw_name in bw_op_dict
+                    and bw_name in op_comp_map['get_expected_kernel_type']
+                ):
+                    bw_op_dict[bw_name][
+                        "get_expected_kernel_type"
+                    ] = op_comp_map['get_expected_kernel_type'][bw_name]
+
+
+def parse_keep_signature(
+    op_fluid_list: list, fw_op_dict: dict, bw_op_dict: dict
+):
+    for op_comp_map in op_fluid_list:
+        if 'manual_signature' in op_comp_map:
+            for op_name in op_comp_map['manual_signature']:
+                if op_name in fw_op_dict:
+                    fw_op_dict[op_name]["manual_signature"] = True
+                elif op_name in bw_op_dict:
+                    bw_op_dict[op_name]["manual_signature"] = True
+
+
 def split_ops_list(ops, backward_op_dict, split_num):
     new_ops_list = []
     new_bw_ops_list = []
@@ -547,6 +573,12 @@ def main(
     # deal the drop_empty_grad of bw_op by op_compat.yaml
     parse_drop_empty_grad(op_fluid_map_list, backward_op_dict)
 
+    parse_get_expected_kerneltype(
+        op_fluid_map_list, forward_op_dict, backward_op_dict
+    )
+
+    parse_keep_signature(op_fluid_map_list, forward_op_dict, backward_op_dict)
+
     add_composite_info(ops, backward_ops, backward_op_dict)
 
     add_compat_name(op_fluid_map_list, forward_op_dict, backward_op_dict)
diff --git a/paddle/fluid/operators/generator/get_expected_kernel_func.cc b/paddle/fluid/operators/generator/get_expected_kernel_func.cc
new file mode 100644
index 0000000000000000000000000000000000000000..917bf23bb5e5bffd5e9b89c54067c309b6c37b6c
--- /dev/null
+++ b/paddle/fluid/operators/generator/get_expected_kernel_func.cc
@@ -0,0 +1,106 @@
+/* Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include <algorithm>
+#include <string>
+#include <vector>
+
+#include "paddle/fluid/operators/generator/get_expected_kernel_func.h"
+
+#include "paddle/fluid/framework/convert_utils.h"
+#include "paddle/fluid/framework/phi_utils.h"
+#include "paddle/fluid/framework/tensor_util.h"
+
+namespace paddle {
+namespace operators {
+
+// oneDNN's reduction kernel is optimized only for reducing throughout the
+// most outer dims, so in case of another type of reduction, it would be
+// better to fall back to the native implementation
+static bool ReduceOpHasOptimizedOneDNNKernel(
+    const framework::ExecutionContext& ctx) {
+  // native reduce kernels don't support bf16,
+  // so the oneDNN kernel is enforced in that case
+  if (ctx.Input<phi::DenseTensor>("X")->dtype() ==
+      experimental::DataType::BFLOAT16)
+    return true;
+
+  if (!ctx.HasAttr("dim") || !ctx.HasAttr("reduce_all")) {
+    return false;
+  }
+
+  auto reduce_dims = ctx.Attr<std::vector<int>>("dim");
+  const bool reduce_all = ctx.Attr<bool>("reduce_all");
+  int ndims = ctx.Input<phi::DenseTensor>("X")->dims().size();
+
+  if (reduce_all) {
+    return true;
+  }
+
+  for (size_t i = 0; i < reduce_dims.size(); ++i) {
+    if (reduce_dims[i] < 0) reduce_dims[i] = ndims + reduce_dims[i];
+  }
+  sort(reduce_dims.begin(), reduce_dims.end());
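+  // e.g. with ndims = 4, sorted reduce_dims = {2, 3} matches the trailing
+  // dims {ndims - 2, ndims - 1} in the check below and keeps the optimized
+  // oneDNN kernel, while reduce_dims = {0, 1} does not and falls back.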
+  for (size_t i = 0; i < reduce_dims.size(); ++i) {
+    if (reduce_dims[reduce_dims.size() - i - 1] !=
+        static_cast<int>(ndims - i - 1)) {
+      return false;
+    }
+  }
+
+  return true;
+}
+
+phi::KernelKey GetReduceExpectedKernelType(
+    const framework::ExecutionContext& ctx,
+    const framework::OperatorWithKernel* op_ptr) {
+  // choose cudnn kernel if the runtime supports it.
+  auto input_data_type = op_ptr->IndicateVarDataType(ctx, "X");
+
+  if (ctx.Input<phi::DenseTensor>("X")->dims().size() > 5 ||
+      !ReduceOpHasOptimizedOneDNNKernel(ctx)) {
+    op_ptr->SetDnnFallback(true);
+  }
+
+  if (input_data_type == framework::proto::VarType::FP16) {
+    PADDLE_ENFORCE_EQ(
+        platform::is_gpu_place(ctx.GetPlace()) ||
+            platform::is_npu_place(ctx.GetPlace()) ||
+            platform::is_mlu_place(ctx.GetPlace()) ||
+            platform::is_xpu_place(ctx.GetPlace()) ||
+            platform::is_custom_place(ctx.GetPlace()),
+        true,
+        platform::errors::InvalidArgument(
+            "float16 can only be used on GPU or NPU or MLU or XPU place"));
+  }
+  return phi::KernelKey(input_data_type, ctx.GetPlace());
+}
+
+phi::KernelKey GetReduceGradExpectedKernelType(
+    const framework::ExecutionContext& ctx,
+    const framework::OperatorWithKernel* op_ptr) {
+  int out_dtype = ctx.Attr<int>("out_dtype");
+  auto input_data_type =
+      (out_dtype >= 0)
+          ? static_cast<framework::proto::VarType::Type>(out_dtype)
+          : op_ptr->IndicateVarDataType(ctx, framework::GradVarName("Out"));
+  if (ctx.Input<phi::DenseTensor>("X")->dims().size() > 5) {
+    op_ptr->SetDnnFallback(true);
+  }
+
+  return phi::KernelKey(input_data_type, ctx.GetPlace());
+}
+
+}  // namespace operators
+}  // namespace paddle
diff --git a/paddle/fluid/operators/generator/get_expected_kernel_func.h b/paddle/fluid/operators/generator/get_expected_kernel_func.h
new file mode 100644
index 0000000000000000000000000000000000000000..2054d593fb33608a07602464206fb62321bbc75e
--- /dev/null
+++ b/paddle/fluid/operators/generator/get_expected_kernel_func.h
@@ -0,0 +1,32 @@
+/* Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#pragma once
+
+#include "paddle/fluid/framework/operator.h"
+#include "paddle/phi/core/kernel_factory.h"
+
+namespace paddle {
+namespace operators {
+
+phi::KernelKey GetReduceExpectedKernelType(
+    const framework::ExecutionContext& ctx,
+    const framework::OperatorWithKernel* op_ptr);
+
+phi::KernelKey GetReduceGradExpectedKernelType(
+    const framework::ExecutionContext& ctx,
+    const framework::OperatorWithKernel* op_ptr);
+
+}  // namespace operators
+}  // namespace paddle
diff --git a/paddle/fluid/operators/generator/templates/ks.c.j2 b/paddle/fluid/operators/generator/templates/ks.c.j2
index ad640a911c773bc7d3e2a0e6ff1727386d5ae667..96c78810e0b41e043621b2a05d324a53fa2a19e3 100644
--- a/paddle/fluid/operators/generator/templates/ks.c.j2
+++ b/paddle/fluid/operators/generator/templates/ks.c.j2
@@ -6,22 +6,28 @@
 namespace phi {
 
 {% for op in ops %}
-  {% if op is base_op %}
+  {% if "manual_signature" not in op %}
+    {% if op is base_op %}
 {{name_map(op)}}
+    {% endif %}
   {% endif %}
 {% endfor %}
 {% for op in backward_ops %}
-  {% if op is base_op %}
+  {% if "manual_signature" not in op %}
+    {% if op is base_op %}
 {{name_map(op)}}
+    {% endif %}
   {% endif %}
 {% endfor %}
 }  // namespace phi
 
 {% for op in ops + backward_ops %}
-  {% if op["name"] != op["op_name"] %}
+  {% if "manual_signature" not in op %}
+    {% if op["name"] != op["op_name"] %}
 {{register_base_kernel_name(op)}}
-  {% endif %}
-  {% if op is base_op %}
+    {% endif %}
+    {% if op is base_op %}
 {{register_name_map(op)}}
+    {% endif %}
   {% endif %}
 {% endfor %}
diff --git a/paddle/fluid/operators/generator/templates/op.c.j2 b/paddle/fluid/operators/generator/templates/op.c.j2
index b39bdc663fdd863a5290a1147269ae18c6661abb..feeb1dee169fff13964cd46151bed3316e908948 100644
--- a/paddle/fluid/operators/generator/templates/op.c.j2
+++ b/paddle/fluid/operators/generator/templates/op.c.j2
@@ -8,6 +8,7 @@
 #include "paddle/fluid/prim/api/composite_backward/composite_backward_api.h"
 #include "paddle/fluid/prim/utils/static/composite_grad_desc_maker.h"
 #include "paddle/fluid/prim/utils/static/desc_tensor.h"
+#include "paddle/fluid/operators/generator/get_expected_kernel_func.h"
 #include "paddle/phi/core/infermeta_utils.h"
 #include "paddle/phi/infermeta/backward.h"
 #include "paddle/phi/infermeta/binary.h"
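Note: with this include in place, an op that declares `get_expected_kernel_type` in op_compat.yaml (see the `frobenius_norm` entry below) gets a generated override that simply forwards to the hand-written function. Roughly, the `GetExpectedKernelType` macro in operator_utils.c.j2 (next diff) then expands to the following — a sketch, not verbatim generator output:

    phi::KernelKey GetExpectedKernelType(
        const framework::ExecutionContext& ctx) const override {
      return GetReduceExpectedKernelType(ctx, this);
    }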
diff --git a/paddle/fluid/operators/generator/templates/operator_utils.c.j2 b/paddle/fluid/operators/generator/templates/operator_utils.c.j2
index 2fb14e7187a92705cb5b2391879826233da5ce64..33233bd77a141d15a75c2377baada2ca18d00d5c 100644
--- a/paddle/fluid/operators/generator/templates/operator_utils.c.j2
+++ b/paddle/fluid/operators/generator/templates/operator_utils.c.j2
@@ -68,11 +68,13 @@ AddOutput({{name | to_opmaker_name}}, "({{typename}}), output {{i}} of {{op_name}}
 {% macro add_attr(i, attr, op_name) %}{# inline #}
   {% set name = attr["fluid_name"] %}
   {% set typename = attr["typename"] %}
-  {% if typename is scalar %}
+  {% if typename is scalar and
+    ("is_support_tensor" not in attr or attr["is_support_tensor"] is false) %}
 AddInput("{{attr | to_scalar_tensor_name}}", "attribute {{i}} for {{op_name}} op from 0D Tensor.")
   .AsDispensable();
 AddAttr<{{attr["data_type"]}}>("{{name}}", "({{attr["data_type"]}}), attribute {{i}} for {{op_name}} op.")
-  {% elif typename == "IntArray" %}{# the type has been renamed #}
+  {% elif typename == "IntArray" and
+    ("is_support_tensor" not in attr or attr["is_support_tensor"] is false) %}{# the type has been renamed #}
 {% if 'tensor_name' in attr or 'manual_flag' not in attr %}
 AddInput("{{attr | to_int_array_tensor_name}}", "attribute {{i}} for {{op_name}} op from 1D integer Tensor.")
   .AsDispensable();
@@ -82,6 +84,8 @@ AddInput("{{attr | to_int_array_tensors_name}}", "attribute {{i}} for {{op_name}}
   .AsDuplicable()
   .AsDispensable();
 {% endif %}
+AddAttr<{{attr["data_type"]}}>("{{name}}", "({{attr["data_type"]}}), attribute {{i}} for {{op_name}} op.")
+  {% elif "is_support_tensor" in attr and attr["is_support_tensor"] %}
 AddAttr<{{attr["data_type"]}}>("{{name}}", "({{attr["data_type"]}}), attribute {{i}} for {{op_name}} op.")
 {% else %}
 AddAttr<{{typename | to_op_attr_type}}>("{{name}}", "({{typename | to_op_attr_type}}), attribute {{i}} for {{op_name}} op.")
@@ -221,9 +225,11 @@ paddle::small_vector<const char*> inputs {
 {% set typename = attr["typename"] %}
 {%- if attr["fluid_name"] in kernel_args %}
 {% set name = attr["fluid_name"] %}
-{% if typename is scalar %}{# scalar corresponds to a dispensable input and an attr in opmaker #}
+{% if typename is scalar and
+("is_support_tensor" not in attr or attr["is_support_tensor"] is false) %}{# scalar corresponds to a dispensable input and an attr in opmaker #}
 attrs.emplace_back(ctx.HasInput("{{attr | to_scalar_tensor_name}}") ? "{{attr | to_scalar_tensor_name}}" : "{{name}}");
-{%- elif typename == "IntArray" %}
+{%- elif typename == "IntArray" and
+("is_support_tensor" not in attr or attr["is_support_tensor"] is false) %}
 {% if 'tensor_name' in attr and 'tensors_name' not in attr %}
 attrs.emplace_back(
   ctx.HasInput("{{attr | to_int_array_tensor_name}}")
"{{attr | to_scalar_tensor_name}}" : "{{name}}"); -{%- elif typename == "IntArray" %} +{%- elif typename == "IntArray" and +("is_support_tensor" not in attr or attr["is_support_tensor"] is false) %} {% if 'tensor_name' in attr and 'tensors_name' not in attr %} attrs.emplace_back( ctx.HasInput("{{attr | to_int_array_tensor_name}}") @@ -260,6 +266,7 @@ paddle::small_vector outputs { {% set kernel = op["kernel"] %} phi::KernelKey GetExpectedKernelType( const framework::ExecutionContext& ctx) const override { +{% if "get_expected_kernel_type" not in op %} phi::KernelKey kt; {%if kernel["data_type"] is not none %}{# data type ---------------------------------#} {% if kernel["data_type"]["candidates"] | length == 1 %} @@ -303,6 +310,9 @@ phi::KernelKey GetExpectedKernelType( } {% endif %} return kt; +{% else %} + return {{op["get_expected_kernel_type"]}}(ctx, this); +{% endif %} } {% endmacro -%} @@ -360,10 +370,12 @@ class {{op["op_name"] | to_pascal_case}}Op : public framework::OperatorWithKerne {% set kernel = op["kernel"] %} {% if kernel["data_type"] is not none or kernel["backend"] is not none or kernel["force_backend"] is not none - or "complex_promote" in op or "data_transform" in op %} + or "complex_promote" in op or "data_transform" in op + or "get_expected_kernel_type" in op%} protected: {% if kernel["data_type"] is not none or kernel["backend"] is not none - or kernel["force_backend"] is not none or "complex_promote" in op %} + or kernel["force_backend"] is not none or "complex_promote" in op + or "get_expected_kernel_type" in op%} {% filter indent(2, True)%} {{get_expected_kernel(op)}} {% endfilter %} @@ -523,7 +535,8 @@ class {{name | to_pascal_case}}OpMaker : public framework::SingleGradOpMaker grad_op->SetInput("{{attr | to_int_array_tensors_name}}", this->Input("{{attr | to_int_array_tensors_name}}")); } {% endif %} - {% elif attr["typename"] == "Scalar" %} + {% elif attr["typename"] is scalar and + ("is_support_tensor" not in attr or attr["is_support_tensor"] is false)%} if (this->HasInput("{{attr | to_scalar_tensor_name}}")) { grad_op->SetInput("{{attr | to_scalar_tensor_name}}", this->Input("{{attr | to_scalar_tensor_name}}")); } @@ -589,7 +602,8 @@ class {{name | to_pascal_case}}OpMaker : public framework::SingleGradOpMaker grad_op->SetInput("{{fw_attr | to_int_array_tensors_name}}", this->Input("{{fw_attr | to_int_array_tensors_name}}")); } {% endif %} - {% elif fw_attr["typename"] == "Scalar" %} + {% elif fw_attr["typename"] is scalar and + ("is_support_tensor" not in attr or attr["is_support_tensor"] is false) %} if (this->HasInput("{{fw_attr | to_scalar_tensor_name}}")) { grad_op->SetInput("{{fw_attr | to_scalar_tensor_name}}", this->Input("{{fw_attr | to_scalar_tensor_name}}")); } diff --git a/paddle/fluid/operators/reduce_ops/frobenius_norm_op.cc b/paddle/fluid/operators/reduce_ops/frobenius_norm_op.cc deleted file mode 100644 index 7fba45fa53923e64517f623f8ce4a48bb1be8772..0000000000000000000000000000000000000000 --- a/paddle/fluid/operators/reduce_ops/frobenius_norm_op.cc +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
diff --git a/paddle/fluid/operators/reduce_ops/frobenius_norm_op.cc b/paddle/fluid/operators/reduce_ops/frobenius_norm_op.cc
deleted file mode 100644
index 7fba45fa53923e64517f623f8ce4a48bb1be8772..0000000000000000000000000000000000000000
--- a/paddle/fluid/operators/reduce_ops/frobenius_norm_op.cc
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <string>
-
-#include "paddle/fluid/framework/infershape_utils.h"
-#include "paddle/fluid/framework/op_registry.h"
-#include "paddle/fluid/operators/reduce_ops/reduce_op.h"
-#include "paddle/phi/core/infermeta_utils.h"
-#include "paddle/phi/infermeta/unary.h"
-
-namespace paddle {
-namespace framework {
-class OpDesc;
-}  // namespace framework
-namespace imperative {
-class OpBase;
-}  // namespace imperative
-}  // namespace paddle
-
-namespace paddle {
-namespace operators {
-
-template <typename T>
-class FrobeniusNormOpGradMaker : public framework::SingleGradOpMaker<T> {
- public:
-  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;
-
- protected:
-  void Apply(GradOpPtr<T> op) const override {
-    op->SetType("frobenius_norm_grad");
-    op->SetInput("X", this->Input("X"));
-    op->SetInput("Out", this->Output("Out"));
-    op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
-    op->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
-    op->SetAttrMap(this->Attrs());
-  }
-};
-
-}  // namespace operators
-}  // namespace paddle
-
-class FrobeniusNormOpMaker : public ops::ReduceOpMaker {
- protected:
-  virtual std::string GetName() const { return "frobenius_norm"; }
-  virtual std::string GetOpType() const { return "Reduce frobenius_norm"; }
-};
-
-DECLARE_INFER_SHAPE_FUNCTOR(frobenius_norm,
-                            FrobeniusNormInferShapeFunctor,
-                            PD_INFER_META(phi::ReduceInferMetaBase));
-
-REGISTER_OPERATOR(frobenius_norm,
-                  ops::ReduceOp,
-                  FrobeniusNormOpMaker,
-                  ops::FrobeniusNormOpGradMaker<paddle::framework::OpDesc>,
-                  ops::FrobeniusNormOpGradMaker<paddle::imperative::OpBase>,
-                  FrobeniusNormInferShapeFunctor);
-
-REGISTER_OPERATOR(frobenius_norm_grad, ops::ReduceGradOp);
diff --git a/paddle/fluid/operators/reduce_ops/reduce_all_op.cc b/paddle/fluid/operators/reduce_ops/reduce_all_op.cc
deleted file mode 100644
index f0de94666357e3423b541bed9c0d451f3dd564a3..0000000000000000000000000000000000000000
--- a/paddle/fluid/operators/reduce_ops/reduce_all_op.cc
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
- -#include "paddle/fluid/operators/reduce_ops/reduce_all_op.h" - -#include "paddle/fluid/framework/infershape_utils.h" -#include "paddle/phi/core/infermeta_utils.h" -#include "paddle/phi/infermeta/unary.h" - -namespace paddle { -namespace framework { -class OpDesc; -template -class EmptyGradOpMaker; -} // namespace framework -namespace imperative { -class OpBase; -} // namespace imperative -} // namespace paddle - -DECLARE_INFER_SHAPE_FUNCTOR(reduce_all, - ReduceAllInferShapeFunctor, - PD_INFER_META(phi::ReduceInferMetaBase)); -class ReduceAllOpMaker : public ops::ReduceOpMaker { - protected: - virtual std::string GetName() const { return "reduce_all"; } - virtual std::string GetOpType() const { return "Reduce reduce_all"; } -}; -// kernel's device type is decided by input tensor place, to be consistent with -// compare and logical ops -REGISTER_OPERATOR( - reduce_all, - ops::ReduceOpUseInputPlace, - ReduceAllOpMaker, - paddle::framework::EmptyGradOpMaker, - paddle::framework::EmptyGradOpMaker, - ReduceAllInferShapeFunctor); diff --git a/paddle/fluid/operators/reduce_ops/reduce_all_op.h b/paddle/fluid/operators/reduce_ops/reduce_all_op.h deleted file mode 100644 index ba159dd703c8904784546eda262bf7be77967d48..0000000000000000000000000000000000000000 --- a/paddle/fluid/operators/reduce_ops/reduce_all_op.h +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-#pragma once
-
-#include "paddle/fluid/operators/reduce_ops/reduce_op.h"
-
-namespace paddle {
-namespace operators {
-
-struct AllFunctor {
-  template <typename DeviceContext, typename X, typename Y, typename Dim>
-  void operator()(const DeviceContext& place, X* x, Y* y, const Dim& dim) {
-    y->device(place) = x->all(dim);
-  }
-};
-
-}  // namespace operators
-}  // namespace paddle
diff --git a/paddle/phi/api/yaml/op_compat.yaml b/paddle/phi/api/yaml/op_compat.yaml
index d5b834404b93d823a35936b8bf4585f24cbf4eb0..4d79b56c45f598a445d77618402a3a0b74b7d842 100644
--- a/paddle/phi/api/yaml/op_compat.yaml
+++ b/paddle/phi/api/yaml/op_compat.yaml
@@ -60,6 +60,21 @@
   extra :
     attrs : [bool use_cudnn = true]
 
+- op : all (reduce_all)
+  inputs:
+    x : X
+  attrs:
+    { axis : dim, keepdim : keep_dim}
+  outputs:
+    out : Out
+  int_array:
+    axis :
+      data_type : int
+      support_tensor : true
+  manual_signature : [all]
+  extra :
+    attrs : [bool use_mkldnn = false]
+
 - op : allclose
   inputs :
     {x : Input, y : Other}
@@ -705,6 +720,19 @@
 - op : frobenius_norm
   backward : frobenius_norm_grad
+  inputs:
+    x : X
+  attrs:
+    { axis : dim, keepdim : keep_dim}
+  outputs:
+    out : Out
+  int_array:
+    axis :
+      data_type : int
+      support_tensor : true
+  get_expected_kernel_type :
+    frobenius_norm : GetReduceExpectedKernelType
+    frobenius_norm_grad : GetReduceGradExpectedKernelType
   extra :
     attrs : [bool use_mkldnn = false]
 
@@ -1363,10 +1391,6 @@
   extra :
     attrs : [bool use_mkldnn = false, bool use_cudnn = false]
 
-- op : reduce_all
-  extra :
-    attrs : [bool use_mkldnn = false]
-
 - op : reduce_amax
   backward : reduce_amax_grad
   extra :
diff --git a/paddle/phi/api/yaml/static_backward.yaml b/paddle/phi/api/yaml/static_backward.yaml
index 8c9cbb6cfb35c7b10be54cb603193de240c7f64f..3599b7064cadbe2aa6e5d4b4ac506f32863e4875 100644
--- a/paddle/phi/api/yaml/static_backward.yaml
+++ b/paddle/phi/api/yaml/static_backward.yaml
@@ -1 +1,12 @@
 # This file is to support those static ops different from the dynamic ones.
+
+- backward_op : frobenius_norm_grad
+  forward: frobenius_norm (Tensor x, IntArray axis={0}, bool keepdim=false, bool reduce_all=false, int in_dtype=-1, int out_dtype=-1) -> Tensor(out)
+  args : (Tensor x, Tensor out, Tensor out_grad, IntArray axis={0}, bool keepdim=false, bool reduce_all=false, int in_dtype=-1, int out_dtype=-1)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : frobenius_norm_grad
+    param : [x, out, out_grad, axis, keepdim, reduce_all]
diff --git a/paddle/phi/api/yaml/static_ops.yaml b/paddle/phi/api/yaml/static_ops.yaml
index 19ae9f6dddcb6faa945e9f497e97387fe70cb69c..8fa782e18635f7896a062ba262ecc7d0058e4c63 100644
--- a/paddle/phi/api/yaml/static_ops.yaml
+++ b/paddle/phi/api/yaml/static_ops.yaml
@@ -1,3 +1,11 @@
+- op : all
+  args : (Tensor x, IntArray axis={0}, bool keepdim=false, bool reduce_all=false, int in_dtype=-1, int out_dtype=-1)
+  output : Tensor(out)
+  infer_meta :
+    func : ReduceInferMetaBase
+  kernel :
+    func : all
+
 - op : embedding_with_eltwise_add_xpu
   args : (Tensor[] ids, Tensor[] tables, int64_t padding_idx)
   output: Tensor
@@ -29,6 +37,16 @@
     data_type : x
   optional : bias, x_max
 
+- op : frobenius_norm
+  args : (Tensor x, IntArray axis={0}, bool keepdim=false, bool reduce_all=false, int in_dtype=-1, int out_dtype=-1)
+  output : Tensor(out)
+  infer_meta :
+    func : ReduceInferMetaBase
+  kernel :
+    func : frobenius_norm
+    param : [x, axis, keepdim, reduce_all]
+  backward : frobenius_norm_grad
+
 - op : generate_sequence_xpu
   args : (Tensor x, DataType dtype)
   output : Tensor
diff --git a/paddle/phi/core/compat/CMakeLists.txt b/paddle/phi/core/compat/CMakeLists.txt
index 3051ae4989222420d7d863996395f35a529ebf69..3234f1004f0e5f931180866cd0b6979a903bf589 100644
--- a/paddle/phi/core/compat/CMakeLists.txt
+++ b/paddle/phi/core/compat/CMakeLists.txt
@@ -6,7 +6,10 @@ cc_library(
   op_utils
   SRCS op_utils.cc
   DEPS arg_map_context enforce)
-cc_library(get_kerneltype_forvar_utils SRCS get_kerneltype_forvar_utils.cc)
+cc_library(
+  get_kerneltype_forvar_utils
+  SRCS get_kerneltype_forvar_utils.cc
+  DEPS enforce)
 
 set(convert_utils_deps data_type place op_utils phi_backends)
diff --git a/paddle/phi/core/compat/get_kerneltype_forvar_utils.cc b/paddle/phi/core/compat/get_kerneltype_forvar_utils.cc
index abb483b97240d88d34c4ee9921c653c80c21bf44..7519399e0e4d197c94cae3c7fffa9c60c2349d4f 100644
--- a/paddle/phi/core/compat/get_kerneltype_forvar_utils.cc
+++ b/paddle/phi/core/compat/get_kerneltype_forvar_utils.cc
@@ -14,17 +14,33 @@
 
 #include "paddle/phi/core/compat/get_kerneltype_forvar_utils.h"
 
+#include "paddle/phi/core/enforce.h"
 namespace phi {
 
 const std::string& GetKernelTypeForVarContext::GetVarName(void) const {
+  PADDLE_ENFORCE_NE(
+      var_name_,
+      nullptr,
+      errors::InvalidArgument(
+          "Variable name is null. The context hasn't been initialized."));
   return *var_name_;
 }
 
 const DenseTensor& GetKernelTypeForVarContext::GetTensor(void) const {
+  PADDLE_ENFORCE_NE(
+      tensor_,
+      nullptr,
+      errors::InvalidArgument(
+          "Tensor is null. The context hasn't been initialized."));
   return *tensor_;
 }
 
 const KernelKey& GetKernelTypeForVarContext::GetKernelKey(void) const {
+  PADDLE_ENFORCE_NE(
+      kernel_key_,
+      nullptr,
+      errors::InvalidArgument(
+          "Kernel key is null. The context hasn't been initialized."));
   return *kernel_key_;
 }
")); return *kernel_key_; } diff --git a/paddle/phi/core/compat/get_kerneltype_forvar_utils.h b/paddle/phi/core/compat/get_kerneltype_forvar_utils.h index 61ff7b53a1d91a5eb0cd2467ca708bb8393f433b..48348ce43f906ca7b88e64d5b23bc0e4fb8702cb 100644 --- a/paddle/phi/core/compat/get_kerneltype_forvar_utils.h +++ b/paddle/phi/core/compat/get_kerneltype_forvar_utils.h @@ -21,9 +21,9 @@ namespace phi { class KernelKey; class DenseTensor; /** - * Note: GetKernelTypeForVarContext is currently designed to MKLDNN kernel when + * Note: GetKernelTypeForVarContext is currently designed for oneDNN kernel when * the related memeber function 'GetKernelTypeForVar' is special. It is - * possiable to uesed for other custom hardwares in the future. + * possible to leverage to other vendor libraries in the future. */ class GetKernelTypeForVarContext { public: diff --git a/paddle/phi/ops/compat/frobenius_norm_sig.cc b/paddle/phi/ops/compat/frobenius_norm_sig.cc deleted file mode 100644 index 1fb53c36cafb2615b699fd53217b3924be998016..0000000000000000000000000000000000000000 --- a/paddle/phi/ops/compat/frobenius_norm_sig.cc +++ /dev/null @@ -1,37 +0,0 @@ -/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#include "paddle/phi/core/compat/op_utils.h" - -namespace phi { - -KernelSignature FrobeniusNormOpArgumentMapping( - const ArgumentMappingContext& ctx) { - return KernelSignature( - "frobenius_norm", {"X"}, {"dim", "keep_dim", "reduce_all"}, {"Out"}); -} - -KernelSignature FrobeniusNormGradOpArgumentMapping( - const ArgumentMappingContext& ctx) { - return KernelSignature("frobenius_norm_grad", - {"X", "Out", "Out@GRAD"}, - {"dim", "keep_dim", "reduce_all"}, - {"X@GRAD"}); -} - -} // namespace phi - -PD_REGISTER_ARG_MAPPING_FN(frobenius_norm, phi::FrobeniusNormOpArgumentMapping); -PD_REGISTER_ARG_MAPPING_FN(frobenius_norm_grad, - phi::FrobeniusNormGradOpArgumentMapping);