diff --git a/paddle/fluid/operators/reduce_ops/reduce_prod_op.cc b/paddle/fluid/operators/reduce_ops/reduce_prod_op.cc deleted file mode 100644 index 0a9aebbebac7f4b7f60eb7d9c8b1fd04b4aefbab..0000000000000000000000000000000000000000 --- a/paddle/fluid/operators/reduce_ops/reduce_prod_op.cc +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include "paddle/fluid/operators/reduce_ops/reduce_prod_op.h" -#include "paddle/fluid/prim/api/composite_backward/composite_backward_api.h" -#include "paddle/fluid/prim/utils/static/composite_grad_desc_maker.h" -#include "paddle/fluid/prim/utils/static/desc_tensor.h" - -#include "paddle/fluid/framework/infershape_utils.h" -#include "paddle/phi/core/infermeta_utils.h" -#include "paddle/phi/infermeta/unary.h" - -namespace paddle { -namespace framework { -class OpDesc; -} // namespace framework -namespace imperative { -class OpBase; -} // namespace imperative -} // namespace paddle - -namespace paddle { -namespace operators { -class ReduceProdCompositeGradOpMaker : public prim::CompositeGradOpMakerBase { - public: - using prim::CompositeGradOpMakerBase::CompositeGradOpMakerBase; - void Apply() override { - // get inputs - paddle::Tensor x = this->GetSingleForwardInput("X"); - paddle::Tensor out = this->GetSingleForwardOutput("Out"); - paddle::Tensor out_grad = this->GetSingleOutputGrad("Out"); - - // get attr - std::vector<int> axis = 
this->Attr<std::vector<int>>("dim"); - bool keep_dim = this->Attr<bool>("keep_dim"); - bool reduce_all = this->Attr<bool>("reduce_all"); - - // get output - paddle::Tensor x_grad_t = this->GetSingleInputGrad("X"); - - // get output ptr - auto x_grad = this->GetOutputPtr(&x_grad_t); - - // get output orginal name - std::string x_grad_name = this->GetOutputName(x_grad_t); - VLOG(6) << "Runing prod_grad composite func"; - // call composite backward func - prim::prod_grad<prim::DescTensor>( - x, out, out_grad, axis, keep_dim, reduce_all, x_grad); - // recover output name - this->RecoverOutputName(x_grad_t, x_grad_name); - } -}; - -} // namespace operators -} // namespace paddle - -namespace ops = paddle::operators; - -class ReduceProdOpMaker : public ops::ReduceBaseOpMaker { - protected: - virtual std::string GetName() const { return "reduce_prod"; } - virtual std::string GetOpType() const { return "Reduce reduce_prod"; } -}; - -DECLARE_INFER_SHAPE_FUNCTOR( - reduce_prod, - ReduceProdInferShapeFunctor, - PD_INFER_META(phi::ReduceIntArrayAxisInferMetaBase)); - -REGISTER_OPERATOR( - reduce_prod, - ops::ReduceBaseOp, - ReduceProdOpMaker, - paddle::framework::DefaultGradOpMaker<paddle::framework::OpDesc, true>, - paddle::framework::DefaultGradOpMaker<paddle::imperative::OpBase, true>, - ops::ReduceProdCompositeGradOpMaker, - ReduceProdInferShapeFunctor); -REGISTER_OPERATOR(reduce_prod_grad, ops::ReduceGradOp); diff --git a/paddle/fluid/operators/reduce_ops/reduce_prod_op.h b/paddle/fluid/operators/reduce_ops/reduce_prod_op.h deleted file mode 100644 index 8e55f7aecd0f0fbe5e91e80cc062e5b0f6b684da..0000000000000000000000000000000000000000 --- a/paddle/fluid/operators/reduce_ops/reduce_prod_op.h +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#pragma once - -#include "paddle/fluid/operators/reduce_ops/reduce_op.h" - -namespace paddle { -namespace operators { - -struct ProdGradFunctor { - template <typename DeviceContext, typename X, typename Y, typename DX, typename DY, typename Dim> - void operator()(const DeviceContext& place, - X* x, - Y* y, - DX* dx, - DY* dy, - const Dim& dim, - int size) { - dx->device(place) = dy->broadcast(dim) * y->broadcast(dim) * x->inverse(); - } -}; - -} // namespace operators -} // namespace paddle diff --git a/paddle/fluid/operators/reduce_ops/unity_build_rule.cmake b/paddle/fluid/operators/reduce_ops/unity_build_rule.cmake index e761142c4304c8898eb4f1c2de4cf90cf6becbd4..839bb1ac7306c842760248eea70ec81ca4cbbff9 100644 --- a/paddle/fluid/operators/reduce_ops/unity_build_rule.cmake +++ b/paddle/fluid/operators/reduce_ops/unity_build_rule.cmake @@ -4,9 +4,8 @@ # Generally, the combination rules in this file do not need to be modified. # If there are some redefined error in compiling with the source file which # in combination rule, you can remove the source file from the following rules. -register_unity_group(cc reduce_all_op.cc reduce_any_op.cc reduce_prod_op.cc) -register_unity_group(cu reduce_all_op.cu reduce_any_op.cu reduce_prod_op.cu - reduce_prod_op.part.cu) +register_unity_group(cc reduce_all_op.cc reduce_any_op.cc) +register_unity_group(cu reduce_all_op.cu reduce_any_op.cu) # The following groups are to make better use of `/MP` which MSVC's parallel # compilation instruction when compiling in Unity Build. 
register_unity_group(cu frobenius_norm_op.cu) diff --git a/paddle/phi/api/yaml/op_compat.yaml b/paddle/phi/api/yaml/op_compat.yaml index 6532ac7ef41d2d0dec8ef33a712486aa9b185a7d..301eb88662ac4110783e8a70ca57fdac38559b6f 100755 --- a/paddle/phi/api/yaml/op_compat.yaml +++ b/paddle/phi/api/yaml/op_compat.yaml @@ -2029,15 +2029,20 @@ backward : prod_grad (reduce_prod_grad) inputs: x : X - attrs: - { dims : dim, keep_dim : keep_dim} outputs: out : Out + attrs: + { dims : dim, keep_dim : keep_dim} int_array: dims : data_type : int + support_tensor : true extra : attrs : [bool use_mkldnn = false] + get_expected_kernel_type : + prod : GetReduceExpectedKernelType + prod_grad : GetReduceGradExpectedKernelType + manual_signature : [prod] - op : put_along_axis backward : put_along_axis_grad diff --git a/paddle/phi/api/yaml/static_backward.yaml b/paddle/phi/api/yaml/static_backward.yaml index d01fe0d07aac0b76027b29cf8cf075befae7d72a..db27958e2842f9737569423474f03cfd203fb7eb 100755 --- a/paddle/phi/api/yaml/static_backward.yaml +++ b/paddle/phi/api/yaml/static_backward.yaml @@ -222,6 +222,17 @@ func : pool3d_grad param : [x, out, out_grad, kernel_size, strides, paddings, ceil_mode, exclusive, data_format, pooling_type, global_pooling, adaptive, padding_algorithm] +- backward_op : prod_grad + forward : prod (Tensor x, IntArray dims={0}, bool keep_dim=false, bool reduce_all=false, int in_dtype=-1, DataType out_dtype=DataType::UNDEFINED) -> Tensor(out) + args : (Tensor x, Tensor out, Tensor out_grad, IntArray dims={0}, bool keep_dim=false, bool reduce_all=false) + output : Tensor(x_grad) + infer_meta : + func : UnchangedInferMeta + param : [x] + kernel : + func : prod_grad + composite: prod_grad(x, out, out_grad, dims, keep_dim, reduce_all, x_grad) + - backward_op : relu6_grad forward : relu6 (Tensor x, float threshold = 6.0f) -> Tensor(out) args : (Tensor out, Tensor out_grad) diff --git a/paddle/phi/api/yaml/static_ops.yaml b/paddle/phi/api/yaml/static_ops.yaml index 
1859c0bc700f47b64865dd4e84b32c7f5826be86..5f73a280b687756fa8c571447717768f656f4f4e 100755 --- a/paddle/phi/api/yaml/static_ops.yaml +++ b/paddle/phi/api/yaml/static_ops.yaml @@ -433,6 +433,18 @@ param : [x, kernel_size, strides, paddings, ceil_mode, exclusive, data_format, pooling_type, global_pooling, adaptive, padding_algorithm] backward : pool3d_grad +- op : prod + args : (Tensor x, IntArray dims={0}, bool keep_dim=false, bool reduce_all=false, int in_dtype=-1, DataType out_dtype=DataType::UNDEFINED) + output : Tensor(out) + infer_meta : + func : ReduceIntArrayAxisInferMetaBase + param : [x, dims, keep_dim, reduce_all, out_dtype] + kernel : + func : prod + param : [x, dims, keep_dim, reduce_all, out_dtype] + data_type : x + backward : prod_grad + - op : randint args : (int low, int high, IntArray shape = {}, DataType dtype = DataType::INT64, int seed = 0) output : Tensor(out) diff --git a/paddle/phi/ops/compat/reduce_sig.cc b/paddle/phi/ops/compat/reduce_sig.cc index 17cfe13b85674af2b06657a0a9cec8dda967b7de..4ae3b106ef434c5b3ed57eca3a1300dc9950ace7 100644 --- a/paddle/phi/ops/compat/reduce_sig.cc +++ b/paddle/phi/ops/compat/reduce_sig.cc @@ -167,14 +167,6 @@ KernelSignature ReduceMeanGradOpArgumentMapping( {"X@GRAD"}); } -KernelSignature ReduceProdGradOpArgumentMapping( - const ArgumentMappingContext& ctx UNUSED) { - return KernelSignature("prod_grad", - {"X", "Out", "Out@GRAD"}, - {"dim", "keep_dim", "reduce_all"}, - {"X@GRAD"}); -} - } // namespace phi PD_REGISTER_BASE_KERNEL_NAME(reduce_sum, sum); @@ -188,7 +180,6 @@ PD_REGISTER_BASE_KERNEL_NAME(reduce_all, all); PD_REGISTER_BASE_KERNEL_NAME(reduce_any, any); PD_REGISTER_BASE_KERNEL_NAME(reduce_mean_grad, mean_grad); -PD_REGISTER_BASE_KERNEL_NAME(reduce_prod_grad, prod_grad); PD_REGISTER_ARG_MAPPING_FN(reduce_sum, phi::ReduceSumOpArgumentMapping); PD_REGISTER_ARG_MAPPING_FN(reduce_mean, phi::ReduceMeanOpArgumentMapping); @@ -202,5 +193,3 @@ PD_REGISTER_ARG_MAPPING_FN(reduce_any, 
phi::ReduceAnyOpArgumentMapping); PD_REGISTER_ARG_MAPPING_FN(reduce_mean_grad, phi::ReduceMeanGradOpArgumentMapping); -PD_REGISTER_ARG_MAPPING_FN(reduce_prod_grad, - phi::ReduceProdGradOpArgumentMapping);