diff --git a/paddle/fluid/operators/reduce_ops/frobenius_norm_op.cc b/paddle/fluid/operators/reduce_ops/frobenius_norm_op.cc
index 0a5d54e72c8454b46e63c2efc9bd79fad822f721..83a21a919dcaaf6341bc13c2503f0c772c9ec6f6 100644
--- a/paddle/fluid/operators/reduce_ops/frobenius_norm_op.cc
+++ b/paddle/fluid/operators/reduce_ops/frobenius_norm_op.cc
@@ -12,9 +12,12 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-#include "paddle/fluid/operators/reduce_ops/frobenius_norm_op.h"
-
 #include <string>
+#include "paddle/fluid/framework/infershape_utils.h"
+#include "paddle/fluid/framework/op_registry.h"
+#include "paddle/fluid/operators/reduce_ops/reduce_op.h"
+#include "paddle/phi/core/infermeta_utils.h"
+#include "paddle/phi/infermeta/unary.h"
 
 namespace paddle {
 namespace framework {
@@ -56,22 +59,12 @@ class FrobeniusNormOpMaker : public ops::ReduceOpMaker {
   virtual std::string GetOpType() const { return "Reduce frobenius_norm"; }
 };
 
+DECLARE_INFER_SHAPE_FUNCTOR(frobenius_norm, FrobeniusNormInferShapeFunctor,
+                            PD_INFER_META(phi::ReduceInferMetaBase));
+
 REGISTER_OPERATOR(frobenius_norm, ops::ReduceOp, FrobeniusNormOpMaker,
                   ops::FrobeniusNormOpGradMaker<paddle::framework::OpDesc>,
-                  ops::FrobeniusNormOpGradMaker<paddle::imperative::OpBase>);
+                  ops::FrobeniusNormOpGradMaker<paddle::imperative::OpBase>,
+                  FrobeniusNormInferShapeFunctor);
 
 REGISTER_OPERATOR(frobenius_norm_grad, ops::ReduceGradOp);
-
-REGISTER_OP_CPU_KERNEL(frobenius_norm,
-                       ops::ReduceKernel<paddle::platform::CPUDeviceContext,
-                                         float, ops::FrobeniusNormFunctor>,
-                       ops::ReduceKernel<paddle::platform::CPUDeviceContext,
-                                         double, ops::FrobeniusNormFunctor>);
-
-template <typename T>
-using CPUFrobeniusNormGradKernel =
-    ops::FrobeniusNormGradKernel<paddle::platform::CPUDeviceContext, T,
-                                 ops::FrobeniusNormGradFunctor>;
-
-REGISTER_OP_CPU_KERNEL(frobenius_norm_grad, CPUFrobeniusNormGradKernel<float>,
-                       CPUFrobeniusNormGradKernel<double>);
diff --git a/paddle/fluid/operators/reduce_ops/frobenius_norm_op.cu b/paddle/fluid/operators/reduce_ops/frobenius_norm_op.cu
deleted file mode 100644
index b2cef09df94368d17171d5fb79fbc5e6ad332fe1..0000000000000000000000000000000000000000
--- a/paddle/fluid/operators/reduce_ops/frobenius_norm_op.cu
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "paddle/fluid/operators/reduce_ops/frobenius_norm_op.h"
-#include "paddle/fluid/operators/reduce_ops/reduce_op.cu.h"
-
-template <typename T>
-using CUDAFrobeniusNormKernel =
-    ops::ReduceKernel<paddle::platform::CUDADeviceContext, T,
-                      ops::FrobeniusNormFunctor>;
-
-REGISTER_OP_CUDA_KERNEL(frobenius_norm, CUDAFrobeniusNormKernel<float>,
-                        CUDAFrobeniusNormKernel<double>);
-
-template <typename T>
-using CUDAFrobeniusNormGradKernel =
-    ops::ReduceGradKernel<paddle::platform::CUDADeviceContext, T,
-                          ops::FrobeniusNormGradFunctor>;
-
-REGISTER_OP_CUDA_KERNEL(frobenius_norm_grad, CUDAFrobeniusNormGradKernel<float>,
-                        CUDAFrobeniusNormGradKernel<double>);
diff --git a/paddle/fluid/operators/reduce_ops/frobenius_norm_op.h b/paddle/fluid/operators/reduce_ops/frobenius_norm_op.h
deleted file mode 100644
index 0b6b87d99ecd98e65c492fb96f3a1e886b7bfa4b..0000000000000000000000000000000000000000
--- a/paddle/fluid/operators/reduce_ops/frobenius_norm_op.h
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#pragma once
-
-#include <vector>
-
-#include "paddle/fluid/operators/reduce_ops/reduce_op.h"
-
-namespace paddle {
-namespace operators {
-
-// \partial \| X \|_F = \frac{X}{ \| X \|_F }
-template <typename DeviceContext, typename T, typename Functor>
-class FrobeniusNormGradKernel : public framework::OpKernel<T> {
- public:
-  void Compute(const framework::ExecutionContext& context) const override {
-    // default use Eigen broadcast
-    ReduceGradKernel<DeviceContext, T, Functor> kernel;
-    kernel.Compute(context);
-  }
-};
-
-struct FrobeniusNormFunctor {
-  template <typename DeviceContext, typename X, typename Y, typename Dim>
-  void operator()(const DeviceContext& place, X* x, Y* y, const Dim& dim) {
-    y->device(place) = ((x->square()).sum(dim)).sqrt();
-  }
-};
-
-struct FrobeniusNormGradFunctor {
-  template <typename DeviceContext, typename X, typename Y, typename DX,
-            typename DY, typename Dim>
-  void operator()(const DeviceContext& place, X* x, Y* y, DX* dx, DY* dy,
-                  const Dim& dim, int size) {
-    dx->device(place) = y->broadcast(dim);
-    dx->device(place) = *dx + dx->constant(1e-12f);
-    dx->device(place) = (*x / *dx) * (dy->broadcast(dim));
-  }
-};
-
-}  // namespace operators
-}  // namespace paddle
diff --git a/paddle/phi/kernels/cpu/frobenius_norm_grad_kernel.cc b/paddle/phi/kernels/cpu/frobenius_norm_grad_kernel.cc
new file mode 100644
index 0000000000000000000000000000000000000000..338be9e252da3349cd81cdfa61a8eae4d2d30166
--- /dev/null
+++ b/paddle/phi/kernels/cpu/frobenius_norm_grad_kernel.cc
@@ -0,0 +1,25 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/phi/kernels/frobenius_norm_grad_kernel.h"
+#include "paddle/phi/kernels/impl/frobenius_norm_grad_kernel_impl.h"
+
+#include "paddle/phi/core/kernel_registry.h"
+
+PD_REGISTER_KERNEL(frobenius_norm_grad,
+                   CPU,
+                   ALL_LAYOUT,
+                   phi::FrobeniusNormGradKernel,
+                   float,
+                   double) {}
diff --git a/paddle/phi/kernels/cpu/frobenius_norm_kernel.cc b/paddle/phi/kernels/cpu/frobenius_norm_kernel.cc
new file mode 100644
index 0000000000000000000000000000000000000000..77509b953bf39bc472b9f3e8b134b294253d1998
--- /dev/null
+++ b/paddle/phi/kernels/cpu/frobenius_norm_kernel.cc
@@ -0,0 +1,21 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/phi/kernels/frobenius_norm_kernel.h"
+#include "paddle/phi/kernels/impl/frobenius_norm_kernel_impl.h"
+
+#include "paddle/phi/core/kernel_registry.h"
+
+PD_REGISTER_KERNEL(
+    frobenius_norm, CPU, ALL_LAYOUT, phi::FrobeniusNormKernel, float, double) {}
diff --git a/paddle/phi/kernels/frobenius_norm_grad_kernel.h b/paddle/phi/kernels/frobenius_norm_grad_kernel.h
new file mode 100644
index 0000000000000000000000000000000000000000..edf3aed8b84934e8d1cb6afbfed633c9c58d0890
--- /dev/null
+++ b/paddle/phi/kernels/frobenius_norm_grad_kernel.h
@@ -0,0 +1,33 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include <vector>
+#include "paddle/phi/core/dense_tensor.h"
+
+namespace phi {
+
+template <typename T, typename Context>
+void FrobeniusNormGradKernel(const Context& ctx,
+                             const DenseTensor& x,
+                             const DenseTensor& out,
+                             const DenseTensor& dout,
+                             const std::vector<int64_t>& axis,
+                             bool keep_dim,
+                             bool reduce_all,
+                             DataType in_dtype,
+                             DataType out_dtype,
+                             DenseTensor* dx);
+}  // namespace phi
diff --git a/paddle/phi/kernels/frobenius_norm_kernel.h b/paddle/phi/kernels/frobenius_norm_kernel.h
new file mode 100644
index 0000000000000000000000000000000000000000..f5f37ee0c0fa5f0a4b32f032a02c1671386b909b
--- /dev/null
+++ b/paddle/phi/kernels/frobenius_norm_kernel.h
@@ -0,0 +1,30 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include <vector>
+#include "paddle/phi/core/dense_tensor.h"
+
+namespace phi {
+
+template <typename T, typename Context>
+void FrobeniusNormKernel(const Context& ctx,
+                         const DenseTensor& x,
+                         const std::vector<int64_t>& axis,
+                         bool keep_dim,
+                         bool reduce_all,
+                         DenseTensor* out);
+
+}  // namespace phi
diff --git a/paddle/phi/kernels/funcs/reduce_functor.h b/paddle/phi/kernels/funcs/reduce_functor.h
index b793afb63b1dca9bbd8ad09b83461567de6371ad..9bf1bfecabbf22f5fdc87d9c7426ec7525ac1046 100644
--- a/paddle/phi/kernels/funcs/reduce_functor.h
+++ b/paddle/phi/kernels/funcs/reduce_functor.h
@@ -17,11 +17,39 @@
 namespace phi {
 namespace funcs {
 
-//////// Sum Functor ///////
-struct SumFunctor {
+//////// Frobenius Norm Functor ///////
+struct FrobeniusNormFunctor {
   template <typename DeviceContext, typename X, typename Y, typename Dim>
   void operator()(const DeviceContext& place, X* x, Y* y, const Dim& dim) {
-    y->device(place) = x->sum(dim);
+    y->device(place) = ((x->square()).sum(dim)).sqrt();
+  }
+};
+
+struct FrobeniusNormGradFunctor {
+  template <typename DeviceContext,
+            typename X,
+            typename Y,
+            typename DX,
+            typename DY,
+            typename Dim>
+  void operator()(const DeviceContext& place,
+                  X* x,
+                  Y* y,
+                  DX* dx,
+                  DY* dy,
+                  const Dim& dim,
+                  int size) {
+    dx->device(place) = y->broadcast(dim);
+    dx->device(place) = *dx + dx->constant(1e-12f);
+    dx->device(place) = (*x / *dx) * (dy->broadcast(dim));
+  }
+};
+
+//////// Max Functor ///////
+struct MaxFunctor {
+  template <typename DeviceContext, typename X, typename Y, typename Dim>
+  void operator()(const DeviceContext& place, X* x, Y* y, const Dim& dim) {
+    y->device(place) = x->maximum(dim);
   }
 };
 
@@ -41,11 +69,11 @@ struct ProdFunctor {
   }
 };
 
-//////// Max Functor ///////
-struct MaxFunctor {
+//////// Sum Functor ///////
+struct SumFunctor {
   template <typename DeviceContext, typename X, typename Y, typename Dim>
   void operator()(const DeviceContext& place, X* x, Y* y, const Dim& dim) {
-    y->device(place) = x->maximum(dim);
+    y->device(place) = x->sum(dim);
   }
 };
 
diff --git a/paddle/phi/kernels/gpu/frobenius_norm_grad_kernel.cu b/paddle/phi/kernels/gpu/frobenius_norm_grad_kernel.cu
new file mode 100644
index 0000000000000000000000000000000000000000..221bf1cb4c68c7b0ef9b91fa7fb08dd77bcf04da
--- /dev/null
+++ b/paddle/phi/kernels/gpu/frobenius_norm_grad_kernel.cu
@@ -0,0 +1,25 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/phi/kernels/frobenius_norm_grad_kernel.h"
+#include "paddle/phi/kernels/impl/frobenius_norm_grad_kernel_impl.h"
+
+#include "paddle/phi/core/kernel_registry.h"
+
+PD_REGISTER_KERNEL(frobenius_norm_grad,
+                   GPU,
+                   ALL_LAYOUT,
+                   phi::FrobeniusNormGradKernel,
+                   float,
+                   double) {}
diff --git a/paddle/phi/kernels/gpu/frobenius_norm_kernel.cu b/paddle/phi/kernels/gpu/frobenius_norm_kernel.cu
new file mode 100644
index 0000000000000000000000000000000000000000..012237165b739a5698afff6a8922c9ed06bc7265
--- /dev/null
+++ b/paddle/phi/kernels/gpu/frobenius_norm_kernel.cu
@@ -0,0 +1,21 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/phi/kernels/frobenius_norm_kernel.h"
+#include "paddle/phi/kernels/impl/frobenius_norm_kernel_impl.h"
+
+#include "paddle/phi/core/kernel_registry.h"
+
+PD_REGISTER_KERNEL(
+    frobenius_norm, GPU, ALL_LAYOUT, phi::FrobeniusNormKernel, float, double) {}
diff --git a/paddle/phi/kernels/impl/frobenius_norm_grad_kernel_impl.h b/paddle/phi/kernels/impl/frobenius_norm_grad_kernel_impl.h
new file mode 100644
index 0000000000000000000000000000000000000000..65d903a7fe426c6eed6cba6f38e8c636001d47b0
--- /dev/null
+++ b/paddle/phi/kernels/impl/frobenius_norm_grad_kernel_impl.h
@@ -0,0 +1,39 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include "paddle/phi/kernels/frobenius_norm_grad_kernel.h"
+
+#include "paddle/phi/kernels/funcs/reduce_functor.h"
+#include "paddle/phi/kernels/impl/reduce_grad.h"
+
+namespace phi {
+
+template <typename T, typename Context>
+void FrobeniusNormGradKernel(const Context& ctx,
+                             const DenseTensor& x,
+                             const DenseTensor& out,
+                             const DenseTensor& dout,
+                             const std::vector<int64_t>& axis,
+                             bool keep_dim,
+                             bool reduce_all,
+                             DataType in_dtype,
+                             DataType out_dtype,
+                             DenseTensor* dx) {
+  ReduceGradKernel<Context, T, funcs::FrobeniusNormGradFunctor>(
+      ctx, x, dout, out, axis, keep_dim, reduce_all, in_dtype, out_dtype, dx);
+}
+
+}  // namespace phi
diff --git a/paddle/phi/kernels/impl/frobenius_norm_kernel_impl.h b/paddle/phi/kernels/impl/frobenius_norm_kernel_impl.h
new file mode 100644
index 0000000000000000000000000000000000000000..8577a4e3c634567a7900a47a942ff7d8b6f3686c
--- /dev/null
+++ b/paddle/phi/kernels/impl/frobenius_norm_kernel_impl.h
@@ -0,0 +1,35 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include "paddle/phi/kernels/frobenius_norm_kernel.h"
+
+#include "paddle/phi/kernels/cpu/reduce.h"
+#include "paddle/phi/kernels/funcs/reduce_functor.h"
+
+namespace phi {
+
+template <typename T, typename Context>
+void FrobeniusNormKernel(const Context& ctx,
+                         const DenseTensor& x,
+                         const std::vector<int64_t>& axis,
+                         bool keep_dim,
+                         bool reduce_all,
+                         DenseTensor* out) {
+  Reduce<Context, T, funcs::FrobeniusNormFunctor>(
+      ctx, x, reduce_all, axis, keep_dim, x.dtype(), out);
+}
+
+}  // namespace phi
diff --git a/paddle/phi/ops/compat/frobenius_norm_sig.cc b/paddle/phi/ops/compat/frobenius_norm_sig.cc
new file mode 100644
index 0000000000000000000000000000000000000000..c6dc5ad9014ecd06b902be304d2c2752d0934713
--- /dev/null
+++ b/paddle/phi/ops/compat/frobenius_norm_sig.cc
@@ -0,0 +1,38 @@
+/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "paddle/phi/core/compat/op_utils.h"
+
+namespace phi {
+
+KernelSignature FrobeniusNormOpArgumentMapping(
+    const ArgumentMappingContext& ctx) {
+  return KernelSignature(
+      "frobenius_norm", {"X"}, {"dim", "keep_dim", "reduce_all"}, {"Out"});
+}
+
+KernelSignature FrobeniusNormGradOpArgumentMapping(
+    const ArgumentMappingContext& ctx) {
+  return KernelSignature(
+      "frobenius_norm_grad",
+      {"X", "Out", GradVarName("Out")},
+      {"dim", "keep_dim", "reduce_all", "in_dtype", "out_dtype"},
+      {GradVarName("X")});
+}
+
+}  // namespace phi
+
+PD_REGISTER_ARG_MAPPING_FN(frobenius_norm, phi::FrobeniusNormOpArgumentMapping);
+PD_REGISTER_ARG_MAPPING_FN(frobenius_norm_grad,
+                           phi::FrobeniusNormGradOpArgumentMapping);
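Note (not part of the patch): a minimal standalone C++ sketch of the arithmetic that the migrated funcs::FrobeniusNormFunctor and funcs::FrobeniusNormGradFunctor express with Eigen above, written out for a plain row-wise reduction over a 2-D array; the function names and the 2-D layout are illustrative assumptions, only the formulas come from the functors.

// Forward: out[i] = sqrt(sum_j x[i][j]^2), i.e. ((x.square()).sum(dim)).sqrt().
// Backward: dx[i][j] = x[i][j] / (out[i] + 1e-12f) * dout[i], matching the
// broadcast-plus-epsilon guard in FrobeniusNormGradFunctor.
#include <cmath>
#include <cstdio>
#include <vector>

std::vector<float> FrobeniusNorm(const std::vector<std::vector<float>>& x) {
  std::vector<float> out;
  out.reserve(x.size());
  for (const auto& row : x) {
    float sum_sq = 0.f;
    for (float v : row) sum_sq += v * v;  // x.square().sum(dim)
    out.push_back(std::sqrt(sum_sq));     // .sqrt()
  }
  return out;
}

std::vector<std::vector<float>> FrobeniusNormGrad(
    const std::vector<std::vector<float>>& x,
    const std::vector<float>& out,
    const std::vector<float>& dout) {
  std::vector<std::vector<float>> dx(x.size());
  for (size_t i = 0; i < x.size(); ++i) {
    dx[i].resize(x[i].size());
    for (size_t j = 0; j < x[i].size(); ++j) {
      // (*x / (out + 1e-12f)) * dout, with out and dout broadcast over j
      dx[i][j] = x[i][j] / (out[i] + 1e-12f) * dout[i];
    }
  }
  return dx;
}

int main() {
  std::vector<std::vector<float>> x = {{3.f, 4.f}, {1.f, 2.f}};
  auto out = FrobeniusNorm(x);                      // {5, sqrt(5)}
  auto dx = FrobeniusNormGrad(x, out, {1.f, 1.f});  // first row: {0.6, 0.8}
  std::printf("out[0] = %f, dx[0][0] = %f\n", out[0], dx[0][0]);
  return 0;
}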