diff --git a/paddle/fluid/operators/expand_as_v2_op.cc b/paddle/fluid/operators/expand_as_v2_op.cc
index 119e514a49e28fb3295e36947664770889bbdd81..97a35a34f23e96707269482e29da13a15538cdca 100755
--- a/paddle/fluid/operators/expand_as_v2_op.cc
+++ b/paddle/fluid/operators/expand_as_v2_op.cc
@@ -121,37 +121,9 @@ REGISTER_OPERATOR(expand_as_v2, ops::ExpandAsV2Op, ops::ExpandAsV2OpMaker,
                   ops::ExpandAsV2GradOpMaker<paddle::framework::OpDesc>,
                   ops::ExpandAsV2GradOpMaker<paddle::imperative::OpBase>);
 REGISTER_OPERATOR(expand_as_v2_grad, ops::ExpandAsV2GradOp,
                   ops::ExpandAsV2GradNoNeedBufVarsInferer);
-REGISTER_OP_CPU_KERNEL(
-    expand_as_v2,
-    ops::ExpandAsV2Kernel<paddle::platform::CPUDeviceContext, float>,
-    ops::ExpandAsV2Kernel<paddle::platform::CPUDeviceContext, double>,
-    ops::ExpandAsV2Kernel<paddle::platform::CPUDeviceContext, int>,
-    ops::ExpandAsV2Kernel<paddle::platform::CPUDeviceContext, int64_t>,
-    ops::ExpandAsV2Kernel<paddle::platform::CPUDeviceContext, bool>);
-REGISTER_OP_CPU_KERNEL(
-    expand_as_v2_grad,
-    ops::ExpandAsV2GradKernel<paddle::platform::CPUDeviceContext, float>,
-    ops::ExpandAsV2GradKernel<paddle::platform::CPUDeviceContext, double>,
-    ops::ExpandAsV2GradKernel<paddle::platform::CPUDeviceContext, int>,
-    ops::ExpandAsV2GradKernel<paddle::platform::CPUDeviceContext, int64_t>);
-#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
-REGISTER_OP_CUDA_KERNEL(
-    expand_as_v2,
-    ops::ExpandAsV2Kernel<paddle::platform::CUDADeviceContext, float>,
-    ops::ExpandAsV2Kernel<paddle::platform::CUDADeviceContext, double>,
-    ops::ExpandAsV2Kernel<paddle::platform::CUDADeviceContext, int>,
-    ops::ExpandAsV2Kernel<paddle::platform::CUDADeviceContext, int64_t>,
-    ops::ExpandAsV2Kernel<paddle::platform::CUDADeviceContext, bool>);
-REGISTER_OP_CUDA_KERNEL(
-    expand_as_v2_grad,
-    ops::ExpandAsV2GradKernel<paddle::platform::CUDADeviceContext, float>,
-    ops::ExpandAsV2GradKernel<paddle::platform::CUDADeviceContext, double>,
-    ops::ExpandAsV2GradKernel<paddle::platform::CUDADeviceContext, int>,
-    ops::ExpandAsV2GradKernel<paddle::platform::CUDADeviceContext, int64_t>);
-#endif
 REGISTER_OP_VERSION(expand_as_v2)
     .AddCheckpoint(
         R"ROC(fix expand_as_v2 and add new input [Y])ROC",
         paddle::framework::compatible::OpVersionDesc().NewInput(
-            "Y", "Expand X according to the shape of Y"));
\ No newline at end of file
+            "Y", "Expand X according to the shape of Y"));
diff --git a/paddle/fluid/operators/expand_as_v2_op.h b/paddle/fluid/operators/expand_as_v2_op.h
index d7560efc5c1f1244ae4eed4c68c59a38287057ee..f09e7764eed3959c7f0ca700b953dbd0c2891d12 100755
--- a/paddle/fluid/operators/expand_as_v2_op.h
+++ b/paddle/fluid/operators/expand_as_v2_op.h
@@ -32,219 +32,5 @@ template <typename T, int MajorType = Eigen::RowMajor,
           typename IndexType = Eigen::DenseIndex>
 using EigenTensor = framework::EigenTensor<T, MajorType, IndexType>;
 
-template <typename DeviceContext, typename T>
-class ExpandAsV2Kernel : public framework::OpKernel<T> {
- public:
-  void Compute(const framework::ExecutionContext& context) const override {
-    auto rank = context.Input<Tensor>("X")->dims().size();
-    auto target_shape = context.Attr<std::vector<int>>("target_shape");
-    auto target_rank = target_shape.size();
-    PADDLE_ENFORCE_GE(target_rank, rank,
-                      platform::errors::InvalidArgument(
-                          "The rank (%d) of the input 'target_tensor' for "
-                          "expand_as_v2 op must be greater than or equal to "
-                          "the rank (%d) of the input 'x'.",
-                          target_rank, rank));
-    PADDLE_ENFORCE_GE(rank, 1, platform::errors::InvalidArgument(
-                                   "The rank (%d) of the input 'x' for "
-                                   "expand_as_v2 op must be positive.",
-                                   rank));
-    PADDLE_ENFORCE_LE(target_rank, MAX_RANK_SUPPORTED,
-                      platform::errors::InvalidArgument(
-                          "The rank (%d) of the input 'target_tensor' for "
-                          "expand_as_v2 op must be less than or equal to %d.",
-                          target_rank, MAX_RANK_SUPPORTED));
-
-    switch (target_rank) {
-      case 1:
-        ExpandAs<1>(context);
-        break;
-      case 2:
-        ExpandAs<2>(context);
-        break;
-      case 3:
-        ExpandAs<3>(context);
-        break;
-      case 4:
-        ExpandAs<4>(context);
-        break;
-      case 5:
-        ExpandAs<5>(context);
-        break;
-      case 6:
-        ExpandAs<6>(context);
-        break;
-    }
-  }
-
- protected:
-  template <int Rank>
-  void ExpandAs(const framework::ExecutionContext& context) const {
-    auto* in0 = context.Input<Tensor>("X");
-    auto in_dims = in0->dims();
-    auto target_shape = context.Attr<std::vector<int>>("target_shape");
-    auto vec_in_dims = phi::vectorize<int>(in_dims);
-    auto diff = target_shape.size() - vec_in_dims.size();
-    vec_in_dims.insert(vec_in_dims.begin(), diff, 1);
-    std::vector<int> repeat_times(vec_in_dims.size());
-    for (size_t i = 0; i < vec_in_dims.size(); ++i) {
-      PADDLE_ENFORCE_NE(target_shape[i], 0,
-                        platform::errors::InvalidArgument(
-                            "The value of target shape cannot be zero."));
-      if (i < diff) {
-        PADDLE_ENFORCE_GT(
-            target_shape[i], 0,
-            platform::errors::InvalidArgument(
-                "The expanded size (%d) for non-existing dimensions must be "
-                "positive for expand_as_v2 op.",
-                target_shape[i]));
-        repeat_times[i] = target_shape[i];
-      } else if (target_shape[i] > 0) {
-        if (vec_in_dims[i] != 1) {
-          PADDLE_ENFORCE_EQ(
-              vec_in_dims[i], target_shape[i],
-              platform::errors::InvalidArgument(
-                  "The value (%d) of the non-singleton dimension does not match"
-                  " the corresponding value (%d) in shape for expand_as_v2 op.",
-                  vec_in_dims[i], target_shape[i]));
-          repeat_times[i] = 1;
-        } else {
-          repeat_times[i] = target_shape[i];
-        }
-      } else {
-        PADDLE_ENFORCE_EQ(
-            target_shape[i], -1,
-            platform::errors::InvalidArgument(
-                "When the value in shape is negative for expand_as_v2 op, "
-                "only -1 is supported, but the value received is %d.",
-                target_shape[i]));
-        repeat_times[i] = 1;
-      }
-    }
-    auto* out0 = context.Output<Tensor>("Out");
-    Eigen::DSizes<Eigen::DenseIndex, Rank> bcast_dims;
-    for (size_t i = 0; i < repeat_times.size(); ++i) {
-      bcast_dims[i] = repeat_times[i];
-    }
-
-    framework::DDim new_in_dims = phi::make_ddim(vec_in_dims);
-    framework::DDim out_dims = phi::make_ddim(target_shape);
-
-    out0->Resize(out_dims);
-    auto x = EigenTensor<T, Rank>::From(*in0, new_in_dims);
-    out0->mutable_data<T>(context.GetPlace());
-    auto y = EigenTensor<T, Rank>::From(*out0, out_dims);
-    auto& place =
-        *context.template device_context<DeviceContext>().eigen_device();
-    EigenBroadcast<std::decay_t<decltype(place)>, T, Rank>::Eval(place, y, x,
-                                                                 bcast_dims);
-  }
-};
-
-template <typename DeviceContext, typename T>
-class ExpandAsV2GradKernel : public framework::OpKernel<T> {
- public:
-  void Compute(const framework::ExecutionContext& context) const override {
-    auto* in0 = context.Input<Tensor>("X");
-    auto target_shape = context.Attr<std::vector<int>>("target_shape");
-    auto x_dims = in0->dims();
-    auto vec_in_dims = phi::vectorize<int>(x_dims);
-    auto diff = target_shape.size() - vec_in_dims.size();
-    vec_in_dims.insert(vec_in_dims.begin(), diff, 1);
-    std::vector<int> repeat_times(vec_in_dims.size());
-    for (size_t i = 0; i < vec_in_dims.size(); ++i) {
-      repeat_times[i] = target_shape[i] / vec_in_dims[i];
-    }
-    std::vector<int> reshape_dims_vec;
-    std::vector<int> reduce_dims_vec;
-    for (size_t i = 0; i < repeat_times.size(); ++i) {
-      reduce_dims_vec.push_back(reshape_dims_vec.size());
-      reshape_dims_vec.push_back(repeat_times[i]);
-      reshape_dims_vec.push_back(vec_in_dims[i]);
-    }
-
-    int dims = reduce_dims_vec.size();
-    bool just_copy = true;
-    for (size_t i = 0; i < repeat_times.size(); i++) {
-      if (repeat_times[i] != 1) {
-        just_copy = false;
-        break;
-      }
-    }
-    // no need reduce, just copy
-    if (just_copy) {
-      auto* in0 = context.Input<Tensor>(framework::GradVarName("Out"));
-      auto* out0 = context.Output<Tensor>(framework::GradVarName("X"));
-      out0->mutable_data<T>(context.GetPlace());
-      framework::TensorCopy(*in0, context.GetPlace(), context.device_context(),
-                            out0);
-    } else {
-      PADDLE_ENFORCE_GE(dims, 1,
-                        platform::errors::InvalidArgument(
-                            "The rank of the input 'Out@GRAD' for "
-                            "expand_as_v2_grad op must be greater than or "
-                            "equal to 1, but the value received is %d.",
-                            dims));
-      PADDLE_ENFORCE_LE(dims, MAX_RANK_SUPPORTED,
-                        platform::errors::InvalidArgument(
-                            "The rank of the input 'Out@GRAD' for "
-                            "expand_as_v2_grad op must be less than or equal "
-                            "to %d, but the value received is %d.",
-                            MAX_RANK_SUPPORTED, dims));
-      switch (dims) {
-        case 1:
-          ExpandAsBackward<1>(context, reshape_dims_vec, reduce_dims_vec);
-          break;
-        case 2:
-          ExpandAsBackward<2>(context, reshape_dims_vec, reduce_dims_vec);
-          break;
-        case 3:
-          ExpandAsBackward<3>(context, reshape_dims_vec, reduce_dims_vec);
-          break;
-        case 4:
-          ExpandAsBackward<4>(context, reshape_dims_vec, reduce_dims_vec);
-          break;
-        case 5:
-          ExpandAsBackward<5>(context, reshape_dims_vec, reduce_dims_vec);
-          break;
-        case 6:
-          ExpandAsBackward<6>(context, reshape_dims_vec, reduce_dims_vec);
-          break;
-        default:
-          PADDLE_THROW(platform::errors::InvalidArgument(
-              "Only support tensor with rank being between 1 and 6. But "
-              "received tensor's rank = %d.",
-              dims));
-      }
-    }
-  }
-
- protected:
-  template <int Dims>
-  void ExpandAsBackward(const framework::ExecutionContext& context,
-                        const std::vector<int>& reshape_dims_vec,
-                        const std::vector<int>& reduce_dims_vec) const {
-    size_t reshape_size = reshape_dims_vec.size();
-    size_t reduce_size = reduce_dims_vec.size();
-    auto* in0 = context.Input<Tensor>(framework::GradVarName("Out"));
-    auto* out0 = context.Output<Tensor>(framework::GradVarName("X"));
-    out0->mutable_data<T>(context.GetPlace());
-    auto x_grad = EigenVector<T>::Flatten(*out0);
-    Eigen::DSizes<Eigen::DenseIndex, Dims * 2> reshape_dims;
-    for (size_t i = 0; i < reshape_size; ++i) {
-      reshape_dims[i] = reshape_dims_vec[i];
-    }
-    Eigen::DSizes<Eigen::DenseIndex, Dims> reduce_dims;
-    for (size_t i = 0; i < reduce_size; ++i) {
-      reduce_dims[i] = reduce_dims_vec[i];
-    }
-    auto out_grad = EigenVector<T>::Flatten(*in0);
-    auto& place =
-        *context.template device_context<DeviceContext>().eigen_device();
-    EigenBroadcastGrad<std::decay_t<decltype(place)>, T, Dims>::Eval(
-        place, x_grad, out_grad, reduce_dims, reshape_dims);
-  }
-};
-
 }  // namespace operators
 }  // namespace paddle
diff --git a/paddle/phi/core/compat/op_utils.h b/paddle/phi/core/compat/op_utils.h
index fea79766a6b3f555a93eabe7d5f6facfd67ed6ad..f2b7f00cb6b8598fe7736e4cb38f03122f871807 100644
--- a/paddle/phi/core/compat/op_utils.h
+++ b/paddle/phi/core/compat/op_utils.h
@@ -51,7 +51,9 @@ const std::unordered_set<std::string> deprecated_op_names({"diag",
                                                            "reshape",
                                                            "reshape_grad",
                                                            "expand",
+                                                           "expand_as",
                                                            "expand_grad",
+                                                           "expand_as_grad",
                                                            "sum",
                                                            "top_k",
                                                            "top_k_grad"});
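
Adding "expand_as" and "expand_as_grad" to deprecated_op_names mirrors the existing "expand"/"expand_grad" entries. Presumably this is required because the base kernel names now belong to the migrated expand_as_v2 kernels (see the PD_REGISTER_BASE_KERNEL_NAME calls in expand_as_sig.cc at the end of this diff), so the compat layer must not resolve the old fluid expand_as operator to the new phi kernels.
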
+ +#include "paddle/phi/kernels/expand_as_grad_kernel.h" +#include "paddle/phi/kernels/impl/expand_as_grad_kernel_impl.h" + +#include "paddle/phi/backends/cpu/cpu_context.h" +#include "paddle/phi/core/kernel_registry.h" + +PD_REGISTER_KERNEL(expand_as_grad, + CPU, + ALL_LAYOUT, + phi::ExpandAsGradKernel, + float, + double, + int, + int64_t) {} diff --git a/paddle/phi/kernels/cpu/expand_as_kernel.cc b/paddle/phi/kernels/cpu/expand_as_kernel.cc new file mode 100644 index 0000000000000000000000000000000000000000..697ea138097ee9d57f23a11d7403b2d4b78158b6 --- /dev/null +++ b/paddle/phi/kernels/cpu/expand_as_kernel.cc @@ -0,0 +1,29 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/phi/kernels/expand_as_kernel.h" +#include "paddle/phi/kernels/impl/expand_as_kernel_impl.h" + +#include "paddle/phi/backends/cpu/cpu_context.h" +#include "paddle/phi/core/kernel_registry.h" + +PD_REGISTER_KERNEL(expand_as, + CPU, + ALL_LAYOUT, + phi::ExpandAsKernel, + float, + double, + int, + int64_t, + bool) {} diff --git a/paddle/phi/kernels/expand_as_grad_kernel.h b/paddle/phi/kernels/expand_as_grad_kernel.h new file mode 100644 index 0000000000000000000000000000000000000000..675e03c42a34732bf313a1a497e067170f828e33 --- /dev/null +++ b/paddle/phi/kernels/expand_as_grad_kernel.h @@ -0,0 +1,28 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "paddle/phi/core/dense_tensor.h" + +namespace phi { + +template +void ExpandAsGradKernel(const Context& ctx, + const DenseTensor& x, + const DenseTensor& out_grad, + const std::vector& target_shape, + DenseTensor* in_grad); + +} // namespace phi diff --git a/paddle/phi/kernels/expand_as_kernel.h b/paddle/phi/kernels/expand_as_kernel.h new file mode 100644 index 0000000000000000000000000000000000000000..971ea32310f3eab1635bfbedaa0298015f50ac2b --- /dev/null +++ b/paddle/phi/kernels/expand_as_kernel.h @@ -0,0 +1,28 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "paddle/phi/core/dense_tensor.h" + +namespace phi { + +template +void ExpandAsKernel(const Context& ctx, + const DenseTensor& x, + paddle::optional y, + const std::vector& target_shape, + DenseTensor* out); + +} // namespace phi diff --git a/paddle/phi/kernels/gpu/expand_as_grad_kernel.cu b/paddle/phi/kernels/gpu/expand_as_grad_kernel.cu new file mode 100644 index 0000000000000000000000000000000000000000..273851cfd8b34917d8bea6eeafa8a70fe2ae5ba2 --- /dev/null +++ b/paddle/phi/kernels/gpu/expand_as_grad_kernel.cu @@ -0,0 +1,28 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/phi/kernels/expand_as_grad_kernel.h" +#include "paddle/phi/kernels/impl/expand_as_grad_kernel_impl.h" + +#include "paddle/phi/backends/gpu/gpu_context.h" +#include "paddle/phi/core/kernel_registry.h" + +PD_REGISTER_KERNEL(expand_as_grad, + GPU, + ALL_LAYOUT, + phi::ExpandAsGradKernel, + float, + double, + int, + int64_t) {} diff --git a/paddle/phi/kernels/gpu/expand_as_kernel.cu b/paddle/phi/kernels/gpu/expand_as_kernel.cu new file mode 100644 index 0000000000000000000000000000000000000000..0972eebeabf1832d061160d5910d10292f2638ec --- /dev/null +++ b/paddle/phi/kernels/gpu/expand_as_kernel.cu @@ -0,0 +1,29 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "paddle/phi/kernels/expand_as_kernel.h" +#include "paddle/phi/kernels/impl/expand_as_kernel_impl.h" + +#include "paddle/phi/backends/gpu/gpu_context.h" +#include "paddle/phi/core/kernel_registry.h" + +PD_REGISTER_KERNEL(expand_as, + GPU, + ALL_LAYOUT, + phi::ExpandAsKernel, + float, + double, + int, + int64_t, + bool) {} diff --git a/paddle/phi/kernels/impl/expand_as_grad_kernel_impl.h b/paddle/phi/kernels/impl/expand_as_grad_kernel_impl.h new file mode 100644 index 0000000000000000000000000000000000000000..6ef282d470333e8099b668e5dd7d2e4c68beff3e --- /dev/null +++ b/paddle/phi/kernels/impl/expand_as_grad_kernel_impl.h @@ -0,0 +1,129 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "paddle/phi/kernels/copy_kernel.h" +#include "paddle/phi/kernels/impl/expand_as_kernel_impl.h" + +namespace phi { +template +void ExpandAsBackward(const Context& ctx, + const DenseTensor& out_grad, + const std::vector& reshape_dims_vec, + const std::vector& reduce_dims_vec, + DenseTensor* in_grad) { + size_t reshape_size = reshape_dims_vec.size(); + size_t reduce_size = reduce_dims_vec.size(); + ctx.template Alloc(in_grad); + auto x_grad = EigenVector::Flatten(*in_grad); + Eigen::DSizes reshape_dims; + for (size_t i = 0; i < reshape_size; ++i) { + reshape_dims[i] = reshape_dims_vec[i]; + } + Eigen::DSizes reduce_dims; + for (size_t i = 0; i < reduce_size; ++i) { + reduce_dims[i] = reduce_dims_vec[i]; + } + auto out_grad0 = EigenVector::Flatten(out_grad); + auto& place = *ctx.eigen_device(); + funcs::EigenBroadcastGrad, T, Dims>::Eval( + place, x_grad, out_grad0, reduce_dims, reshape_dims); +} + +template +void ExpandAsGradKernel(const Context& context, + const DenseTensor& x, + const DenseTensor& out_grad, + const std::vector& target_shape, + DenseTensor* in_grad) { + auto x_dims = x.dims(); + auto vec_in_dims = phi::vectorize(x_dims); + auto diff = target_shape.size() - vec_in_dims.size(); + vec_in_dims.insert(vec_in_dims.begin(), diff, 1); + std::vector repeat_times(vec_in_dims.size()); + for (size_t i = 0; i < vec_in_dims.size(); ++i) { + repeat_times[i] = target_shape[i] / vec_in_dims[i]; + } + std::vector reshape_dims_vec; + std::vector reduce_dims_vec; + for (size_t i = 0; i < repeat_times.size(); ++i) { + reduce_dims_vec.push_back(reshape_dims_vec.size()); + reshape_dims_vec.push_back(repeat_times[i]); + reshape_dims_vec.push_back(vec_in_dims[i]); + } + + int dims = reduce_dims_vec.size(); + bool just_copy = true; + for (size_t i = 0; i < repeat_times.size(); i++) { + if (repeat_times[i] != 1) { + just_copy = false; + break; + } + } + // no need reduce, just copy + if (just_copy) { + context.template Alloc(in_grad); + phi::Copy(context, out_grad, context.GetPlace(), false, in_grad); + } else { + PADDLE_ENFORCE_GE( + dims, + 1, + errors::InvalidArgument("The rank of the input 'Out@GRAD' for " + "expand_as_v2_grad op must be greater than or " + "equal to 1, but the value received is %d.", + 
dims)); + PADDLE_ENFORCE_LE(dims, + MAX_RANK_SUPPORTED, + errors::InvalidArgument( + "The rank of the input 'Out@GRAD' for " + "expand_as_v2_grad op must be less than or equal " + "to %d, but the value received is %d.", + MAX_RANK_SUPPORTED, + dims)); + switch (dims) { + case 1: + ExpandAsBackward( + context, out_grad, reshape_dims_vec, reduce_dims_vec, in_grad); + break; + case 2: + ExpandAsBackward( + context, out_grad, reshape_dims_vec, reduce_dims_vec, in_grad); + break; + case 3: + ExpandAsBackward( + context, out_grad, reshape_dims_vec, reduce_dims_vec, in_grad); + break; + case 4: + ExpandAsBackward( + context, out_grad, reshape_dims_vec, reduce_dims_vec, in_grad); + break; + case 5: + ExpandAsBackward( + context, out_grad, reshape_dims_vec, reduce_dims_vec, in_grad); + break; + case 6: + ExpandAsBackward( + context, out_grad, reshape_dims_vec, reduce_dims_vec, in_grad); + break; + default: + PADDLE_THROW(errors::InvalidArgument( + "Only support tensor with rank being between 1 and 6. But " + "received tensor's rank = %d.", + dims)); + } + } +} + +} // namespace phi diff --git a/paddle/phi/kernels/impl/expand_as_kernel_impl.h b/paddle/phi/kernels/impl/expand_as_kernel_impl.h new file mode 100644 index 0000000000000000000000000000000000000000..e5138e4e12c05403ceb47371e03e9cbe207bf9a4 --- /dev/null +++ b/paddle/phi/kernels/impl/expand_as_kernel_impl.h @@ -0,0 +1,145 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
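
The index bookkeeping in ExpandAsGradKernel is easiest to see on a concrete case: each output dimension i is viewed as a pair of axes (repeat_times[i], vec_in_dims[i]), and the gradient is summed over the repeat axes. Below is a minimal standalone sketch of that derivation, with no Paddle dependencies and invented sample shapes (x of shape [3, 1] expanded to a [2, 3, 4] target):

#include <cstdio>
#include <vector>

int main() {
  // Invented example: x of shape [3, 1] expanded to target [2, 3, 4].
  std::vector<int> vec_in_dims = {3, 1};
  std::vector<int> target_shape = {2, 3, 4};

  // Left-pad x's shape with 1s to the target rank: [1, 3, 1].
  std::size_t diff = target_shape.size() - vec_in_dims.size();
  vec_in_dims.insert(vec_in_dims.begin(), diff, 1);

  // repeat_times[i] = target_shape[i] / vec_in_dims[i] -> [2, 1, 4].
  std::vector<int> repeat_times(vec_in_dims.size());
  for (std::size_t i = 0; i < vec_in_dims.size(); ++i) {
    repeat_times[i] = target_shape[i] / vec_in_dims[i];
  }

  // Interleave (repeat, original) per dimension; the even positions are
  // the axes the gradient must be summed over.
  std::vector<int> reshape_dims_vec, reduce_dims_vec;
  for (std::size_t i = 0; i < repeat_times.size(); ++i) {
    reduce_dims_vec.push_back(reshape_dims_vec.size());
    reshape_dims_vec.push_back(repeat_times[i]);
    reshape_dims_vec.push_back(vec_in_dims[i]);
  }

  // Prints: reshape = 2 1 1 3 4 1, reduce = 0 2 4
  std::printf("reshape =");
  for (int d : reshape_dims_vec) std::printf(" %d", d);
  std::printf(", reduce =");
  for (int d : reduce_dims_vec) std::printf(" %d", d);
  std::printf("\n");
}
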
diff --git a/paddle/phi/kernels/impl/expand_as_kernel_impl.h b/paddle/phi/kernels/impl/expand_as_kernel_impl.h
new file mode 100644
index 0000000000000000000000000000000000000000..e5138e4e12c05403ceb47371e03e9cbe207bf9a4
--- /dev/null
+++ b/paddle/phi/kernels/impl/expand_as_kernel_impl.h
@@ -0,0 +1,145 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include <algorithm>
+#include <vector>
+
+#include "paddle/phi/kernels/funcs/eigen/common.h"
+#include "paddle/phi/kernels/funcs/eigen/eigen_function.h"
+
+#define MAX_RANK_SUPPORTED 6
+
+namespace phi {
+
+template <typename Context, typename T, int Rank>
+void ExpandAs(const Context& context,
+              const DenseTensor& x,
+              const std::vector<int>& target_shape,
+              DenseTensor* out) {
+  auto in_dims = x.dims();
+  auto vec_in_dims = phi::vectorize<int>(in_dims);
+  auto diff = target_shape.size() - vec_in_dims.size();
+  vec_in_dims.insert(vec_in_dims.begin(), diff, 1);
+  std::vector<int> repeat_times(vec_in_dims.size());
+  for (size_t i = 0; i < vec_in_dims.size(); ++i) {
+    PADDLE_ENFORCE_NE(
+        target_shape[i],
+        0,
+        errors::InvalidArgument("The value of target shape cannot be zero."));
+    if (i < diff) {
+      PADDLE_ENFORCE_GT(
+          target_shape[i],
+          0,
+          errors::InvalidArgument(
+              "The expanded size (%d) for non-existing dimensions must be "
+              "positive for expand_as_v2 op.",
+              target_shape[i]));
+      repeat_times[i] = target_shape[i];
+    } else if (target_shape[i] > 0) {
+      if (vec_in_dims[i] != 1) {
+        PADDLE_ENFORCE_EQ(
+            vec_in_dims[i],
+            target_shape[i],
+            errors::InvalidArgument(
+                "The value (%d) of the non-singleton dimension does not match"
+                " the corresponding value (%d) in shape for expand_as_v2 op.",
+                vec_in_dims[i],
+                target_shape[i]));
+        repeat_times[i] = 1;
+      } else {
+        repeat_times[i] = target_shape[i];
+      }
+    } else {
+      PADDLE_ENFORCE_EQ(
+          target_shape[i],
+          -1,
+          errors::InvalidArgument(
+              "When the value in shape is negative for expand_as_v2 op, "
+              "only -1 is supported, but the value received is %d.",
+              target_shape[i]));
+      repeat_times[i] = 1;
+    }
+  }
+  Eigen::DSizes<Eigen::DenseIndex, Rank> bcast_dims;
+  for (size_t i = 0; i < repeat_times.size(); ++i) {
+    bcast_dims[i] = repeat_times[i];
+  }
+
+  phi::DDim new_in_dims = phi::make_ddim(vec_in_dims);
+  phi::DDim out_dims = phi::make_ddim(target_shape);
+
+  out->Resize(out_dims);
+  context.template Alloc<T>(out);
+  auto x0 = EigenTensor<T, Rank>::From(x, new_in_dims);
+  auto y = EigenTensor<T, Rank>::From(*out, out_dims);
+  auto& place = *context.eigen_device();
+  funcs::EigenBroadcast<std::decay_t<decltype(place)>, T, Rank>::Eval(
+      place, y, x0, bcast_dims);
+}
+
+template <typename T, typename Context>
+void ExpandAsKernel(const Context& ctx,
+                    const DenseTensor& x,
+                    paddle::optional<const DenseTensor&> y,
+                    const std::vector<int>& target_shape,
+                    DenseTensor* out) {
+  auto rank = x.dims().size();
+  auto target_rank = target_shape.size();
+  PADDLE_ENFORCE_GE(target_rank,
+                    rank,
+                    errors::InvalidArgument(
+                        "The rank (%d) of the input 'target_tensor' for "
+                        "expand_as_v2 op must be greater than or equal to "
+                        "the rank (%d) of the input 'x'.",
+                        target_rank,
+                        rank));
+  PADDLE_ENFORCE_GE(
+      rank,
+      1,
+      errors::InvalidArgument("The rank (%d) of the input 'x' for "
+                              "expand_as_v2 op must be positive.",
+                              rank));
+  PADDLE_ENFORCE_LE(target_rank,
+                    MAX_RANK_SUPPORTED,
+                    errors::InvalidArgument(
+                        "The rank (%d) of the input 'target_tensor' for "
+                        "expand_as_v2 op must be less than or equal to %d.",
+                        target_rank,
+                        MAX_RANK_SUPPORTED));
+
+  switch (target_rank) {
+    case 1:
+      ExpandAs<Context, T, 1>(ctx, x, target_shape, out);
+      break;
+    case 2:
+      ExpandAs<Context, T, 2>(ctx, x, target_shape, out);
+      break;
+    case 3:
+      ExpandAs<Context, T, 3>(ctx, x, target_shape, out);
+      break;
+    case 4:
+      ExpandAs<Context, T, 4>(ctx, x, target_shape, out);
+      break;
+    case 5:
+      ExpandAs<Context, T, 5>(ctx, x, target_shape, out);
+      break;
+    case 6:
+      ExpandAs<Context, T, 6>(ctx, x, target_shape, out);
+      break;
+  }
+}
+
+}  // namespace phi
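
The forward path of ExpandAs reduces to a single Eigen broadcast. The following is a minimal standalone sketch of the same operation written directly against Eigen's unsupported Tensor module, with invented sample values; funcs::EigenBroadcast evaluates essentially this expression on the kernel's device:

#include <iostream>

#include <unsupported/Eigen/CXX11/Tensor>

int main() {
  // x has shape [1, 3]; expanding it "as" a [2, 3] target means
  // repeat_times = [2, 1], which becomes bcast_dims below.
  Eigen::Tensor<float, 2> x(1, 3);
  x.setValues({{1.f, 2.f, 3.f}});

  Eigen::DSizes<Eigen::DenseIndex, 2> bcast_dims(2, 1);
  Eigen::Tensor<float, 2> y = x.broadcast(bcast_dims);

  // y has shape [2, 3] with both rows equal to x; the kernel evaluates the
  // equivalent expression on its device via y.device(place) = x.broadcast(...).
  std::cout << y << std::endl;
  return 0;
}
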
diff --git a/paddle/phi/ops/compat/expand_as_sig.cc b/paddle/phi/ops/compat/expand_as_sig.cc
new file mode 100644
index 0000000000000000000000000000000000000000..a616b63c10b3c6ef9bca8c906655da99e8912244
--- /dev/null
+++ b/paddle/phi/ops/compat/expand_as_sig.cc
@@ -0,0 +1,38 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/phi/core/compat/op_utils.h"
+
+namespace phi {
+
+KernelSignature ExpandAsOpArgumentMapping(const ArgumentMappingContext& ctx) {
+  return KernelSignature("expand_as", {"X", "Y"}, {"target_shape"}, {"Out"});
+}
+
+KernelSignature ExpandAsGradOpArgumentMapping(
+    const ArgumentMappingContext& ctx) {
+  return KernelSignature("expand_as_grad",
+                         {"X", GradVarName("Out")},
+                         {"target_shape"},
+                         {GradVarName("X")});
+}
+
+}  // namespace phi
+
+PD_REGISTER_BASE_KERNEL_NAME(expand_as_v2, expand_as);
+PD_REGISTER_BASE_KERNEL_NAME(expand_as_v2_grad, expand_as_grad);
+
+PD_REGISTER_ARG_MAPPING_FN(expand_as_v2, phi::ExpandAsOpArgumentMapping);
+PD_REGISTER_ARG_MAPPING_FN(expand_as_v2_grad,
+                           phi::ExpandAsGradOpArgumentMapping);
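
The signature file is what wires the legacy operator onto the new kernels: PD_REGISTER_BASE_KERNEL_NAME maps the op names expand_as_v2 and expand_as_v2_grad to the base kernel names expand_as and expand_as_grad registered above, while the argument-mapping functions select the kernel arguments from the op description: inputs {X, Y} plus the target_shape attribute produce Out, and {X, Out@GRAD} with the same attribute produce X@GRAD. Note that ExpandAsKernel never reads y; the expand shape travels through the target_shape attribute, so Y appears to matter only for shape inference.
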