Unverified commit 382e460b, authored by chentianyu03 and committed by GitHub

[Phi]add pad3d kernel into phi (#40701)

* add pad3d kernel into phi

* add pad3d infermeta

* fix build error

* remove raw pad3d infershape function
Parent 0e1191f4
@@ -877,6 +877,77 @@ void PadInferMeta(const MetaTensor& input,
  out->set_dtype(input.dtype());
}

void Pad3dInferMeta(const MetaTensor& x,
const ScalarArray& paddings_scalar_array,
const std::string& mode,
float value,
const std::string& data_format,
MetaTensor* out,
MetaConfig config) {
auto x_dim = x.dims();
PADDLE_ENFORCE_EQ(x_dim.size(),
5,
errors::InvalidArgument(
"The size of Input(X)'s dimension should be equal to "
"5, but received %d. ",
x_dim.size()));
std::vector<int64_t> out_dims(x_dim.size());
out_dims[0] = x_dim[0];
if (paddings_scalar_array.FromTensor()) {
if (config.is_runtime) {
PADDLE_ENFORCE_EQ(
paddings_scalar_array.GetData().size(),
6,
errors::InvalidArgument("Shape of Input(Paddings) should be equal to "
"[6], but received [%d].",
paddings_scalar_array.GetData().size()));
}
out_dims[1] = x_dim[1];
out_dims[2] = x_dim[2];
out_dims[3] = x_dim[3];
} else {
auto paddings = paddings_scalar_array.GetData();
PADDLE_ENFORCE_EQ(
paddings.size(),
6,
errors::InvalidArgument(
"Size of paddings should be equal to 6, but received %d.",
static_cast<int>(paddings.size())));
if (data_format == "NCDHW") {
out_dims[1] = x_dim[1]; // channel
out_dims[2] = ((!config.is_runtime) && (x_dim[2] < 0))
? x_dim[2]
: (x_dim[2] + paddings[4] + paddings[5]); // depth
out_dims[3] = ((!config.is_runtime) && (x_dim[3] < 0))
? x_dim[3]
: (x_dim[3] + paddings[2] + paddings[3]); // height
out_dims[4] = ((!config.is_runtime) && (x_dim[4] < 0))
? x_dim[4]
: (x_dim[4] + paddings[0] + paddings[1]); // width
} else { // NDHWC
out_dims[4] = x_dim[4]; // channel
out_dims[1] = ((!config.is_runtime) && (x_dim[1] < 0))
? x_dim[1]
: (x_dim[1] + paddings[4] + paddings[5]); // depth
out_dims[2] = ((!config.is_runtime) && (x_dim[2] < 0))
? x_dim[2]
: (x_dim[2] + paddings[2] + paddings[3]); // height
out_dims[3] = ((!config.is_runtime) && (x_dim[3] < 0))
? x_dim[3]
: (x_dim[3] + paddings[0] + paddings[1]); // width
}
}
out->set_dims(phi::make_ddim(out_dims));
out->set_dtype(x.dtype());
out->share_lod(x);
}
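
For intuition about the shape arithmetic above: the six paddings are ordered
[left, right, top, bottom, front, back], so paddings[0]/[1] widen the width
axis, paddings[2]/[3] the height axis, and paddings[4]/[5] the depth axis. A
minimal standalone sketch of the NCDHW branch (the helper name is
hypothetical, not part of this patch):

#include <cstdint>
#include <cstdio>
#include <vector>

// Mirrors the static-shape NCDHW arithmetic of Pad3dInferMeta.
std::vector<std::int64_t> Pad3dOutShapeNCDHW(
    const std::vector<std::int64_t>& x,
    const std::vector<std::int64_t>& pads) {
  return {x[0],
          x[1],                       // channel is unchanged
          x[2] + pads[4] + pads[5],   // depth  += front + back
          x[3] + pads[2] + pads[3],   // height += top + bottom
          x[4] + pads[0] + pads[1]};  // width  += left + right
}

int main() {
  auto out = Pad3dOutShapeNCDHW({2, 3, 4, 5, 6}, {1, 1, 2, 2, 3, 3});
  for (auto d : out) std::printf("%lld ", static_cast<long long>(d));
  // prints: 2 3 10 9 8
  return 0;
}
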
void PixelShuffleInferMeta(const MetaTensor& x,
                           int upscale_factor,
                           const std::string& data_format,
...
@@ -147,6 +147,14 @@ void PadInferMeta(const MetaTensor& input,
                  MetaTensor* out,
                  MetaConfig config = MetaConfig());

void Pad3dInferMeta(const MetaTensor& x,
const ScalarArray& paddings,
const std::string& mode,
float value,
const std::string& data_format,
MetaTensor* out,
MetaConfig config = MetaConfig());
void PixelShuffleInferMeta(const MetaTensor& x,
                           int upscale_factor,
                           const std::string& data_format,
...
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/pad3d_grad_kernel.h"
#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/math_function.h"
namespace phi {
template <typename T>
void ConstPad3DGradNCDHW(T* d_in_data,
const T* d_out_data,
const int in_depth,
const int in_height,
const int in_width,
const int out_depth,
const int out_height,
const int out_width,
const int pad_front,
const int pad_top,
const int pad_left,
const int out_d,
const int out_h,
const int out_w) {
int in_d = out_d - pad_front;
int in_h = out_h - pad_top;
int in_w = out_w - pad_left;
if (!(in_d < 0 || in_h < 0 || in_w < 0 || in_d >= in_depth ||
in_h >= in_height || in_w >= in_width)) {
d_in_data[in_d * in_height * in_width + in_h * in_width + in_w] =
d_out_data[out_d * out_height * out_width + out_h * out_width + out_w];
}
}
template <typename T>
void ConstPad3DGradNDHWC(T* d_in_data,
const T* d_out_data,
const int channels,
const int in_depth,
const int in_height,
const int in_width,
const int out_depth,
const int out_height,
const int out_width,
const int pad_front,
const int pad_top,
const int pad_left,
const int out_d,
const int out_h,
const int out_w) {
int in_d = out_d - pad_front;
int in_h = out_h - pad_top;
int in_w = out_w - pad_left;
const int out_index =
(out_d * out_height * out_width + out_h * out_width + out_w) * channels;
if (!(in_d < 0 || in_h < 0 || in_w < 0 || in_d >= in_depth ||
in_h >= in_height || in_w >= in_width)) {
const int in_index =
(in_d * in_height * in_width + in_h * in_width + in_w) * channels;
for (int c = 0; c < channels; ++c) {
d_in_data[in_index + c] = d_out_data[out_index + c];
}
}
}
template <typename T>
void ReflectPad3DGradNCDHW(T* d_in_data,
const T* d_out_data,
const int in_depth,
const int in_height,
const int in_width,
const int out_depth,
const int out_height,
const int out_width,
const int pad_front,
const int pad_top,
const int pad_left,
const int out_d,
const int out_h,
const int out_w) {
int in_d = out_d - pad_front;
int in_h = out_h - pad_top;
int in_w = out_w - pad_left;
in_d = std::max(in_d, -in_d); // reflect by 0
in_d = std::min(in_d, 2 * in_depth - in_d - 2); // reflect by in_depth
in_h = std::max(in_h, -in_h); // reflect by 0
in_h = std::min(in_h, 2 * in_height - in_h - 2); // reflect by in_height
in_w = std::max(in_w, -in_w); // reflect by 0
in_w = std::min(in_w, 2 * in_width - in_w - 2); // reflect by in_width
d_in_data[in_d * in_height * in_width + in_h * in_width + in_w] +=
d_out_data[out_d * out_height * out_width + out_h * out_width + out_w];
}
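
The two clamp lines above implement a single mirror reflection: max(i, -i)
folds negative indices back across index 0, and min(i, 2*size - i - 2) folds
overshoot back across index size - 1. A small sketch with worked checks
(valid when each pad is smaller than the input extent, as reflect padding
requires):

#include <algorithm>
#include <cassert>

int ReflectIndex(int i, int size) {
  i = std::max(i, -i);                // reflect across 0
  i = std::min(i, 2 * size - i - 2);  // reflect across size - 1
  return i;
}

int main() {
  assert(ReflectIndex(-2, 4) == 2);  // -2 mirrors forward to 2
  assert(ReflectIndex(5, 4) == 1);   // 5 mirrors back across 3 to 1
  assert(ReflectIndex(3, 4) == 3);   // interior indices pass through
  return 0;
}
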
template <typename T>
void ReflectPad3DGradNDHWC(T* d_in_data,
const T* d_out_data,
const int channels,
const int in_depth,
const int in_height,
const int in_width,
const int out_depth,
const int out_height,
const int out_width,
const int pad_front,
const int pad_top,
const int pad_left,
const int out_d,
const int out_h,
const int out_w) {
int in_d = out_d - pad_front;
int in_h = out_h - pad_top;
int in_w = out_w - pad_left;
in_d = std::max(in_d, -in_d);
in_d = std::min(in_d, 2 * in_depth - in_d - 2);
in_h = std::max(in_h, -in_h);
in_h = std::min(in_h, 2 * in_height - in_h - 2);
in_w = std::max(in_w, -in_w);
in_w = std::min(in_w, 2 * in_width - in_w - 2);
const int out_index =
(out_d * out_height * out_width + out_h * out_width + out_w) * channels;
const int in_index =
(in_d * in_height * in_width + in_h * in_width + in_w) * channels;
for (int c = 0; c < channels; ++c) {
d_in_data[in_index + c] += d_out_data[out_index + c];
}
}
template <typename T>
void ReplicatePad3DGradNCDHW(T* d_in_data,
const T* d_out_data,
const int in_depth,
const int in_height,
const int in_width,
const int out_depth,
const int out_height,
const int out_width,
const int pad_front,
const int pad_top,
const int pad_left,
const int out_d,
const int out_h,
const int out_w) {
int in_d = std::min(in_depth - 1, std::max(out_d - pad_front, 0));
int in_h = std::min(in_height - 1, std::max(out_h - pad_top, 0));
int in_w = std::min(in_width - 1, std::max(out_w - pad_left, 0));
d_in_data[in_d * in_height * in_width + in_h * in_width + in_w] +=
d_out_data[out_d * out_height * out_width + out_h * out_width + out_w];
}
template <typename T>
void ReplicatePad3DGradNDHWC(T* d_in_data,
const T* d_out_data,
const int channels,
const int in_depth,
const int in_height,
const int in_width,
const int out_depth,
const int out_height,
const int out_width,
const int pad_front,
const int pad_top,
const int pad_left,
const int out_d,
const int out_h,
const int out_w) {
int in_d = std::min(in_depth - 1, std::max(out_d - pad_front, 0));
int in_h = std::min(in_height - 1, std::max(out_h - pad_top, 0));
int in_w = std::min(in_width - 1, std::max(out_w - pad_left, 0));
const int out_index =
(out_d * out_height * out_width + out_h * out_width + out_w) * channels;
const int in_index =
(in_d * in_height * in_width + in_h * in_width + in_w) * channels;
for (int c = 0; c < channels; ++c) {
d_in_data[in_index + c] += d_out_data[out_index + c];
}
}
template <typename T>
void CircularPad3DGradNCDHW(T* d_in_data,
const T* d_out_data,
const int in_depth,
const int in_height,
const int in_width,
const int out_depth,
const int out_height,
const int out_width,
const int pad_front,
const int pad_top,
const int pad_left,
const int out_d,
const int out_h,
const int out_w) {
int in_d = ((out_d - pad_front) % in_depth + in_depth) % in_depth;
int in_h = ((out_h - pad_top) % in_height + in_height) % in_height;
int in_w = ((out_w - pad_left) % in_width + in_width) % in_width;
d_in_data[in_d * in_height * in_width + in_h * in_width + in_w] +=
d_out_data[out_d * out_height * out_width + out_h * out_width + out_w];
}
template <typename T>
void CircularPad3DGradNDHWC(T* d_in_data,
const T* d_out_data,
const int channels,
const int in_depth,
const int in_height,
const int in_width,
const int out_depth,
const int out_height,
const int out_width,
const int pad_front,
const int pad_top,
const int pad_left,
const int out_d,
const int out_h,
const int out_w) {
int in_d = ((out_d - pad_front) % in_depth + in_depth) % in_depth;
int in_h = ((out_h - pad_top) % in_height + in_height) % in_height;
int in_w = ((out_w - pad_left) % in_width + in_width) % in_width;
const int out_index =
(out_d * out_height * out_width + out_h * out_width + out_w) * channels;
const int in_index =
(in_d * in_height * in_width + in_h * in_width + in_w) * channels;
for (int c = 0; c < channels; ++c) {
d_in_data[in_index + c] += d_out_data[out_index + c];
}
}
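
The circular helpers rely on ((i % n) + n) % n, a non-negative (Euclidean)
modulo: plain % in C++ can return a negative remainder for a negative
operand, and adding n before the second % wraps such indices to the end of
the axis. A minimal sketch:

#include <cassert>

int CircularIndex(int i, int n) { return ((i % n) + n) % n; }

int main() {
  assert(CircularIndex(-1, 4) == 3);  // one before the start -> last element
  assert(CircularIndex(5, 4) == 1);   // one past the end -> wraps to 1
  assert(CircularIndex(2, 4) == 2);   // interior indices pass through
  return 0;
}
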
template <typename T>
void Pad3DGradNCDHW(T* d_in_data,
const int num,
const int channels,
const int in_depth,
const int in_height,
const int in_width,
const int out_depth,
const int out_height,
const int out_width,
const int pad_front,
const int pad_top,
const int pad_left,
const T* d_out_data,
void (*pad_func)(T*,
const T*,
const int,
const int,
const int,
const int,
const int,
const int,
const int,
const int,
const int,
const int,
const int,
const int)) {
for (int n = 0; n < num; ++n) {
for (int c = 0; c < channels; ++c) {
for (int out_d = 0; out_d < out_depth; ++out_d) {
for (int out_h = 0; out_h < out_height; ++out_h) {
for (int out_w = 0; out_w < out_width; ++out_w) {
pad_func(d_in_data,
d_out_data,
in_depth,
in_height,
in_width,
out_depth,
out_height,
out_width,
pad_front,
pad_top,
pad_left,
out_d,
out_h,
out_w);
}
}
}
d_in_data += in_depth * in_height * in_width;
d_out_data += out_depth * out_height * out_width;
}
}
}
template <typename T>
void Pad3DGradNDHWC(T* d_in_data,
const int num,
const int channels,
const int in_depth,
const int in_height,
const int in_width,
const int out_depth,
const int out_height,
const int out_width,
const int pad_front,
const int pad_top,
const int pad_left,
const T* d_out_data,
void (*pad_func)(T*,
const T*,
const int,
const int,
const int,
const int,
const int,
const int,
const int,
const int,
const int,
const int,
const int,
const int,
const int)) {
for (int n = 0; n < num; ++n) {
for (int out_d = 0; out_d < out_depth; ++out_d) {
for (int out_h = 0; out_h < out_height; ++out_h) {
for (int out_w = 0; out_w < out_width; ++out_w) {
pad_func(d_in_data,
d_out_data,
channels,
in_depth,
in_height,
in_width,
out_depth,
out_height,
out_width,
pad_front,
pad_top,
pad_left,
out_d,
out_h,
out_w);
}
}
}
d_in_data += in_depth * in_height * in_width * channels;
d_out_data += out_depth * out_height * out_width * channels;
}
}
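
Both driver loops are mode-agnostic: they visit every output element exactly
once and delegate the output-to-input index rule to the pad_func pointer
chosen from func_map in the kernel below. A simplified 1-D sketch of the same
dispatch pattern (all names here are illustrative, not from this patch):

#include <cstdio>
#include <map>
#include <string>

using IndexFn = int (*)(int out_i, int pad, int in_size);

int ReplicateLike(int out_i, int pad, int in_size) {
  int i = out_i - pad;
  return i < 0 ? 0 : (i >= in_size ? in_size - 1 : i);
}

int CircularLike(int out_i, int pad, int in_size) {
  return (((out_i - pad) % in_size) + in_size) % in_size;
}

int main() {
  // The driver stays fixed; the mode string picks the index rule.
  std::map<std::string, IndexFn> func_map = {{"replicate", ReplicateLike},
                                             {"circular", CircularLike}};
  IndexFn fn = func_map["replicate"];
  for (int out_i = 0; out_i < 6; ++out_i)
    std::printf("out %d <- in %d\n", out_i, fn(out_i, /*pad=*/2, /*in=*/2));
  return 0;
}
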
template <typename T, typename Context>
void Pad3dGradKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& out_grad,
const ScalarArray& paddings,
const std::string& mode,
float pad_value,
const std::string& data_format,
DenseTensor* x_grad) {
std::vector<int64_t> pads = paddings.GetData();
auto* d_out = &out_grad;
auto* d_in = x_grad;
auto d_in_dims = d_in->dims();
auto d_out_dims = d_out->dims();
const T* d_out_data = d_out->data<T>();
T* d_in_data = dev_ctx.template Alloc<T>(d_in);
phi::funcs::SetConstant<Context, T>()(dev_ctx, d_in, static_cast<T>(0));
const int pad_left = pads[0];
const int pad_top = pads[2];
const int pad_front = pads[4];
const int num = d_in_dims[0];
if (data_format == "NCDHW") {
const int channels = d_in_dims[1];
const int in_depth = d_in_dims[2];
const int in_height = d_in_dims[3];
const int in_width = d_in_dims[4];
const int out_depth = d_out_dims[2];
const int out_height = d_out_dims[3];
const int out_width = d_out_dims[4];
std::map<std::string,
void (*)(T*,
const T*,
const int,
const int,
const int,
const int,
const int,
const int,
const int,
const int,
const int,
const int,
const int,
const int)>
func_map;
func_map["reflect"] = ReflectPad3DGradNCDHW;
func_map["replicate"] = ReplicatePad3DGradNCDHW;
func_map["circular"] = CircularPad3DGradNCDHW;
func_map["constant"] = ConstPad3DGradNCDHW;
Pad3DGradNCDHW(d_in_data,
num,
channels,
in_depth,
in_height,
in_width,
out_depth,
out_height,
out_width,
pad_front,
pad_top,
pad_left,
d_out_data,
func_map[mode]);
} else {
const int channels = d_in_dims[4];
const int in_depth = d_in_dims[1];
const int in_height = d_in_dims[2];
const int in_width = d_in_dims[3];
const int out_depth = d_out_dims[1];
const int out_height = d_out_dims[2];
const int out_width = d_out_dims[3];
std::map<std::string,
void (*)(T*,
const T*,
const int,
const int,
const int,
const int,
const int,
const int,
const int,
const int,
const int,
const int,
const int,
const int,
const int)>
func_map;
func_map["reflect"] = ReflectPad3DGradNDHWC;
func_map["replicate"] = ReplicatePad3DGradNDHWC;
func_map["circular"] = CircularPad3DGradNDHWC;
func_map["constant"] = ConstPad3DGradNDHWC;
Pad3DGradNDHWC(d_in_data,
num,
channels,
in_depth,
in_height,
in_width,
out_depth,
out_height,
out_width,
pad_front,
pad_top,
pad_left,
d_out_data,
func_map[mode]);
}
}
} // namespace phi
PD_REGISTER_KERNEL(
pad3d_grad, CPU, ALL_LAYOUT, phi::Pad3dGradKernel, float, double) {}
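
Note the asymmetry in the helpers above: constant-mode backward assigns with
= because each interior input cell receives gradient from exactly one output
cell, while the reflect/replicate/circular helpers accumulate with += because
several padded output positions can map onto the same input cell. A toy 1-D
illustration of why the accumulation matters:

#include <cassert>

int main() {
  const int in_size = 2, pad = 2, out_size = 6;  // replicate padding
  float d_in[2] = {0.f, 0.f};
  float d_out[6] = {1.f, 1.f, 1.f, 1.f, 1.f, 1.f};
  for (int out_i = 0; out_i < out_size; ++out_i) {
    int i = out_i - pad;
    i = i < 0 ? 0 : (i >= in_size ? in_size - 1 : i);
    d_in[i] += d_out[out_i];  // accumulate; overwriting would drop gradient
  }
  assert(d_in[0] == 3.f && d_in[1] == 3.f);
  return 0;
}
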
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "paddle/phi/common/scalar_array.h"
#include "paddle/phi/core/dense_tensor.h"
namespace phi {
template <typename T, typename Context>
void Pad3dGradKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& out_grad,
const ScalarArray& paddings,
const std::string& mode,
float pad_value,
const std::string& data_format,
DenseTensor* x_grad);
} // namespace phi
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "paddle/phi/common/scalar_array.h"
#include "paddle/phi/core/dense_tensor.h"
namespace phi {
template <typename T, typename Context>
void Pad3dKernel(const Context& dev_ctx,
const DenseTensor& x,
const ScalarArray& paddings,
const std::string& mode,
float pad_value,
const std::string& data_format,
DenseTensor* out);
} // namespace phi
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/core/compat/op_utils.h"
namespace phi {
KernelSignature Pad3dOpArgumentMapping(const ArgumentMappingContext& ctx) {
if (ctx.HasInput("Paddings")) {
return KernelSignature(
"pad3d", {"X"}, {"Paddings", "mode", "value", "data_format"}, {"Out"});
}
return KernelSignature(
"pad3d", {"X"}, {"paddings", "mode", "value", "data_format"}, {"Out"});
}
KernelSignature Pad3dGradOpArgumentMapping(const ArgumentMappingContext& ctx) {
if (ctx.HasInput("Paddings")) {
return KernelSignature("pad3d_grad",
{"X", GradVarName("Out")},
{"Paddings", "mode", "value", "data_format"},
{GradVarName("X")});
}
return KernelSignature("pad3d_grad",
{"X", GradVarName("Out")},
{"paddings", "mode", "value", "data_format"},
{GradVarName("X")});
}
} // namespace phi
PD_REGISTER_ARG_MAPPING_FN(pad3d, phi::Pad3dOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(pad3d_grad, phi::Pad3dGradOpArgumentMapping);
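
The two signatures in each mapping function cover the two ways the legacy
pad3d op supplies its padding: when the capitalized "Paddings" tensor input
is present it takes precedence, otherwise the lowercase "paddings" attribute
is used; both routes feed the single ScalarArray parameter of the phi
kernels.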