Unverified commit a5f2e1f7 authored by wangshengxiang, committed by GitHub

bind pixel_shuffle & pixel_shuffle_grad op for xpu (#50090)

Parent 0d9185b9
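
This commit registers pixel_shuffle and pixel_shuffle_grad kernels for Kunlun (XPU) devices and lists them in the KL2 op map, so the op dispatches to XPU like any other backend. A minimal usage sketch (assuming a Paddle build with XPU support):

import paddle

paddle.set_device("xpu")
x = paddle.rand([2, 9, 4, 4], dtype="float32")  # NCHW; channels divisible by r*r
y = paddle.nn.functional.pixel_shuffle(x, upscale_factor=3)
print(y.shape)  # [2, 1, 12, 12]: channels / 9, height and width * 3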
@@ -415,6 +415,8 @@ XPUOpMap& get_kl2_ops() {
{"p_norm_grad", XPUKernelSet({phi::DataType::FLOAT32})},
{"pad3d_grad", XPUKernelSet({phi::DataType::FLOAT32})},
{"pad3d", XPUKernelSet({phi::DataType::FLOAT32})},
{"pixel_shuffle", XPUKernelSet({phi::DataType::FLOAT32})},
{"pixel_shuffle_grad", XPUKernelSet({phi::DataType::FLOAT32})},
{"pool2d_grad",
XPUKernelSet({phi::DataType::FLOAT32, phi::DataType::FLOAT16})},
{"pool2d",
...
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/pixel_shuffle_grad_kernel.h"
#include "paddle/phi/backends/xpu/enforce_xpu.h"
#include "paddle/phi/core/kernel_registry.h"
namespace phi {
template <typename T, typename Context>
void PixelShuffleGradKernel(const Context& ctx,
const DenseTensor& out_grad,
int upscale_factor,
const std::string& data_format,
DenseTensor* x_grad) {
using XPUType = typename XPUTypeTrait<T>::Type;
const T* x_ptr = out_grad.data<T>();
T* y_ptr = ctx.template Alloc<T>(x_grad);
bool is_nchw = data_format == "NCHW";
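  // out_grad carries the shuffled shape: (n, c/r^2, h*r, w*r) in NCHW.
  // The backward of pixel_shuffle is pixel_unshuffle with the same factor,
  // so the dims below are read from out_grad and handed to pixel_unshuffle.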
int64_t n = out_grad.dims()[0];
int64_t xc = out_grad.dims()[is_nchw ? 1 : 3];
int64_t xh = out_grad.dims()[is_nchw ? 2 : 1];
int64_t xw = out_grad.dims()[is_nchw ? 3 : 2];
int r = pixel_unshuffle(ctx.x_context(),
reinterpret_cast<const XPUType*>(x_ptr),
reinterpret_cast<XPUType*>(y_ptr),
n,
xc,
xh,
xw,
upscale_factor,
is_nchw);
PADDLE_ENFORCE_XDNN_SUCCESS(r, "pixel_unshuffle");
}
} // namespace phi
PD_REGISTER_KERNEL(
pixel_shuffle_grad, XPU, ALL_LAYOUT, phi::PixelShuffleGradKernel, float) {}
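
Since pixel_shuffle is a pure data rearrangement, its gradient is the inverse rearrangement: grad(pixel_shuffle) = pixel_unshuffle with the same upscale factor, which is exactly what the kernel above calls. A NumPy sketch of that inverse for NCHW (pixel_unshuffle_np is an illustrative helper, not part of this commit; pixel_shuffle_np is the reference defined in the test file below):

import numpy as np

def pixel_unshuffle_np(x, down_factor):
    # Inverse of pixel_shuffle: (n, c, h*r, w*r) -> (n, c*r*r, h, w)
    n, c, hr, wr = x.shape
    r = down_factor
    h, w = hr // r, wr // r
    x = np.reshape(x, (n, c, h, r, w, r))
    x = x.transpose(0, 1, 3, 5, 2, 4)
    return np.reshape(x, (n, c * r * r, h, w))

# Round trip restores the input elementwise:
# pixel_unshuffle_np(pixel_shuffle_np(x, r), r) == x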
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/pixel_shuffle_kernel.h"
#include "paddle/phi/backends/xpu/enforce_xpu.h"
#include "paddle/phi/core/kernel_registry.h"
namespace phi {
template <typename T, typename Context>
void PixelShuffleKernel(const Context& ctx,
const DenseTensor& x,
int upscale_factor,
const std::string& data_format,
DenseTensor* out) {
using XPUType = typename XPUTypeTrait<T>::Type;
const T* x_ptr = x.data<T>();
T* y_ptr = ctx.template Alloc<T>(out);
bool is_nchw = data_format == "NCHW";
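  // Read the input dims in (n, c, h, w) order for the current layout; the
  // layout itself is passed on to the XDNN call via the is_nchw flag.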
int64_t n = x.dims()[0];
int64_t xc = x.dims()[is_nchw ? 1 : 3];
int64_t xh = x.dims()[is_nchw ? 2 : 1];
int64_t xw = x.dims()[is_nchw ? 3 : 2];
int r = pixel_shuffle(ctx.x_context(),
reinterpret_cast<const XPUType*>(x_ptr),
reinterpret_cast<XPUType*>(y_ptr),
n,
xc,
xh,
xw,
upscale_factor,
is_nchw);
PADDLE_ENFORCE_XDNN_SUCCESS(r, "pixel_shuffle");
}
} // namespace phi
PD_REGISTER_KERNEL(
pixel_shuffle, XPU, ALL_LAYOUT, phi::PixelShuffleKernel, float) {}
# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
import numpy as np
sys.path.append("..")
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import (
XPUOpTestWrapper,
create_test_class,
get_xpu_op_support_types,
)
import paddle
paddle.enable_static()
def pixel_shuffle_np(x, up_factor, data_format="NCHW"):
if data_format == "NCHW":
n, c, h, w = x.shape
new_shape = (
n,
c // (up_factor * up_factor),
up_factor,
up_factor,
h,
w,
)
# reshape to (num,output_channel,upscale_factor,upscale_factor,h,w)
npresult = np.reshape(x, new_shape)
# transpose to (num,output_channel,h,upscale_factor,w,upscale_factor)
npresult = npresult.transpose(0, 1, 4, 2, 5, 3)
oshape = [n, c // (up_factor * up_factor), h * up_factor, w * up_factor]
npresult = np.reshape(npresult, oshape)
return npresult
else:
n, h, w, c = x.shape
new_shape = (
n,
h,
w,
c // (up_factor * up_factor),
up_factor,
up_factor,
)
# reshape to (num,h,w,output_channel,upscale_factor,upscale_factor)
npresult = np.reshape(x, new_shape)
# transpose to (num,h,upscale_factor,w,upscale_factor,output_channel)
npresult = npresult.transpose(0, 1, 4, 2, 5, 3)
oshape = [n, h * up_factor, w * up_factor, c // (up_factor * up_factor)]
npresult = np.reshape(npresult, oshape)
return npresult
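
# Quick sanity check of the reference above (illustrative, not part of the
# original test file): a (1, 4, 2, 2) NCHW input with up_factor=2 maps to
# shape (1, 1, 4, 4): channels / 4, height and width * 2.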
class XPUTestPixelShuffleOp(XPUOpTestWrapper):
def __init__(self):
self.op_name = "pixel_shuffle"
self.use_dynamic_create_class = False
class TestPixelShuffleOp(XPUOpTest):
def setUp(self):
self.set_xpu()
self.op_type = "pixel_shuffle"
self.init_dtype()
self.eager_mode = True
# override
self.init_input_shape()
self.init_attr()
self.x = np.random.random(self.x_shape).astype(self.dtype)
self.y = pixel_shuffle_np(
self.x, self.attrs["upscale_factor"], self.attrs["data_format"]
)
self.inputs = {'X': self.x}
self.outputs = {'Out': self.y}
def init_input_shape(self):
self.x_shape = [2, 64, 26, 26]
def init_attr(self):
self.attrs = {'upscale_factor': 2, 'data_format': "NCHW"}
def set_xpu(self):
self.__class__.no_need_check_grad = False
self.place = paddle.XPUPlace(0)
def init_dtype(self):
self.dtype = self.in_type
def test_check_output(self):
self.check_output_with_place(self.place)
def test_check_grad(self):
self.check_grad_with_place(
self.place, ['X'], 'Out', check_eager=self.eager_mode
)
class TestNHWC(TestPixelShuffleOp):
def init_input_shape(self):
self.x_shape = [2, 64, 26, 24]
def init_attr(self):
self.attrs = {'upscale_factor': 2, 'data_format': "NHWC"}
class TestUpFactor3(TestPixelShuffleOp):
def init_input_shape(self):
self.x_shape = [2, 27, 5, 5]
def init_attr(self):
self.attrs = {'upscale_factor': 3, 'data_format': "NCHW"}
class TestUpFactor3NHWC(TestPixelShuffleOp):
def init_input_shape(self):
self.x_shape = [2, 27, 5, 9]
def init_attr(self):
self.attrs = {'upscale_factor': 3, 'data_format': "NHWC"}
support_types = get_xpu_op_support_types("pixel_shuffle")
for stype in support_types:
create_test_class(globals(), XPUTestPixelShuffleOp, stype)
if __name__ == "__main__":
unittest.main()
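
To exercise these kernels, run the test module above on a machine with an XPU device (the file name is assumed from Paddle's XPU unit-test layout):

python test_pixel_shuffle_op_xpu.py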