Unverified · Commit 22555e96 · authored by zhangyikun02, committed by GitHub

add pad3d and pad3d_grad op for xpu, test=kunlun (#48306)

Parent: ac8a4b16
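This commit makes four changes: it bumps the XPU SDK snapshot date in the external XPU CMake config from 20221120 to 20221124, registers FP32 pad3d and pad3d_grad kernels in the KL2 op map, adds the two XPU PHI kernels (which dispatch to xdnn's reflection/replication/constant pad3d routines), and adds the corresponding XPU unit tests. The diffs follow.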
@@ -10,7 +10,7 @@ set(XPU_RT_LIB_NAME "libxpurt.so")
 if(NOT DEFINED XPU_BASE_URL)
   set(XPU_BASE_URL_WITHOUT_DATE
       "https://baidu-kunlun-product.su.bcebos.com/KL-SDK/klsdk-dev")
-  set(XPU_BASE_URL "${XPU_BASE_URL_WITHOUT_DATE}/20221120")
+  set(XPU_BASE_URL "${XPU_BASE_URL_WITHOUT_DATE}/20221124")
 else()
   set(XPU_BASE_URL "${XPU_BASE_URL}")
 endif()
...
@@ -433,6 +433,8 @@ XPUOpMap& get_kl2_ops() {
                     pOpKernelType(vartype::INT64, XPUPlace())})},
     {"p_norm", XPUKernelSet({pOpKernelType(vartype::FP32, XPUPlace())})},
     {"p_norm_grad", XPUKernelSet({pOpKernelType(vartype::FP32, XPUPlace())})},
+    {"pad3d_grad", XPUKernelSet({pOpKernelType(vartype::FP32, XPUPlace())})},
+    {"pad3d", XPUKernelSet({pOpKernelType(vartype::FP32, XPUPlace())})},
     {"pool2d_grad",
      XPUKernelSet({pOpKernelType(vartype::FP32, XPUPlace()),
                    pOpKernelType(vartype::FP16, XPUPlace())})},
...
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/pad3d_grad_kernel.h"
#include "paddle/phi/backends/xpu/enforce_xpu.h"
#include "paddle/phi/backends/xpu/xpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
namespace phi {
template <typename T, typename Context>
void Pad3dGradKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& out_grad,
const IntArray& paddings,
const std::string& mode,
float pad_value,
const std::string& data_format,
DenseTensor* x_grad) {
T value = static_cast<T>(pad_value);
std::vector<int64_t> pads = paddings.GetData();
auto* d_out = &out_grad;
auto* d_in = x_grad;
auto d_in_dims = d_in->dims();
const T* d_out_data = d_out->data<T>();
T* d_in_data = dev_ctx.template Alloc<T>(d_in);
bool is_ncdhw = true;
if (data_format == "NDHWC") {
is_ncdhw = false;
}
const int num = d_in_dims[0]; // n
int channels = d_in_dims[1]; // c
int in_depth = d_in_dims[2]; // xd
int in_height = d_in_dims[3]; // xh
int in_width = d_in_dims[4]; // xw
if (data_format == "NDHWC") {
channels = d_in_dims[4];
in_depth = d_in_dims[1];
in_height = d_in_dims[2];
in_width = d_in_dims[3];
}
// reorder paddings [left, right, top, bottom, front, back] into the
// [front, back, top, bottom, left, right] order expected by xdnn
std::vector<int> pads_xpu(6);
pads_xpu[0] = pads[4]; // front
pads_xpu[1] = pads[5]; // back
pads_xpu[2] = pads[2]; // top
pads_xpu[3] = pads[3]; // bottom
pads_xpu[4] = pads[0]; // left
pads_xpu[5] = pads[1]; // right
if (mode == "reflect") {
int r = xpu::reflection_pad3d_grad(dev_ctx.x_context(),
d_out_data,
d_in_data,
num,
channels,
in_depth,
in_height,
in_width,
pads_xpu,
is_ncdhw);
PADDLE_ENFORCE_XDNN_SUCCESS(r, "reflection_pad3d_grad");
} else if (mode == "replicate") {
int r = xpu::replication_pad3d_grad(dev_ctx.x_context(),
d_out_data,
d_in_data,
num,
channels,
in_depth,
in_height,
in_width,
pads_xpu,
is_ncdhw);
PADDLE_ENFORCE_XDNN_SUCCESS(r, "replication_pad3d_grad");
} else if (mode == "constant") {
int r = xpu::constant_pad3d_grad(dev_ctx.x_context(),
d_out_data,
d_in_data,
num,
channels,
in_depth,
in_height,
in_width,
pads_xpu,
value,
is_ncdhw);
PADDLE_ENFORCE_XDNN_SUCCESS(r, "constant_pad3d_grad");
}
}
} // namespace phi
PD_REGISTER_KERNEL(pad3d_grad, XPU, ALL_LAYOUT, phi::Pad3dGradKernel, float) {}
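For intuition, here is a minimal NumPy sketch (not the kernel itself) of what constant-mode pad3d_grad computes: the input gradient is simply the output gradient cropped back to the unpadded region. The pad names below are illustrative and follow the same [left, right, top, bottom, front, back] order as the paddings above.
import numpy as np

pl, pr, pt, pb, pf, pk = 1, 2, 1, 1, 3, 4  # hypothetical pads
dout = np.random.rand(2, 3, 4 + pf + pk, 5 + pt + pb, 6 + pl + pr)  # NCDHW out_grad
d, h, w = dout.shape[2], dout.shape[3], dout.shape[4]
dx = dout[:, :, pf:d - pk, pt:h - pb, pl:w - pr]  # crop back to the input region
assert dx.shape == (2, 3, 4, 5, 6)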
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/pad3d_kernel.h"
#include "paddle/phi/backends/xpu/enforce_xpu.h"
#include "paddle/phi/backends/xpu/xpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
namespace phi {
template <typename T, typename Context>
void Pad3dKernel(const Context& dev_ctx,
const DenseTensor& x,
const IntArray& paddings,
const std::string& mode,
float pad_value,
const std::string& data_format,
DenseTensor* out) {
T value = static_cast<T>(pad_value);
std::vector<int64_t> pads = paddings.GetData();
auto in_dims = x.dims();
const T* in_data = x.data<T>();
bool is_ncdhw = true;
if (data_format == "NCDHW") {
out->Resize({in_dims[0],
in_dims[1],
in_dims[2] + pads[4] + pads[5],
in_dims[3] + pads[2] + pads[3],
in_dims[4] + pads[0] + pads[1]});
} else {
is_ncdhw = false;
out->Resize({in_dims[0],
in_dims[1] + pads[4] + pads[5],
in_dims[2] + pads[2] + pads[3],
in_dims[3] + pads[0] + pads[1],
in_dims[4]});
}
T* out_data = dev_ctx.template Alloc<T>(out);
const int num = in_dims[0]; // n
int channels = in_dims[1]; // c
int in_depth = in_dims[2]; // xd
int in_height = in_dims[3]; // xh
int in_width = in_dims[4]; // xw
if (data_format == "NDHWC") {
channels = in_dims[4];
in_depth = in_dims[1];
in_height = in_dims[2];
in_width = in_dims[3];
}
if (mode == "circular") {
PADDLE_THROW(phi::errors::External(
"XPU does not support the circular padding mode in pad3d."));
}
if (mode == "reflect") {
PADDLE_ENFORCE_GT(
in_depth,
pads[4],
errors::InvalidArgument("The depth of Input(X)'s dimension should be "
"greater than pad_front"
" in reflect mode"
", but received depth(%d) and pad_front(%d).",
in_depth,
pads[4]));
PADDLE_ENFORCE_GT(
in_depth,
pads[5],
errors::InvalidArgument("The depth of Input(X)'s dimension should be "
"greater than pad_back"
" in reflect mode"
", but received depth(%d) and pad_back(%d).",
in_depth,
pads[5]));
PADDLE_ENFORCE_GT(
in_height,
pads[2],
errors::InvalidArgument("The height of Input(X)'s dimension should be "
"greater than pad_top"
" in reflect mode"
", but received height(%d) and pad_top(%d).",
in_height,
pads[2]));
PADDLE_ENFORCE_GT(
in_height,
pads[3],
errors::InvalidArgument("The height of Input(X)'s dimension should be "
"greater than pad_bottom"
" in reflect mode"
", but received height(%d) and pad_bottom(%d).",
in_height,
pads[3]));
PADDLE_ENFORCE_GT(
in_width,
pads[0],
errors::InvalidArgument("The width of Input(X)'s dimension should be "
"greater than pad_left"
" in reflect mode"
", but received width(%d) and pad_left(%d).",
in_width,
pads[0]));
PADDLE_ENFORCE_GT(
in_width,
pads[1],
errors::InvalidArgument("The width of Input(X)'s dimension should be "
"greater than pad_right"
" in reflect mode"
", but received width(%d) and pad_right(%d).",
in_width,
pads[1]));
} else if (mode == "replicate") {
PADDLE_ENFORCE_NE(in_depth * in_height * in_width,
0,
errors::InvalidArgument(
"The input tensor size can not be 0 for circular "
"or replicate padding mode."));
}
// reorder paddings [left, right, top, bottom, front, back] into the
// [front, back, top, bottom, left, right] order expected by xdnn
std::vector<int> pads_xpu(6);
pads_xpu[0] = pads[4]; // front
pads_xpu[1] = pads[5]; // back
pads_xpu[2] = pads[2]; // top
pads_xpu[3] = pads[3]; // bottom
pads_xpu[4] = pads[0]; // left
pads_xpu[5] = pads[1]; // right
if (mode == "reflect") {
int r = xpu::reflection_pad3d(dev_ctx.x_context(),
in_data,
out_data,
num,
channels,
in_depth,
in_height,
in_width,
pads_xpu,
is_ncdhw);
PADDLE_ENFORCE_XDNN_SUCCESS(r, "reflection_pad3d");
} else if (mode == "replicate") {
int r = xpu::replication_pad3d(dev_ctx.x_context(),
in_data,
out_data,
num,
channels,
in_depth,
in_height,
in_width,
pads_xpu,
is_ncdhw);
PADDLE_ENFORCE_XDNN_SUCCESS(r, "replication_pad3d");
} else if (mode == "constant") {
int r = xpu::constant_pad3d(dev_ctx.x_context(),
in_data,
out_data,
num,
channels,
in_depth,
in_height,
in_width,
pads_xpu,
value,
is_ncdhw);
PADDLE_ENFORCE_XDNN_SUCCESS(r, "constant_pad3d");
}
}
} // namespace phi
PD_REGISTER_KERNEL(pad3d, XPU, ALL_LAYOUT, phi::Pad3dKernel, float) {}
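As a quick end-to-end sketch of the newly registered forward kernel, assuming a Kunlun XPU device is visible to this build of Paddle; the pad order is [left, right, top, bottom, front, back], matching the unit tests below:
import paddle
import paddle.nn.functional as F

paddle.set_device("xpu")  # assumes an XPU device is available
x = paddle.rand([2, 3, 4, 5, 6])  # NCDHW
# reflect mode requires every pad to be smaller than the padded dimension
y = F.pad(x, pad=[1, 2, 1, 1, 1, 2], mode="reflect", data_format="NCDHW")
print(y.shape)  # [2, 3, 7, 7, 9]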
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import sys
sys.path.append("..")
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import (
create_test_class,
get_xpu_op_support_types,
XPUOpTestWrapper,
)
from paddle.fluid import Program, program_guard, Executor, default_main_program
paddle.enable_static()
class XPUTestPad3dOp(XPUOpTestWrapper):
def __init__(self):
self.op_name = 'pad3d'
class TestPad3dOp(XPUOpTest):
def setUp(self):
paddle.enable_static()
self.op_type = "pad3d"
self.dtype = self.in_type
self.place = paddle.XPUPlace(0)
self.value = 0.0
self.initTestCase()
self.python_api = paddle.nn.functional.pad
self.inputs = {'X': np.random.random(self.shape).astype(self.dtype)}
self.attrs = {}
if self.variable_paddings:
self.attrs['paddings'] = []
self.inputs['Paddings'] = (
np.array(self.paddings).flatten().astype("int32")
)
else:
self.attrs['paddings'] = (
np.array(self.paddings).flatten().astype("int32")
)
self.attrs['value'] = self.value
self.attrs['mode'] = self.mode
self.attrs['data_format'] = self.data_format
if self.data_format == "NCDHW":
paddings = [
(0, 0),
(0, 0),
(self.paddings[4], self.paddings[5]),
(self.paddings[2], self.paddings[3]),
(self.paddings[0], self.paddings[1]),
]
else:
paddings = [
(0, 0),
(self.paddings[4], self.paddings[5]),
(self.paddings[2], self.paddings[3]),
(self.paddings[0], self.paddings[1]),
(0, 0),
]
if self.mode == "constant":
out = np.pad(
self.inputs['X'],
paddings,
mode=self.mode,
constant_values=self.value,
)
elif self.mode == "reflect":
out = np.pad(self.inputs['X'], paddings, mode=self.mode)
elif self.mode == "replicate":
out = np.pad(self.inputs['X'], paddings, mode="edge")
elif self.mode == "circular":
out = np.pad(self.inputs['X'], paddings, mode="wrap")
self.outputs = {'Out': out}
def test_check_output(self):
self.check_output(check_eager=True)
def test_check_grad_normal(self):
self.check_grad(['X'], 'Out', check_eager=True)
def initTestCase(self):
self.shape = (2, 3, 4, 5, 6)
self.paddings = [0, 0, 0, 0, 0, 0]
self.mode = "constant"
self.data_format = "NCDHW"
self.value = 0.0
self.variable_paddings = False
class TestCase1(TestPad3dOp):
def initTestCase(self):
self.shape = (2, 3, 4, 5, 6)
self.paddings = [0, 1, 2, 3, 4, 5]
self.mode = "constant"
self.data_format = "NCDHW"
self.value = 1.0
self.variable_paddings = False
class TestCase2(TestPad3dOp):
def initTestCase(self):
self.shape = (2, 3, 4, 5, 6)
self.paddings = [1, 1, 1, 1, 1, 1]
self.mode = "constant"
self.data_format = "NDHWC"
self.value = 1.0
self.variable_paddings = False
class TestCase3(TestPad3dOp):
def initTestCase(self):
self.shape = (2, 3, 4, 5, 6)
self.paddings = [0, 1, 1, 0, 2, 3]
self.mode = "reflect"
self.data_format = "NCDHW"
self.variable_paddings = False
class TestCase4(TestPad3dOp):
def initTestCase(self):
self.shape = (4, 4, 4, 4, 4)
self.paddings = [0, 1, 2, 1, 2, 3]
self.mode = "reflect"
self.data_format = "NDHWC"
self.variable_paddings = False
class TestCase5(TestPad3dOp):
def initTestCase(self):
self.shape = (2, 3, 4, 5, 6)
self.paddings = [0, 1, 2, 3, 2, 1]
self.mode = "replicate"
self.data_format = "NCDHW"
self.variable_paddings = False
class TestCase6(TestPad3dOp):
def initTestCase(self):
self.shape = (4, 4, 4, 4, 4)
self.paddings = [5, 4, 2, 1, 2, 3]
self.mode = "replicate"
self.data_format = "NDHWC"
self.variable_paddings = False
class TestCase7(TestPad3dOp):
def initTestCase(self):
self.shape = (2, 3, 4, 5, 6)
self.paddings = [0, 1, 2, 3, 4, 5]
self.mode = "constant"
self.data_format = "NCDHW"
self.value = 1.0
self.variable_paddings = True
class TestCase8(TestPad3dOp):
def initTestCase(self):
self.shape = (2, 3, 4, 5, 6)
self.paddings = [0, 1, 2, 3, 4, 5]
self.mode = "constant"
self.data_format = "NDHWC"
self.value = 1.0
self.variable_paddings = True
class TestPadAPI(unittest.TestCase):
def setUp(self):
self.places = [paddle.XPUPlace(0)]
self.dtype = self.in_type
def check_static_result_1(self, place):
paddle.enable_static()
with program_guard(Program(), Program()):
input_shape = (1, 2, 3, 4, 5)
pad = [1, 2, 1, 1, 3, 4]
mode = "constant"
value = 100
input_data = np.random.rand(*input_shape).astype(self.dtype)
x = paddle.fluid.data(name="x", shape=input_shape)
result = F.pad(
x=x, pad=pad, value=value, mode=mode, data_format="NCDHW"
)
exe = Executor(place)
fetches = exe.run(
default_main_program(),
feed={"x": input_data},
fetch_list=[result],
)
np_out = self._get_numpy_out(input_data, pad, mode, value)
np.testing.assert_allclose(fetches[0], np_out, rtol=1e-05)
def check_static_result_2(self, place):
paddle.enable_static()
with program_guard(Program(), Program()):
input_shape = (2, 3, 4, 5, 6)
pad = [1, 2, 1, 1, 1, 2]
mode = "reflect"
input_data = np.random.rand(*input_shape).astype(self.dtype)
x = paddle.fluid.data(name="x", shape=input_shape)
result1 = F.pad(x=x, pad=pad, mode=mode, data_format="NCDHW")
result2 = F.pad(x=x, pad=pad, mode=mode, data_format="NDHWC")
exe = Executor(place)
fetches = exe.run(
default_main_program(),
feed={"x": input_data},
fetch_list=[result1, result2],
)
np_out1 = self._get_numpy_out(
input_data, pad, mode, data_format="NCDHW"
)
np_out2 = self._get_numpy_out(
input_data, pad, mode, data_format="NDHWC"
)
np.testing.assert_allclose(fetches[0], np_out1, rtol=1e-05)
np.testing.assert_allclose(fetches[1], np_out2, rtol=1e-05)
def check_static_result_3(self, place):
paddle.enable_static()
with program_guard(Program(), Program()):
input_shape = (2, 3, 4, 5, 6)
pad = [1, 2, 1, 1, 3, 4]
mode = "replicate"
input_data = np.random.rand(*input_shape).astype(self.dtype)
x = paddle.fluid.data(name="x", shape=input_shape)
result1 = F.pad(x=x, pad=pad, mode=mode, data_format="NCDHW")
result2 = F.pad(x=x, pad=pad, mode=mode, data_format="NDHWC")
exe = Executor(place)
fetches = exe.run(
default_main_program(),
feed={"x": input_data},
fetch_list=[result1, result2],
)
np_out1 = self._get_numpy_out(
input_data, pad, mode, data_format="NCDHW"
)
np_out2 = self._get_numpy_out(
input_data, pad, mode, data_format="NDHWC"
)
np.testing.assert_allclose(fetches[0], np_out1, rtol=1e-05)
np.testing.assert_allclose(fetches[1], np_out2, rtol=1e-05)
def _get_numpy_out(
self, input_data, pad, mode, value=0, data_format="NCDHW"
):
if mode == "constant" and len(pad) == len(input_data.shape) * 2:
pad = np.reshape(pad, (-1, 2)).tolist()
elif data_format == "NCDHW":
pad = [
(0, 0),
(0, 0),
(pad[4], pad[5]),
(pad[2], pad[3]),
(pad[0], pad[1]),
]
elif data_format == "NDHWC":
pad = [
(0, 0),
(pad[4], pad[5]),
(pad[2], pad[3]),
(pad[0], pad[1]),
(0, 0),
]
elif data_format == "NCHW":
pad = [
(0, 0),
(0, 0),
(pad[2], pad[3]),
(pad[0], pad[1]),
]
elif data_format == "NHWC":
pad = [
(0, 0),
(pad[2], pad[3]),
(pad[0], pad[1]),
(0, 0),
]
elif data_format == "NCL":
pad = [
(0, 0),
(0, 0),
(pad[0], pad[1]),
]
elif data_format == "NLC":
pad = [
(0, 0),
(pad[0], pad[1]),
(0, 0),
]
if mode == "constant":
out = np.pad(input_data, pad, mode=mode, constant_values=value)
elif mode == "reflect":
out = np.pad(input_data, pad, mode=mode)
elif mode == "replicate":
out = np.pad(input_data, pad, mode="edge")
elif mode == "circular":
out = np.pad(input_data, pad, mode="wrap")
return out
def test_static(self):
for place in self.places:
self.check_static_result_1(place=place)
self.check_static_result_2(place=place)
self.check_static_result_3(place=place)
def test_dygraph_1(self):
paddle.disable_static()
input_shape = (1, 2, 3, 4, 5)
pad = [1, 2, 1, 1, 3, 4]
pad_3 = [1, 2, 1, 1, 3, 4, 5, 6, 7, 8]
mode = "constant"
value = 100
input_data = np.random.rand(*input_shape).astype(self.dtype)
np_out1 = self._get_numpy_out(
input_data, pad, mode, value, data_format="NCDHW"
)
np_out2 = self._get_numpy_out(
input_data, pad, mode, value, data_format="NDHWC"
)
np_out3 = self._get_numpy_out(
input_data, pad_3, mode, value, data_format="NCDHW"
)
tensor_data = paddle.to_tensor(input_data)
y1 = F.pad(
tensor_data,
pad=pad,
mode=mode,
value=value,
data_format="NCDHW",
)
y2 = F.pad(
tensor_data,
pad=pad,
mode=mode,
value=value,
data_format="NDHWC",
)
y3 = F.pad(
tensor_data,
pad=pad_3,
mode=mode,
value=value,
data_format="NCDHW",
)
np.testing.assert_allclose(y1.numpy(), np_out1, rtol=1e-05)
np.testing.assert_allclose(y2.numpy(), np_out2, rtol=1e-05)
np.testing.assert_allclose(y3.numpy(), np_out3, rtol=1e-05)
def test_dygraph_2(self):
paddle.disable_static()
input_shape = (2, 3, 4, 5)
pad = [1, 1, 3, 4]
pad_3 = [1, 2, 1, 1, 3, 4, 5, 6]
mode = "constant"
value = 100
input_data = np.random.rand(*input_shape).astype(self.dtype)
np_out1 = self._get_numpy_out(
input_data, pad, mode, value, data_format="NCHW"
)
np_out2 = self._get_numpy_out(
input_data, pad, mode, value, data_format="NHWC"
)
np_out3 = self._get_numpy_out(
input_data, pad_3, mode, value, data_format="NCHW"
)
tensor_data = paddle.to_tensor(input_data)
tensor_pad = paddle.to_tensor(pad, dtype="int32")
y1 = F.pad(
tensor_data,
pad=tensor_pad,
mode=mode,
value=value,
data_format="NCHW",
)
y2 = F.pad(
tensor_data,
pad=tensor_pad,
mode=mode,
value=value,
data_format="NHWC",
)
y3 = F.pad(
tensor_data,
pad=pad_3,
mode=mode,
value=value,
data_format="NCHW",
)
np.testing.assert_allclose(y1.numpy(), np_out1, rtol=1e-05)
np.testing.assert_allclose(y2.numpy(), np_out2, rtol=1e-05)
np.testing.assert_allclose(y3.numpy(), np_out3, rtol=1e-05)
def test_dygraph_3(self):
paddle.disable_static()
input_shape = (3, 4, 5)
pad = [3, 4]
pad_3 = [3, 4, 5, 6, 7, 8]
mode = "constant"
value = 100
input_data = np.random.rand(*input_shape).astype(self.dtype)
np_out1 = self._get_numpy_out(
input_data, pad, mode, value, data_format="NCL"
)
np_out2 = self._get_numpy_out(
input_data, pad, mode, value, data_format="NLC"
)
np_out3 = self._get_numpy_out(
input_data, pad_3, mode, value, data_format="NCL"
)
tensor_data = paddle.to_tensor(input_data)
tensor_pad = paddle.to_tensor(pad, dtype="int32")
y1 = F.pad(
tensor_data,
pad=tensor_pad,
mode=mode,
value=value,
data_format="NCL",
)
y2 = F.pad(
tensor_data,
pad=tensor_pad,
mode=mode,
value=value,
data_format="NLC",
)
y3 = F.pad(
tensor_data,
pad=pad_3,
mode=mode,
value=value,
data_format="NCL",
)
np.testing.assert_allclose(y1.numpy(), np_out1, rtol=1e-05)
np.testing.assert_allclose(y2.numpy(), np_out2, rtol=1e-05)
np.testing.assert_allclose(y3.numpy(), np_out3, rtol=1e-05)
class TestPad3dAPI(unittest.TestCase):
def _get_numpy_out(
self, input_data, pad, mode, value=0.0, data_format="NCDHW"
):
if data_format == "NCDHW":
pad = [
(0, 0),
(0, 0),
(pad[4], pad[5]),
(pad[2], pad[3]),
(pad[0], pad[1]),
]
else:
pad = [
(0, 0),
(pad[4], pad[5]),
(pad[2], pad[3]),
(pad[0], pad[1]),
(0, 0),
]
if mode == "constant":
out = np.pad(input_data, pad, mode=mode, constant_values=value)
elif mode == "reflect":
out = np.pad(input_data, pad, mode=mode)
elif mode == "replicate":
out = np.pad(input_data, pad, mode="edge")
elif mode == "circular":
out = np.pad(input_data, pad, mode="wrap")
return out
def setUp(self):
self.places = [paddle.XPUPlace(0)]
self.dtype = self.in_type
def test_class(self):
paddle.disable_static()
for place in self.places:
input_shape = (3, 4, 5, 6, 7)
pad = [1, 2, 2, 1, 1, 0]
pad_int = 1
value = 100
input_data = np.random.rand(*input_shape).astype(self.dtype)
pad_reflection = nn.Pad3D(padding=pad, mode="reflect")
pad_replication = nn.Pad3D(padding=pad, mode="replicate")
pad_constant = nn.Pad3D(
padding=pad, mode="constant", value=value
)
pad_constant_int = nn.Pad3D(
padding=pad_int, mode="constant", value=value
)
pad_circular = nn.Pad3D(padding=pad, mode="circular")
data = paddle.to_tensor(input_data)
output = pad_reflection(data)
np_out = self._get_numpy_out(
input_data, pad, "reflect", data_format="NCDHW"
)
np.testing.assert_allclose(output.numpy(), np_out, rtol=1e-05)
output = pad_replication(data)
np_out = self._get_numpy_out(
input_data, pad, "replicate", data_format="NCDHW"
)
np.testing.assert_allclose(output.numpy(), np_out, rtol=1e-05)
output = pad_constant(data)
np_out = self._get_numpy_out(
input_data,
pad,
"constant",
value=value,
data_format="NCDHW",
)
np.testing.assert_allclose(output.numpy(), np_out, rtol=1e-05)
output = pad_constant_int(data)
np_out = self._get_numpy_out(
input_data,
[pad_int] * 6,
"constant",
value=value,
data_format="NCDHW",
)
np.testing.assert_allclose(output.numpy(), np_out, rtol=1e-05)
def test_pad_tensor(self):
paddle.disable_static()
for place in self.places:
input_shape = (3, 4, 5, 6, 7)
pad = [1, 2, 2, 1, 1, 0]
pad_tensor = paddle.to_tensor(pad)
input_data = np.random.rand(*input_shape).astype(np.float32)
pad_reflection_ncdhw = nn.Pad3D(
padding=pad_tensor, mode="reflect", data_format="NCDHW"
)
pad_reflection_ndhwc = nn.Pad3D(
padding=pad_tensor, mode="reflect", data_format="NDHWC"
)
data = paddle.to_tensor(input_data)
output = pad_reflection_ncdhw(data)
np_out = self._get_numpy_out(
input_data, pad, "reflect", data_format="NCDHW"
)
np.testing.assert_allclose(output.numpy(), np_out, rtol=1e-05)
output = pad_reflection_ndhwc(data)
np_out = self._get_numpy_out(
input_data, pad, "reflect", data_format="NDHWC"
)
np.testing.assert_allclose(output.numpy(), np_out, rtol=1e-05)
class TestPad3dOpError(unittest.TestCase):
def setUp(self):
self.places = [paddle.XPUPlace(0)]
self.dtype = self.in_type
def test_errors(self):
def test_variable():
input_shape = (1, 2, 3, 4, 5)
data = np.random.rand(*input_shape).astype(self.dtype)
y = F.pad(x=data, pad=[1, 1, 1, 1, 1, 1], data_format="NCDHW")
def test_reflect_1():
input_shape = (1, 2, 3, 4, 5)
data = np.random.rand(*input_shape).astype(self.dtype)
x = paddle.to_tensor(data)
y = F.pad(
x,
pad=[5, 6, 1, 1, 1, 1],
value=1,
mode='reflect',
data_format="NCDHW",
)
def test_reflect_2():
input_shape = (1, 2, 3, 4, 5)
data = np.random.rand(*input_shape).astype(self.dtype)
x = paddle.to_tensor(data)
y = F.pad(
x,
pad=[1, 1, 4, 3, 1, 1],
value=1,
mode='reflect',
data_format="NCDHW",
)
def test_reflect_3():
input_shape = (1, 2, 3, 4, 5)
data = np.random.rand(*input_shape).astype(self.dtype)
x = paddle.to_tensor(data)
y = F.pad(
x,
pad=[1, 1, 1, 1, 2, 3],
value=1,
mode='reflect',
data_format="NCDHW",
)
def test_replicate_1():
input_shape = (1, 2, 0, 4, 5)
data = np.random.rand(*input_shape).astype(self.dtype)
x = paddle.to_tensor(data)
y = F.pad(
x,
pad=[1, 1, 1, 1, 2, 3],
mode='replicate',
data_format="NCDHW",
)
paddle.disable_static()
for place in self.places:
self.assertRaises(ValueError, test_variable)
self.assertRaises(Exception, test_reflect_1)
self.assertRaises(Exception, test_reflect_2)
self.assertRaises(Exception, test_reflect_3)
self.assertRaises(Exception, test_replicate_1)
paddle.enable_static()
class TestPadDataformatError(unittest.TestCase):
def test_errors(self):
def test_ncl():
input_shape = (1, 2, 3, 4)
pad = paddle.to_tensor(np.array([2, 1, 2, 1]).astype('int32'))
data = (
np.arange(np.prod(input_shape), dtype=np.float64).reshape(
input_shape
)
+ 1
)
my_pad = nn.Pad1D(
padding=pad, mode="replicate", data_format="NCL"
)
data = paddle.to_tensor(data)
result = my_pad(data)
def test_nchw():
input_shape = (1, 2, 4)
pad = paddle.to_tensor(np.array([2, 1, 2, 1]).astype('int32'))
data = (
np.arange(np.prod(input_shape), dtype=np.float64).reshape(
input_shape
)
+ 1
)
my_pad = nn.Pad1D(
padding=pad, mode="replicate", data_format="NCHW"
)
data = paddle.to_tensor(data)
result = my_pad(data)
def test_ncdhw():
input_shape = (1, 2, 3, 4)
pad = paddle.to_tensor(np.array([2, 1, 2, 1]).astype('int32'))
data = (
np.arange(np.prod(input_shape), dtype=np.float64).reshape(
input_shape
)
+ 1
)
my_pad = nn.Pad1D(
padding=pad, mode="replicate", data_format="NCDHW"
)
data = paddle.to_tensor(data)
result = my_pad(data)
self.assertRaises(AssertionError, test_ncl)
self.assertRaises(AssertionError, test_nchw)
self.assertRaises(AssertionError, test_ncdhw)
support_types = get_xpu_op_support_types('pad3d')
for stype in support_types:
create_test_class(globals(), XPUTestPad3dOp, stype)
if __name__ == '__main__':
unittest.main()
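Note that get_xpu_op_support_types('pad3d') reflects the KL2 op map above, which registers pad3d for FP32 only, so the generated test classes run in float32.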