Unverified · Commit 2ed84a67, authored by littletomatodonkey, committed by GitHub

Add API for pad op. (#27943)

* add pad apis
* rm pad2d test_layer
* fix code example
Parent 3718b2e7
No related merge requests
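For orientation before the diffs: a minimal usage sketch of the consolidated pad API this commit tests, based on the calls exercised in the hunks below (the tensor shape, pad widths and values here are illustrative, not taken from the tests):

```python
import numpy as np
import paddle
import paddle.nn as nn
import paddle.nn.functional as F

paddle.disable_static()

x = paddle.to_tensor(np.random.rand(1, 3, 4, 5).astype("float32"))  # NCHW layout

# Layer form: a single Pad2D class; mode selects reflect / replicate / constant / circular.
pad_reflect = nn.Pad2D(padding=[1, 1, 2, 2], mode="reflect")
y = pad_reflect(x)

# Functional form: pad, mode, value and data_format mirror the layer arguments.
z = F.pad(x, pad=[1, 1, 2, 2], mode="constant", value=100, data_format="NCHW")
print(y.shape, z.shape)
```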
@@ -606,20 +606,6 @@ class TestBilinearInterpOpAPI(unittest.TestCase):
self.assertTrue(np.allclose(res, expect_res))
class TestUpsampleBilinear2dInterpOpAPI2_0(unittest.TestCase):
def test_case(self):
# dygraph
x_data = np.random.random((1, 3, 6, 6)).astype("float32")
upsample = paddle.nn.UpsamplingBilinear2d(scale_factor=[2, 2])
with fluid.dygraph.guard():
x = fluid.dygraph.to_variable(x_data)
interp = upsample(x)
expect = bilinear_interp_np(
x_data, out_h=12, out_w=12, align_corners=True)
self.assertTrue(np.allclose(interp.numpy(), expect))
class TestBilinearInterpOpAPI_dy(unittest.TestCase):
def test_case(self):
import paddle
......
@@ -316,23 +316,6 @@ class TestLayer(LayerTest):
self.assertTrue(np.allclose(static_ret, dy_ret_value))
def test_pad2d(self):
with self.static_graph():
t = layers.data(name='t', shape=[-1, 3, 5, 5], dtype='float32')
ret = layers.pad2d(t, paddings=[1, 1, 1, 1])
static_ret = self.get_static_graph_result(
feed={'t': np.ones(
[3, 3, 5, 5], dtype='float32')},
fetch_list=[ret])[0]
with self.dynamic_graph():
t = np.ones([3, 3, 5, 5], dtype='float32')
my_pad2d = paddle.nn.layer.Pad2D(paddings=1)
dy_ret = my_pad2d(base.to_variable(t))
dy_ret_value = dy_ret.numpy()
self.assertTrue(np.allclose(static_ret, dy_ret_value))
def test_matmul(self):
with self.static_graph():
t = layers.data(name='t', shape=[3, 3], dtype='float32')
......
@@ -526,20 +526,6 @@ class TestNearestAPI(unittest.TestCase):
self.assertTrue(np.allclose(results[i + 1], expect_res))
class TestUpsampleNearest2dInterpOpAPI2_0(unittest.TestCase):
def test_case(self):
# dygraph
x_data = np.random.random((1, 3, 6, 6)).astype("float32")
upsample = paddle.nn.UpsamplingNearest2d(scale_factor=[2, 2])
with fluid.dygraph.guard():
x = fluid.dygraph.to_variable(x_data)
interp = upsample(x)
expect = nearest_neighbor_interp_np(
x_data, out_h=12, out_w=12, align_corners=False)
self.assertTrue(np.allclose(interp.numpy(), expect))
class TestNearestInterpException(unittest.TestCase):
def test_exception(self):
input = fluid.data(name="input", shape=[1, 3, 6, 6], dtype="float32")
......
@@ -314,7 +314,6 @@ class TestPadAPI(unittest.TestCase):
def test_dygraph_1(self):
paddle.disable_static()
input_shape = (1, 2, 3, 4, 5)
pad = [1, 2, 1, 1, 3, 4]
mode = "constant"
@@ -342,7 +341,6 @@ class TestPadAPI(unittest.TestCase):
def test_dygraph_2(self):
paddle.disable_static()
input_shape = (2, 3, 4, 5)
pad = [1, 1, 3, 4]
mode = "constant"
@@ -370,38 +368,8 @@ class TestPadAPI(unittest.TestCase):
self.assertTrue(np.allclose(y1.numpy(), np_out1))
self.assertTrue(np.allclose(y2.numpy(), np_out2))
def test_dygraph_2(self):
paddle.disable_static()
input_shape = (2, 3, 4, 5)
pad = [1, 1, 3, 4]
mode = "constant"
value = 100
input_data = np.random.rand(*input_shape).astype(np.float32)
np_out1 = self._get_numpy_out(
input_data, pad, mode, value, data_format="NCHW")
np_out2 = self._get_numpy_out(
input_data, pad, mode, value, data_format="NHWC")
tensor_data = paddle.to_tensor(input_data)
tensor_pad = paddle.to_tensor(pad, dtype="int32")
y1 = F.pad(tensor_data,
pad=tensor_pad,
mode=mode,
value=value,
data_format="NCHW")
y2 = F.pad(tensor_data,
pad=tensor_pad,
mode=mode,
value=value,
data_format="NHWC")
self.assertTrue(np.allclose(y1.numpy(), np_out1))
self.assertTrue(np.allclose(y2.numpy(), np_out2))
def test_dygraph_3(self):
paddle.disable_static()
input_shape = (3, 4, 5)
pad = [3, 4]
mode = "constant"
@@ -455,6 +423,8 @@ class TestPad1dAPI(unittest.TestCase):
out = np.pad(input_data, pad, mode=mode)
elif mode == "replicate":
out = np.pad(input_data, pad, mode="edge")
elif mode == "circular":
out = np.pad(input_data, pad, mode="wrap")
return out
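The hunk above extends the NumPy reference helper so that "circular" padding is checked against np.pad's "wrap" mode. A small standalone sketch of that equivalence for the 1-D case (the shape and pad widths here are illustrative, not taken from the tests):

```python
# Check: Paddle's "circular" mode vs. NumPy's "wrap" mode for NCL data.
import numpy as np
import paddle
import paddle.nn as nn

paddle.disable_static()

data = np.arange(12, dtype="float32").reshape(1, 2, 6)  # NCL layout
pad = [2, 1]  # pad_left, pad_right on the L axis

out = nn.Pad1D(padding=pad, mode="circular")(paddle.to_tensor(data)).numpy()

# NumPy reference: only the last axis is padded; batch and channel stay untouched.
ref = np.pad(data, [(0, 0), (0, 0), (pad[0], pad[1])], mode="wrap")
assert np.allclose(out, ref)
```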
@@ -471,9 +441,10 @@ class TestPad1dAPI(unittest.TestCase):
value = 100
input_data = np.random.rand(*input_shape).astype(np.float32)
pad_reflection = nn.ReflectionPad1d(padding=pad)
pad_replication = nn.ReplicationPad1d(padding=pad)
pad_constant = nn.ConstantPad1d(padding=pad, value=value)
pad_reflection = nn.Pad1D(padding=pad, mode="reflect")
pad_replication = nn.Pad1D(padding=pad, mode="replicate")
pad_constant = nn.Pad1D(padding=pad, mode="constant", value=value)
pad_circular = nn.Pad1D(padding=pad, mode="circular")
data = paddle.to_tensor(input_data)
@@ -492,6 +463,11 @@ class TestPad1dAPI(unittest.TestCase):
input_data, pad, "constant", value=value, data_format="NCL")
self.assertTrue(np.allclose(output.numpy(), np_out))
output = pad_circular(data)
np_out = self._get_numpy_out(
input_data, pad, "circular", value=value, data_format="NCL")
self.assertTrue(np.allclose(output.numpy(), np_out))
class TestPad2dAPI(unittest.TestCase):
def _get_numpy_out(self,
@@ -521,6 +497,8 @@ class TestPad2dAPI(unittest.TestCase):
out = np.pad(input_data, pad, mode=mode)
elif mode == "replicate":
out = np.pad(input_data, pad, mode="edge")
elif mode == "circular":
out = np.pad(input_data, pad, mode="wrap")
return out
@@ -537,10 +515,10 @@ class TestPad2dAPI(unittest.TestCase):
value = 100
input_data = np.random.rand(*input_shape).astype(np.float32)
pad_reflection = nn.ReflectionPad2d(padding=pad)
pad_replication = nn.ReplicationPad2d(padding=pad)
pad_constant = nn.ConstantPad2d(padding=pad, value=value)
pad_zero = nn.ZeroPad2d(padding=pad)
pad_reflection = nn.Pad2D(padding=pad, mode="reflect")
pad_replication = nn.Pad2D(padding=pad, mode="replicate")
pad_constant = nn.Pad2D(padding=pad, mode="constant", value=value)
pad_circular = nn.Pad2D(padding=pad, mode="circular")
data = paddle.to_tensor(input_data)
@@ -559,9 +537,9 @@ class TestPad2dAPI(unittest.TestCase):
input_data, pad, "constant", value=value, data_format="NCHW")
self.assertTrue(np.allclose(output.numpy(), np_out))
output = pad_zero(data)
output = pad_circular(data)
np_out = self._get_numpy_out(
input_data, pad, "constant", value=0, data_format="NCHW")
input_data, pad, "circular", data_format="NCHW")
self.assertTrue(np.allclose(output.numpy(), np_out))
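In the 2-D hunk above, the old ZeroPad2d assertion is replaced by a circular-mode check. Under the unified class, zero padding is just the constant mode with value=0, so no coverage is lost; a sketch of that equivalence (the concrete shape and pad widths are illustrative):

```python
import numpy as np
import paddle
import paddle.nn as nn

paddle.disable_static()

x_np = np.ones((1, 1, 2, 2), dtype="float32")
padding = [1, 0, 1, 2]  # left, right, top, bottom for NCHW

out = nn.Pad2D(padding=padding, mode="constant", value=0.0)(paddle.to_tensor(x_np))

# NumPy reference, using the same (top, bottom), (left, right) ordering as the helper above.
ref = np.pad(x_np, [(0, 0), (0, 0), (1, 2), (1, 0)], mode="constant")
assert np.allclose(out.numpy(), ref)
```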
@@ -595,6 +573,8 @@ class TestPad3dAPI(unittest.TestCase):
out = np.pad(input_data, pad, mode=mode)
elif mode == "replicate":
out = np.pad(input_data, pad, mode="edge")
elif mode == "circular":
out = np.pad(input_data, pad, mode="wrap")
return out
@@ -611,11 +591,18 @@ class TestPad3dAPI(unittest.TestCase):
value = 100
input_data = np.random.rand(*input_shape).astype(np.float32)
pad_replication = nn.ReplicationPad3d(padding=pad)
pad_constant = nn.ConstantPad3d(padding=pad, value=value)
pad_reflection = nn.Pad3D(padding=pad, mode="reflect")
pad_replication = nn.Pad3D(padding=pad, mode="replicate")
pad_constant = nn.Pad3D(padding=pad, mode="constant", value=value)
pad_circular = nn.Pad3D(padding=pad, mode="circular")
data = paddle.to_tensor(input_data)
output = pad_reflection(data)
np_out = self._get_numpy_out(
input_data, pad, "reflect", data_format="NCDHW")
self.assertTrue(np.allclose(output.numpy(), np_out))
output = pad_replication(data)
np_out = self._get_numpy_out(
input_data, pad, "replicate", data_format="NCDHW")
@@ -626,6 +613,11 @@ class TestPad3dAPI(unittest.TestCase):
input_data, pad, "constant", value=value, data_format="NCDHW")
self.assertTrue(np.allclose(output.numpy(), np_out))
output = pad_circular(data)
np_out = self._get_numpy_out(
input_data, pad, "circular", data_format="NCDHW")
self.assertTrue(np.allclose(output.numpy(), np_out))
class TestPad3dOpError(unittest.TestCase):
def test_errors(self):
@@ -673,32 +665,30 @@ class TestPad3dOpError(unittest.TestCase):
class TestPadDataformatError(unittest.TestCase):
def test_errors(self):
def test_ncl():
paddle.disable_static(paddle.CPUPlace())
input_shape = (1, 2, 3, 4)
pad = paddle.to_tensor(np.array([2, 1, 2, 1]).astype('int32'))
data = np.arange(
np.prod(input_shape), dtype=np.float64).reshape(input_shape) + 1
my_pad = nn.ReplicationPad1d(padding=pad, data_format="NCL")
my_pad = nn.Pad1D(padding=pad, mode="replicate", data_format="NCL")
data = paddle.to_tensor(data)
result = my_pad(data)
def test_nchw():
paddle.disable_static(paddle.CPUPlace())
input_shape = (1, 2, 4)
pad = paddle.to_tensor(np.array([2, 1, 2, 1]).astype('int32'))
data = np.arange(
np.prod(input_shape), dtype=np.float64).reshape(input_shape) + 1
my_pad = nn.ReplicationPad1d(padding=pad, data_format="NCHW")
my_pad = nn.Pad1D(padding=pad, mode="replicate", data_format="NCHW")
data = paddle.to_tensor(data)
result = my_pad(data)
def test_ncdhw():
paddle.disable_static(paddle.CPUPlace())
input_shape = (1, 2, 3, 4)
pad = paddle.to_tensor(np.array([2, 1, 2, 1]).astype('int32'))
data = np.arange(
np.prod(input_shape), dtype=np.float64).reshape(input_shape) + 1
my_pad = nn.ReplicationPad1d(padding=pad, data_format="NCDHW")
my_pad = nn.Pad1D(
padding=pad, mode="replicate", data_format="NCDHW")
data = paddle.to_tensor(data)
result = my_pad(data)
......
@@ -71,22 +71,16 @@ from .layer.activation import Tanhshrink #DEFINE_ALIAS
from .layer.activation import ThresholdedReLU #DEFINE_ALIAS
from .layer.activation import LogSoftmax #DEFINE_ALIAS
from .layer.activation import Maxout #DEFINE_ALIAS
from .layer.common import ReflectionPad1d #DEFINE_ALIAS
from .layer.common import ReplicationPad1d #DEFINE_ALIAS
from .layer.common import ConstantPad1d #DEFINE_ALIAS
from .layer.common import ReflectionPad2d #DEFINE_ALIAS
from .layer.common import ReplicationPad2d #DEFINE_ALIAS
from .layer.common import ConstantPad2d #DEFINE_ALIAS
from .layer.common import ZeroPad2d #DEFINE_ALIAS
from .layer.common import ReplicationPad3d #DEFINE_ALIAS
from .layer.common import ConstantPad3d #DEFINE_ALIAS
from .layer.common import BilinearTensorProduct #DEFINE_ALIAS
from .layer.common import Pool2D #DEFINE_ALIAS
from .layer.common import Pad1D #DEFINE_ALIAS
from .layer.common import Pad2D #DEFINE_ALIAS
from .layer.common import Pad3D #DEFINE_ALIAS
from .layer.common import CosineSimilarity #DEFINE_ALIAS
from .layer.common import Embedding #DEFINE_ALIAS
from .layer.common import Linear #DEFINE_ALIAS
from .layer.common import Flatten #DEFINE_ALIAS
from .layer.common import Upsample #DEFINE_ALIAS
from .layer.common import UpsamplingNearest2d #DEFINE_ALIAS
from .layer.common import UpsamplingBilinear2d #DEFINE_ALIAS
from .layer.common import Bilinear #DEFINE_ALIAS
from .layer.common import Dropout #DEFINE_ALIAS
from .layer.common import Dropout2d #DEFINE_ALIAS
......
@@ -44,23 +44,14 @@ from .activation import LogSoftmax #DEFINE_ALIAS
from .common import BilinearTensorProduct #DEFINE_ALIAS
from .common import Bilinear #DEFINE_ALIAS
from .common import Pool2D #DEFINE_ALIAS
from .common import Pad1D #DEFINE_ALIAS
from .common import Pad2D #DEFINE_ALIAS
from .common import ReflectionPad1d #DEFINE_ALIAS
from .common import ReplicationPad1d #DEFINE_ALIAS
from .common import ConstantPad1d #DEFINE_ALIAS
from .common import ReflectionPad2d #DEFINE_ALIAS
from .common import ReplicationPad2d #DEFINE_ALIAS
from .common import ConstantPad2d #DEFINE_ALIAS
from .common import ZeroPad2d #DEFINE_ALIAS
from .common import ReplicationPad3d #DEFINE_ALIAS
from .common import ConstantPad3d #DEFINE_ALIAS
from .common import Pad3D #DEFINE_ALIAS
from .common import CosineSimilarity #DEFINE_ALIAS
from .common import Embedding #DEFINE_ALIAS
from .common import Linear #DEFINE_ALIAS
from .common import Flatten #DEFINE_ALIAS
from .common import Upsample #DEFINE_ALIAS
from .common import UpsamplingNearest2d #DEFINE_ALIAS
from .common import UpsamplingBilinear2d #DEFINE_ALIAS
from .common import Dropout #DEFINE_ALIAS
from .common import Dropout2d #DEFINE_ALIAS
from .common import Dropout3d #DEFINE_ALIAS
......
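The two __init__ hunks above retire the per-mode aliases (ReflectionPad1d, ConstantPad2d, ZeroPad2d, ...) and re-export only the unified Pad1D/Pad2D/Pad3D classes. A quick sanity check of the resulting import paths, assuming both re-exports refer to the same class objects as the edits suggest:

```python
# Assumption: paddle.nn re-exports the classes defined in paddle.nn.layer.common.
from paddle.nn import Pad1D, Pad2D, Pad3D
from paddle.nn.layer.common import Pad1D as CommonPad1D

assert Pad1D is CommonPad1D
print(Pad1D.__name__, Pad2D.__name__, Pad3D.__name__)
```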
This diff is collapsed.