未验证 提交 58f3ef98 编写于 作者: X xiaoting 提交者: GitHub

fix typo for interp_v2,test=develop (#26843)

* fix typo for interp_v2,test=develop

* align with torch, test=develop

* add area mode, test=develop

* fix bug, test=develop

* format notes, test=develop

* update for converage, test=develop

* fix bilinear, test=develop

* fix bicubic, test=develop

* fix typo, test=develop

* fix coverage, test=develop

* fix helper.input_dtype, test=develop

* polish notes, test=develop

* polish notes, test=develop

* polish notes, test=develop
上级 ed2f57cc
......@@ -67,7 +67,7 @@ static void Interpolate1DInferShapeCheck(framework::InferShapeContext* ctx) {
scale_tensor[0], 1,
platform::errors::InvalidArgument(
"Scale's shape must be 1, but got shape = %d .", scale_tensor[0]));
// out_w = -1;
out_w = -1;
} else {
auto scale = ctx->Attrs().Get<std::vector<float>>("scale");
if (scale.size() > 0) {
......@@ -159,8 +159,8 @@ static void Interpolate2DInferShapeCheck(framework::InferShapeContext* ctx) {
platform::errors::InvalidArgument(
"Scale's shape must be 2 or 1, but got shape = %d .",
scale_tensor[0]));
// out_h = -1;
// out_w = -1;
out_h = -1;
out_w = -1;
} else {
auto scale = ctx->Attrs().Get<std::vector<float>>("scale");
if (scale.size() > 0) {
......@@ -264,9 +264,9 @@ static void Interpolate3DInferShapeCheck(framework::InferShapeContext* ctx) {
platform::errors::InvalidArgument(
"Scale's shape must be 3 or 1, but got shape = %d .",
scale_tensor[0]));
// out_d = -1;
// out_h = -1;
// out_w = -1;
out_d = -1;
out_h = -1;
out_w = -1;
} else {
auto scale = ctx->Attrs().Get<std::vector<float>>("scale");
if (scale.size() > 0) {
......@@ -633,6 +633,9 @@ DECLARE_NO_NEED_BUFFER_VARS_INFERER(InterpolateV2GradNoNeedBufferVarsInferer,
} // namespace operators
} // namespace paddle
// interp_v2 supports a scale_factor whose input type is a list; this
// behavior is not compatible with interp_op, so a new operator is added
// in Paddle 2.0.
namespace ops = paddle::operators;
REGISTER_OPERATOR(bilinear_interp_v2, ops::InterpolateV2Op,
ops::InterpolateV2OpMaker,
......
......@@ -836,12 +836,12 @@ static void Interpolate1DCUDAFwd(const framework::ExecutionContext& ctx,
int out_w = ctx.Attr<int>("out_w");
auto list_new_shape_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor");
float scale_w = -1;
if (list_new_shape_tensor.size() > 0) {
// have size tensor
auto new_size = get_new_shape(list_new_shape_tensor);
out_w = new_size[0];
} else {
float scale_w = -1;
auto scale_tensor = ctx.Input<Tensor>("Scale");
auto scale = ctx.Attr<std::vector<float>>("scale");
if (scale_tensor != nullptr) {
......@@ -887,8 +887,11 @@ static void Interpolate1DCUDAFwd(const framework::ExecutionContext& ctx,
float ratio_w = 0.f;
if (out_w > 1) {
ratio_w = (align_corners) ? static_cast<float>(in_w - 1.0) / (out_w - 1.0)
float new_scale_w = 0.f;
new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w)
: static_cast<float>(in_w) / out_w;
ratio_w = (align_corners) ? static_cast<float>(in_w - 1.0) / (out_w - 1.0)
: static_cast<float>(new_scale_w);
}
int in_cw = c * in_w;
......@@ -924,14 +927,14 @@ static void Interpolate2DCUDAFwd(const framework::ExecutionContext& ctx,
int out_w = ctx.Attr<int>("out_w");
auto list_new_shape_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor");
float scale_w = -1;
float scale_h = -1;
if (list_new_shape_tensor.size() > 0) {
// have size tensor
auto new_size = get_new_shape(list_new_shape_tensor);
out_h = new_size[0];
out_w = new_size[1];
} else {
float scale_h = -1;
float scale_w = -1;
auto scale_tensor = ctx.Input<Tensor>("Scale");
auto scale = ctx.Attr<std::vector<float>>("scale");
if (scale_tensor != nullptr) {
......@@ -993,12 +996,18 @@ static void Interpolate2DCUDAFwd(const framework::ExecutionContext& ctx,
float ratio_h = 0.f;
float ratio_w = 0.f;
if (out_h > 1) {
ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1)
float new_scale_h = 0.f;
new_scale_h = (scale_h > 0) ? static_cast<float>(1. / scale_h)
: static_cast<float>(in_h) / out_h;
ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1)
: static_cast<float>(new_scale_h);
}
if (out_w > 1) {
ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
float new_scale_w = 0.f;
new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w)
: static_cast<float>(in_w) / out_w;
ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
: static_cast<float>(new_scale_w);
}
int in_hw = in_h * in_w;
......@@ -1048,6 +1057,9 @@ static void Interpolate3DCUDAFwd(const framework::ExecutionContext& ctx,
int out_w = ctx.Attr<int>("out_w");
auto list_new_shape_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor");
float scale_w = -1;
float scale_d = -1;
float scale_h = -1;
if (list_new_shape_tensor.size() > 0) {
// have size tensor
auto new_size = get_new_shape(list_new_shape_tensor);
......@@ -1055,9 +1067,6 @@ static void Interpolate3DCUDAFwd(const framework::ExecutionContext& ctx,
out_h = new_size[1];
out_w = new_size[2];
} else {
float scale_d = -1;
float scale_h = -1;
float scale_w = -1;
auto scale_tensor = ctx.Input<Tensor>("Scale");
auto scale = ctx.Attr<std::vector<float>>("scale");
if (scale_tensor != nullptr) {
......@@ -1129,16 +1138,25 @@ static void Interpolate3DCUDAFwd(const framework::ExecutionContext& ctx,
float ratio_h = 0.f;
float ratio_w = 0.f;
if (out_d > 1) {
ratio_d = (align_corners) ? static_cast<float>(in_d - 1) / (out_d - 1)
float new_scale_d = 0.f;
new_scale_d = (scale_d > 0) ? static_cast<float>(1. / scale_d)
: static_cast<float>(in_d) / out_d;
ratio_d = (align_corners) ? static_cast<float>(in_d - 1) / (out_d - 1)
: static_cast<float>(new_scale_d);
}
if (out_h > 1) {
ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1)
float new_scale_h = 0.f;
new_scale_h = (scale_h > 0) ? static_cast<float>(1. / scale_h)
: static_cast<float>(in_h) / out_h;
ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1)
: static_cast<float>(new_scale_h);
}
if (out_w > 1) {
ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
float new_scale_w = 0.f;
new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w)
: static_cast<float>(in_w) / out_w;
ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
: static_cast<float>(new_scale_w);
}
int in_dhw = in_d * in_h * in_w;
......@@ -1230,8 +1248,11 @@ static void Interpolate1DCUDABwd(const framework::ExecutionContext& ctx,
float ratio_w = 0.f;
if (out_w > 1) {
ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
float new_scale_w = 0.f;
new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w)
: static_cast<float>(in_w) / out_w;
ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
: static_cast<float>(new_scale_w);
}
int in_cw = c * in_w;
int out_cw = c * out_w;
......@@ -1333,12 +1354,18 @@ static void Interpolate2DCUDABwd(const framework::ExecutionContext& ctx,
float ratio_h = 0.f;
float ratio_w = 0.f;
if (out_h > 1) {
ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1)
float new_scale_h = 0.f;
new_scale_h = (scale_h > 0) ? static_cast<float>(1. / scale_h)
: static_cast<float>(in_h) / out_h;
ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1)
: static_cast<float>(new_scale_h);
}
if (out_w > 1) {
ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
float new_scale_w = 0.f;
new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w)
: static_cast<float>(in_w) / out_w;
ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
: static_cast<float>(new_scale_w);
}
int in_hw = in_h * in_w;
......@@ -1464,16 +1491,25 @@ static void Interpolate3DCUDABwd(const framework::ExecutionContext& ctx,
float ratio_h = 0.f;
float ratio_w = 0.f;
if (out_d > 1) {
ratio_d = (align_corners) ? static_cast<float>(in_d - 1) / (out_d - 1)
float new_scale_d = 0.f;
new_scale_d = (scale_d > 0) ? static_cast<float>(1. / scale_d)
: static_cast<float>(in_d) / out_d;
ratio_d = (align_corners) ? static_cast<float>(in_d - 1) / (out_d - 1)
: static_cast<float>(new_scale_d);
}
if (out_h > 1) {
ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1)
float new_scale_h = 0.f;
new_scale_h = (scale_h > 0) ? static_cast<float>(1. / scale_h)
: static_cast<float>(in_h) / out_h;
ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1)
: static_cast<float>(new_scale_h);
}
if (out_w > 1) {
ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
float new_scale_w = 0.f;
new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w)
: static_cast<float>(in_w) / out_w;
ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
: static_cast<float>(new_scale_w);
}
int in_dhw = in_d * in_h * in_w;
......
......@@ -783,12 +783,13 @@ static void Interpolate1DCPUFwd(const framework::ExecutionContext& ctx,
int out_w = ctx.Attr<int>("out_w");
auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor");
float scale_w = -1.;
if (list_new_size_tensor.size() > 0) {
// have size tensor
auto new_size = get_new_shape(list_new_size_tensor);
out_w = new_size[0];
} else {
float scale_w = -1;
// float scale_w = -1;
auto scale_tensor = ctx.Input<Tensor>("Scale");
auto scale = ctx.Attr<std::vector<float>>("scale");
if (scale_tensor != nullptr) {
......@@ -833,8 +834,11 @@ static void Interpolate1DCPUFwd(const framework::ExecutionContext& ctx,
float ratio_w = 0.f;
if (out_w > 1) {
ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
float new_scale_w = 0.f;
new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w)
: static_cast<float>(in_w) / out_w;
ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
: static_cast<float>(new_scale_w);
}
if ("linear" == interp_method) {
LinearInterpolation<T>(input, output, ratio_w, in_w, n, c, out_w,
......@@ -856,6 +860,8 @@ static void Interpolate2DCPUFwd(const framework::ExecutionContext& ctx,
int out_h = ctx.Attr<int>("out_h");
int out_w = ctx.Attr<int>("out_w");
float scale_h = -1;
float scale_w = -1;
auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor");
if (list_new_size_tensor.size() > 0) {
......@@ -864,8 +870,6 @@ static void Interpolate2DCPUFwd(const framework::ExecutionContext& ctx,
out_h = new_size[0];
out_w = new_size[1];
} else {
float scale_h = -1;
float scale_w = -1;
auto scale_tensor = ctx.Input<Tensor>("Scale");
auto scale = ctx.Attr<std::vector<float>>("scale");
if (scale_tensor != nullptr) {
......@@ -925,12 +929,18 @@ static void Interpolate2DCPUFwd(const framework::ExecutionContext& ctx,
float ratio_h = 0.f;
float ratio_w = 0.f;
if (out_h > 1) {
ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1)
float new_scale_h = 0.f;
new_scale_h = (scale_h > 0) ? static_cast<float>(1. / scale_h)
: static_cast<float>(in_h) / out_h;
ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1)
: static_cast<float>(new_scale_h);
}
if (out_w > 1) {
ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
float new_scale_w = 0.f;
new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w)
: static_cast<float>(in_w) / out_w;
ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
: static_cast<float>(new_scale_w);
}
if ("bilinear" == interp_method) {
......@@ -962,6 +972,10 @@ static void Interpolate3DCPUFwd(const framework::ExecutionContext& ctx,
int out_h = ctx.Attr<int>("out_h");
int out_w = ctx.Attr<int>("out_w");
float scale_d = -1;
float scale_h = -1;
float scale_w = -1;
auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor");
if (list_new_size_tensor.size() > 0) {
// have size tensor
......@@ -970,9 +984,6 @@ static void Interpolate3DCPUFwd(const framework::ExecutionContext& ctx,
out_h = new_size[1];
out_w = new_size[2];
} else {
float scale_d = -1;
float scale_h = -1;
float scale_w = -1;
auto scale_tensor = ctx.Input<Tensor>("Scale");
auto scale = ctx.Attr<std::vector<float>>("scale");
if (scale_tensor != nullptr) {
......@@ -1043,16 +1054,25 @@ static void Interpolate3DCPUFwd(const framework::ExecutionContext& ctx,
float ratio_h = 0.f;
float ratio_w = 0.f;
if (out_d > 1) {
ratio_d = (align_corners) ? static_cast<float>(in_d - 1) / (out_d - 1)
float new_scale_d = 0.f;
new_scale_d = (scale_d > 0) ? static_cast<float>(1. / scale_d)
: static_cast<float>(in_d) / out_d;
ratio_d = (align_corners) ? static_cast<float>(in_d - 1) / (out_d - 1)
: static_cast<float>(new_scale_d);
}
if (out_h > 1) {
ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1)
float new_scale_h = 0.f;
new_scale_h = (scale_h > 0) ? static_cast<float>(1. / scale_h)
: static_cast<float>(in_h) / out_h;
ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1)
: static_cast<float>(new_scale_h);
}
if (out_w > 1) {
ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
float new_scale_w = 0.f;
new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w)
: static_cast<float>(in_w) / out_w;
ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
: static_cast<float>(new_scale_w);
}
if ("trilinear" == interp_method) {
......@@ -1127,8 +1147,11 @@ static void Interpolate1DCPUBwd(const framework::ExecutionContext& ctx,
float ratio_w = 0.f;
if (out_w > 1) {
ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
float new_scale_w = 0.f;
new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w)
: static_cast<float>(in_w) / out_w;
ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
: static_cast<float>(new_scale_w);
}
if ("linear" == interp_method) {
LinearInterpolationGrad<T>(output_grad, input_grad, ratio_w, in_w, n, c,
......@@ -1216,12 +1239,18 @@ static void Interpolate2DCPUBwd(const framework::ExecutionContext& ctx,
float ratio_h = 0.f;
float ratio_w = 0.f;
if (out_h > 1) {
ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1)
float new_scale_h = 0.f;
new_scale_h = (scale_h > 0) ? static_cast<float>(1. / scale_h)
: static_cast<float>(in_h) / out_h;
ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1)
: static_cast<float>(new_scale_h);
}
if (out_w > 1) {
ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
float new_scale_w = 0.f;
new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w)
: static_cast<float>(in_w) / out_w;
ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
: static_cast<float>(new_scale_w);
}
if ("bilinear" == interp_method) {
......@@ -1327,16 +1356,25 @@ static void Interpolate3DCPUBwd(const framework::ExecutionContext& ctx,
float ratio_h = 0.f;
float ratio_w = 0.f;
if (out_d > 1) {
ratio_d = (align_corners) ? static_cast<float>(in_d - 1) / (out_d - 1)
float new_scale_d = 0.f;
new_scale_d = (scale_d > 0) ? static_cast<float>(1. / scale_d)
: static_cast<float>(in_d) / out_d;
ratio_d = (align_corners) ? static_cast<float>(in_d - 1) / (out_d - 1)
: static_cast<float>(new_scale_d);
}
if (out_h > 1) {
ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1)
float new_scale_h = 0.f;
new_scale_h = (scale_h > 0) ? static_cast<float>(1. / scale_h)
: static_cast<float>(in_h) / out_h;
ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1)
: static_cast<float>(new_scale_h);
}
if (out_w > 1) {
ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
float new_scale_w = 0.f;
new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w)
: static_cast<float>(in_w) / out_w;
ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
: static_cast<float>(new_scale_w);
}
if ("trilinear" == interp_method) {
......
......@@ -94,6 +94,10 @@ class TestPool1d_API(unittest.TestCase):
result = ada_max_pool1d_dg(input)
self.assertTrue(np.allclose(result.numpy(), result_np))
result = paddle.nn.functional.common.interpolate(
input, mode="area", size=16)
self.assertTrue(np.allclose(result.numpy(), result_np))
def check_adaptive_avg_static_results(self, place):
with fluid.program_guard(fluid.Program(), fluid.Program()):
input = fluid.data(name="input", shape=[2, 3, 32], dtype="float32")
......
......@@ -163,6 +163,9 @@ class TestAdaptiveAvgPool2dAPI(unittest.TestCase):
out_5 = paddle.nn.functional.adaptive_avg_pool2d(
x=x, output_size=[None, 3])
out_6 = paddle.nn.functional.interpolate(
x=x, mode="area", size=[2, 5])
assert np.allclose(out_1.numpy(), self.res_1_np)
assert np.allclose(out_2.numpy(), self.res_2_np)
......@@ -173,6 +176,8 @@ class TestAdaptiveAvgPool2dAPI(unittest.TestCase):
assert np.allclose(out_5.numpy(), self.res_5_np)
assert np.allclose(out_6.numpy(), self.res_3_np)
class TestAdaptiveAvgPool2dClassAPI(unittest.TestCase):
def setUp(self):
......
......@@ -178,6 +178,9 @@ class TestAdaptiveAvgPool3dAPI(unittest.TestCase):
out_5 = paddle.nn.functional.adaptive_avg_pool3d(
x=x, output_size=[None, 3, None])
out_6 = paddle.nn.functional.interpolate(
x=x, mode="area", size=[2, 3, 5])
assert np.allclose(out_1.numpy(), self.res_1_np)
assert np.allclose(out_2.numpy(), self.res_2_np)
......@@ -188,6 +191,8 @@ class TestAdaptiveAvgPool3dAPI(unittest.TestCase):
assert np.allclose(out_5.numpy(), self.res_5_np)
assert np.allclose(out_6.numpy(), self.res_3_np)
class TestAdaptiveAvgPool3dClassAPI(unittest.TestCase):
def setUp(self):
......
......@@ -53,6 +53,8 @@ def value_bound(input, w, h, x, y):
def bicubic_interp_np(input,
out_h,
out_w,
scale_h=0,
scale_w=0,
out_size=None,
actual_shape=None,
align_corners=True,
......@@ -72,12 +74,18 @@ def bicubic_interp_np(input,
if out_h > 1:
if (align_corners):
ratio_h = (in_h - 1.0) / (out_h - 1.0)
else:
if scale_h > 0:
ratio_h = 1.0 / scale_h
else:
ratio_h = 1.0 * in_h / out_h
if out_w > 1:
if (align_corners):
ratio_w = (in_w - 1.0) / (out_w - 1.0)
else:
if scale_w > 0:
ratio_w = 1.0 / scale_w
else:
ratio_w = 1.0 * in_w / out_w
......@@ -128,7 +136,8 @@ class TestBicubicInterpOp(OpTest):
self.init_test_case()
self.op_type = "bicubic_interp_v2"
input_np = np.random.random(self.input_shape).astype("float64")
scale_h = 0
scale_w = 0
if self.data_layout == "NCHW":
in_h = self.input_shape[2]
in_w = self.input_shape[3]
......@@ -151,9 +160,9 @@ class TestBicubicInterpOp(OpTest):
out_h = self.out_h
out_w = self.out_w
output_np = bicubic_interp_np(input_np, out_h, out_w, self.out_size,
self.actual_shape, self.align_corners,
self.data_layout)
output_np = bicubic_interp_np(input_np, out_h, out_w, scale_h, scale_w,
self.out_size, self.actual_shape,
self.align_corners, self.data_layout)
self.inputs = {'X': input_np}
if self.out_size is not None:
self.inputs['OutSize'] = self.out_size
......@@ -480,10 +489,34 @@ class TestBicubicOpError(unittest.TestCase):
out = interpolate(
x,
size=None,
mode='trilinear',
mode='bicubic',
align_corners=False,
scale_factor=[1, 2, 2])
def test_size_and_scale():
x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32")
out = interpolate(
x,
size=None,
mode='bicubic',
align_corners=False,
scale_factor=None)
def test_size_and_scale2():
x = fluid.data(
name="input", shape=[2, 3, 6, 9, 4], dtype="float32")
out = interpolate(
x,
size=[2, 2, 2],
mode='trilinear',
align_corners=False,
scale_factor=2.0)
def test_size_type():
x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32")
out = interpolate(
x, size={2, 2}, mode='bicubic', align_corners=False)
self.assertRaises(ValueError, test_mode_type)
self.assertRaises(ValueError, test_input_shape)
self.assertRaises(TypeError, test_align_corcers)
......@@ -498,6 +531,9 @@ class TestBicubicOpError(unittest.TestCase):
self.assertRaises(ValueError, test_align_corners_and_nearest)
self.assertRaises(ValueError, test_scale_shape)
self.assertRaises(ValueError, test_scale_value)
self.assertRaises(ValueError, test_size_and_scale)
self.assertRaises(ValueError, test_size_and_scale2)
self.assertRaises(TypeError, test_size_type)
if __name__ == "__main__":
......
......@@ -26,6 +26,8 @@ import paddle
def bilinear_interp_np(input,
out_h,
out_w,
scale_w=0,
scale_h=0,
out_size=None,
actual_shape=None,
align_corners=True,
......@@ -46,11 +48,17 @@ def bilinear_interp_np(input,
if out_h > 1:
if (align_corners):
ratio_h = (in_h - 1.0) / (out_h - 1.0)
else:
if scale_h > 0:
ratio_h = 1.0 / scale_h
else:
ratio_h = 1.0 * in_h / out_h
if out_w > 1:
if (align_corners):
ratio_w = (in_w - 1.0) / (out_w - 1.0)
else:
if scale_w > 0:
ratio_w = 1.0 / scale_w
else:
ratio_w = 1.0 * in_w / out_w
......@@ -110,7 +118,8 @@ class TestBilinearInterpOp(OpTest):
else:
in_h = self.input_shape[1]
in_w = self.input_shape[2]
scale_h = 0
scale_w = 0
if self.scale:
if isinstance(self.scale, float) or isinstance(self.scale, int):
if self.scale > 0.:
......@@ -126,9 +135,9 @@ class TestBilinearInterpOp(OpTest):
out_h = self.out_h
out_w = self.out_w
output_np = bilinear_interp_np(input_np, out_h, out_w, self.out_size,
self.actual_shape, self.align_corners,
self.align_mode, self.data_layout)
output_np = bilinear_interp_np(
input_np, out_h, out_w, 0, 0, self.out_size, self.actual_shape,
self.align_corners, self.align_mode, self.data_layout)
self.inputs = {'X': input_np}
if self.out_size is not None:
self.inputs['OutSize'] = self.out_size
......@@ -238,6 +247,17 @@ class TestBilinearInterpCase6(TestBilinearInterpOp):
self.align_mode = 1
class TestBilinearInterpCase7(TestBilinearInterpOp):
def init_test_case(self):
self.interp_method = 'bilinear'
self.input_shape = [1, 1, 32, 64]
self.out_h = 64
self.out_w = 32
self.scale = [2.0, 0.5]
self.align_corners = False
self.align_mode = 1
class TestBilinearInterpSame(TestBilinearInterpOp):
def init_test_case(self):
self.interp_method = 'bilinear'
......@@ -298,9 +318,9 @@ class TestBilinearInterpOpUint8(OpTest):
out_h = self.out_h
out_w = self.out_w
output_np = bilinear_interp_np(input_np, out_h, out_w, self.out_size,
self.actual_shape, self.align_corners,
self.align_mode)
output_np = bilinear_interp_np(input_np, out_h, out_w, 0, 0,
self.out_size, self.actual_shape,
self.align_corners, self.align_mode)
self.inputs = {'X': input_np}
if self.out_size is not None:
self.inputs['OutSize'] = self.out_size
......@@ -481,8 +501,9 @@ class TestBilinearInterpOp_attr_tensor(OpTest):
if isinstance(self.scale, list) and len(self.scale) == 1:
self.scale = [self.scale[0], self.scale[0]]
self.attrs['scale'] = self.scale
output_np = bilinear_interp_np(input_np, out_h, out_w, self.out_size,
self.actual_shape, self.align_corners)
output_np = bilinear_interp_np(input_np, out_h, out_w, 0, 0,
self.out_size, self.actual_shape,
self.align_corners)
self.outputs = {'Out': output_np}
def test_check_output(self):
......
......@@ -293,7 +293,7 @@ class TestLinearInterpOpAPI2_0(unittest.TestCase):
# dygraph
x_data = np.random.random((1, 3, 128)).astype("float32")
us_1 = paddle.nn.UpSample(
us_1 = paddle.nn.Upsample(
size=[64, ],
mode='linear',
align_mode=1,
......@@ -385,19 +385,19 @@ class TestLinearInterpOpError(unittest.TestCase):
def input_shape_error():
x1 = fluid.data(name="x1", shape=[1], dtype="float32")
out1 = paddle.nn.UpSample(
out1 = paddle.nn.Upsample(
size=[256, ], data_format='NCW', mode='linear')
out1_res = out1(x1)
def data_format_error():
x2 = fluid.data(name="x2", shape=[1, 3, 128], dtype="float32")
out2 = paddle.nn.UpSample(
out2 = paddle.nn.Upsample(
size=[256, ], data_format='NHWCD', mode='linear')
out2_res = out2(x2)
def out_shape_error():
x3 = fluid.data(name="x3", shape=[1, 3, 128], dtype="float32")
out3 = paddle.nn.UpSample(
out3 = paddle.nn.Upsample(
size=[
256,
256,
......
......@@ -26,6 +26,7 @@ from paddle.nn.functional import interpolate
def linear_interp_np(input,
out_w,
scale_w=0,
out_size=None,
actual_shape=None,
align_corners=True,
......@@ -43,6 +44,9 @@ def linear_interp_np(input,
if out_w > 1:
if (align_corners):
ratio_w = (in_w - 1.0) / (out_w - 1.0)
else:
if scale_w > 0:
ratio_w = 1.0 / scale_w
else:
ratio_w = 1.0 * in_w / out_w
......@@ -81,6 +85,7 @@ class TestLinearInterpOp(OpTest):
self.op_type = "linear_interp_v2"
input_np = np.random.random(self.input_shape).astype("float64")
scale_w = 0
if self.data_layout == "NCHW":
in_w = self.input_shape[2]
else:
......@@ -95,7 +100,7 @@ class TestLinearInterpOp(OpTest):
else:
out_w = self.out_w
output_np = linear_interp_np(input_np, out_w, self.out_size,
output_np = linear_interp_np(input_np, out_w, self.scale, self.out_size,
self.actual_shape, self.align_corners,
self.align_mode, self.data_layout)
self.inputs = {'X': input_np}
......@@ -195,7 +200,7 @@ class TestLinearInterpOpSizeTensor(TestLinearInterpOp):
else:
out_w = self.out_w
output_np = linear_interp_np(input_np, out_w, self.out_size,
output_np = linear_interp_np(input_np, out_w, 0, self.out_size,
self.actual_shape, self.align_corners,
self.align_mode, self.data_layout)
......@@ -309,7 +314,7 @@ class TestLinearInterpOpAPI2_0(unittest.TestCase):
# dygraph
x_data = np.random.random((1, 3, 128)).astype("float32")
us_1 = paddle.nn.UpSample(
us_1 = paddle.nn.Upsample(
size=[64, ],
mode='linear',
align_mode=1,
......@@ -342,7 +347,7 @@ class TestResizeLinearOpUint8(OpTest):
else:
out_w = self.out_w
output_np = linear_interp_np(input_np, out_w, self.out_size,
output_np = linear_interp_np(input_np, out_w, 0, self.out_size,
self.actual_shape, self.align_corners,
self.align_mode)
self.inputs = {'X': input_np}
......@@ -410,19 +415,19 @@ class TestLinearInterpOpError(unittest.TestCase):
def input_shape_error():
x1 = fluid.data(name="x1", shape=[1], dtype="float32")
out1 = paddle.nn.UpSample(
out1 = paddle.nn.Upsample(
size=[256, ], data_format='NCW', mode='linear')
out1_res = out1(x1)
def data_format_error():
x2 = fluid.data(name="x2", shape=[1, 3, 128], dtype="float32")
out2 = paddle.nn.UpSample(
out2 = paddle.nn.Upsample(
size=[256, ], data_format='NHWCD', mode='linear')
out2_res = out2(x2)
def out_shape_error():
x3 = fluid.data(name="x3", shape=[1, 3, 128], dtype="float32")
out3 = paddle.nn.UpSample(
out3 = paddle.nn.Upsample(
size=[
256,
256,
......
......@@ -26,6 +26,8 @@ import paddle
def nearest_neighbor_interp_np(X,
out_h,
out_w,
scale_h=0,
scale_w=0,
out_size=None,
actual_shape=None,
align_corners=True,
......@@ -45,14 +47,19 @@ def nearest_neighbor_interp_np(X,
if (out_h > 1):
if (align_corners):
ratio_h = (in_h - 1.0) / (out_h - 1.0)
else:
if scale_h > 0:
ratio_h = 1.0 / scale_h
else:
ratio_h = 1.0 * in_h / out_h
if (out_w > 1):
if (align_corners):
ratio_w = (in_w - 1.0) / (out_w - 1.0)
else:
if scale_w > 0:
ratio_w = 1.0 / scale_w
else:
ratio_w = 1.0 * in_w / out_w
out = np.zeros((n, c, out_h, out_w))
if align_corners:
......@@ -89,7 +96,8 @@ class TestNearestInterpOp(OpTest):
else:
in_h = self.input_shape[1]
in_w = self.input_shape[2]
scale_h = 0
scale_w = 0
if self.scale:
if isinstance(self.scale, float) or isinstance(self.scale, int):
if self.scale > 0:
......@@ -106,8 +114,8 @@ class TestNearestInterpOp(OpTest):
out_w = self.out_w
output_np = nearest_neighbor_interp_np(
input_np, out_h, out_w, self.out_size, self.actual_shape,
self.align_corners, self.data_layout)
input_np, out_h, out_w, scale_h, scale_w, self.out_size,
self.actual_shape, self.align_corners, self.data_layout)
self.inputs = {'X': input_np}
if self.out_size is not None:
self.inputs['OutSize'] = self.out_size
......@@ -265,7 +273,7 @@ class TestNearestInterpOpUint8(OpTest):
out_h = self.out_h
out_w = self.out_w
output_np = nearest_neighbor_interp_np(input_np, out_h, out_w,
output_np = nearest_neighbor_interp_np(input_np, out_h, out_w, 0, 0,
self.out_size, self.actual_shape,
self.align_corners)
self.inputs = {'X': input_np}
......@@ -408,7 +416,7 @@ class TestNearestInterpOp_attr_tensor(OpTest):
if isinstance(self.scale, list) and len(self.scale) == 1:
self.scale = [self.scale[0], self.scale[0]]
self.attrs['scale'] = self.scale
output_np = nearest_neighbor_interp_np(input_np, out_h, out_w,
output_np = nearest_neighbor_interp_np(input_np, out_h, out_w, 0, 0,
self.out_size, self.actual_shape,
self.align_corners)
self.outputs = {'Out': output_np}
......
......@@ -26,6 +26,9 @@ def trilinear_interp_np(input,
out_d,
out_h,
out_w,
scale_d=0,
scale_h=0,
scale_w=0,
out_size=None,
actual_shape=None,
align_corners=True,
......@@ -48,16 +51,25 @@ def trilinear_interp_np(input,
if out_d > 1:
if (align_corners):
ratio_d = (in_d - 1.0) / (out_d - 1.0)
else:
if scale_d > 0:
ratio_d = 1.0 / scale_d
else:
ratio_d = 1.0 * in_d / out_d
if out_h > 1:
if (align_corners):
ratio_h = (in_h - 1.0) / (out_h - 1.0)
else:
if scale_h > 0:
ratio_h = 1.0 / scale_h
else:
ratio_h = 1.0 * in_h / out_h
if out_w > 1:
if (align_corners):
ratio_w = (in_w - 1.0) / (out_w - 1.0)
else:
if scale_w > 0:
ratio_w = 1.0 / scale_w
else:
ratio_w = 1.0 * in_w / out_w
......@@ -133,6 +145,9 @@ class TestTrilinearInterpOp(OpTest):
self.op_type = "trilinear_interp_v2"
input_np = np.random.random(self.input_shape).astype("float32")
scale_w = 0
scale_h = 0
scale_d = 0
if self.data_layout == "NCDHW":
in_d = self.input_shape[2]
in_h = self.input_shape[3]
......@@ -159,9 +174,10 @@ class TestTrilinearInterpOp(OpTest):
out_h = self.out_h
out_w = self.out_w
output_np = trilinear_interp_np(
input_np, out_d, out_h, out_w, self.out_size, self.actual_shape,
self.align_corners, self.align_mode, self.data_layout)
output_np = trilinear_interp_np(input_np, out_d, out_h, out_w, scale_d,
scale_h, scale_w, self.out_size,
self.actual_shape, self.align_corners,
self.align_mode, self.data_layout)
self.inputs = {'X': input_np}
if self.out_size is not None:
self.inputs['OutSize'] = self.out_size
......@@ -359,7 +375,7 @@ class TestTrilinearInterpOpUint8(OpTest):
out_h = self.out_h
out_w = self.out_w
output_np = trilinear_interp_np(input_np, out_d, out_h, out_w,
output_np = trilinear_interp_np(input_np, out_d, out_h, out_w, 0, 0, 0,
self.out_size, self.actual_shape,
self.align_corners, self.align_mode)
self.inputs = {'X': input_np}
......@@ -482,7 +498,7 @@ class TestTrilinearInterpZero(TestTrilinearInterpOp):
self.out_d = 60
self.out_h = 40
self.out_w = 25
self.scale = 0.2
self.scale = 0.0
self.align_corners = False
self.align_mode = 0
......@@ -541,7 +557,7 @@ class TestTrilinearInterpOp_attr_tensor(OpTest):
if isinstance(self.scale, list) and len(self.scale) == 1:
self.scale = [self.scale[0], self.scale[0], self.scale[0]]
self.attrs['scale'] = self.scale
output_np = trilinear_interp_np(input_np, out_d, out_h, out_w,
output_np = trilinear_interp_np(input_np, out_d, out_h, out_w, 0, 0, 0,
self.out_size, self.actual_shape,
self.align_corners, self.align_mode)
self.outputs = {'Out': output_np}
......
......@@ -89,7 +89,7 @@ from .layer.common import CosineSimilarity #DEFINE_ALIAS
from .layer.common import Embedding #DEFINE_ALIAS
from .layer.common import Linear #DEFINE_ALIAS
from .layer.common import Flatten #DEFINE_ALIAS
from .layer.common import UpSample #DEFINE_ALIAS
from .layer.common import Upsample #DEFINE_ALIAS
from .layer.common import UpsamplingNearest2d #DEFINE_ALIAS
from .layer.common import UpsamplingBilinear2d #DEFINE_ALIAS
from .layer.common import Bilinear #DEFINE_ALIAS
......
......@@ -72,6 +72,7 @@ from .common import unfold #DEFINE_ALIAS
# from .common import bilinear_tensor_product #DEFINE_ALIAS
from .common import assign #DEFINE_ALIAS
from .common import interpolate #DEFINE_ALIAS
from .common import upsample #DEFINE_ALIAS
from .common import bilinear #DEFINE_ALIAS
from .conv import conv1d #DEFINE_ALIAS
from .conv import conv_transpose1d #DEFINE_ALIAS
......
......@@ -80,6 +80,8 @@ def interpolate(x,
The input must be a 3-D Tensor of the shape (num_batches, channels, in_w)
or 4-D (num_batches, channels, in_h, in_w), or a 5-D Tensor of the shape
(num_batches, channels, in_d, in_h, in_w) or (num_batches, in_d, in_h, in_w, channels),
where in_w is the width of the input tensor, in_h is the height of the input tensor,
and in_d is the depth of the input tensor.
and the resizing only applies on the three dimensions(depth, height and width).
Supporting resample methods:
......@@ -88,6 +90,7 @@ def interpolate(x,
'trilinear' : Trilinear interpolation
'nearest' : Nearest neighbor interpolation
'bicubic' : Bicubic interpolation
'area': Area interpolation
Linear interpolation is the method of using a line connecting two known quantities
to determine the value of an unknown quantity between the two known quantities.
......@@ -114,6 +117,12 @@ def interpolate(x,
smoother than corresponding surfaces obtained by bilinear interpolation or
nearest-neighbor interpolation.
Area interpolation is to perform area interpolation
in the 3rd dimension(in height direction), the 4th dimension(in width
direction) and the 5th dimension(in depth direction) on input tensor. Set to
area will directly call `paddle.nn.functional.adaptive_avg_pool1d` or
`paddle.nn.functional.adaptive_avg_pool2d` or `paddle.nn.functional.adaptive_avg_pool3d`.
Example:
.. code-block:: text
......@@ -207,11 +216,11 @@ def interpolate(x,
when input is a 4-D Tensor and is (out_d, out_h, out_w) when input is a 5-D Tensor.
Default: None. If a list, each element can be an integer or a Tensor Variable of shape: [1].
If a Tensor Variable, its dimensions size should be a 1.
scale_factor (float|Tensor|list|None): The multiplier for the input height or width. At
least one of :attr:`out_shape` or :attr:`scale_factor` must be set.
And :attr:`out_shape` has a higher priority than :attr:`scale_factor`.Has to match input size if it is a list.
scale_factor (float|Tensor|list|tuple|None): The multiplier for the input height or width. At
least one of :attr:`size` or :attr:`scale_factor` must be set.
And :attr:`size` has a higher priority than :attr:`scale_factor`.Has to match input size if it is either a list or a tuple or a Tensor.
Default: None.
mode (str): The resample method. It supports 'linear', 'nearest', 'bilinear',
mode (str): The resample method. It supports 'linear', 'area', 'nearest', 'bilinear',
'bicubic' and 'trilinear' currently. Default: 'nearest'
align_corners(bool) : An optional bool, If True, the centers of the 4 corner pixels of the
input and output tensors are aligned, preserving the values at the
......@@ -235,7 +244,7 @@ def interpolate(x,
Raises:
TypeError: size should be a list or tuple or Tensor.
ValueError: The 'mode' of image_resize can only be 'linear', 'bilinear',
'trilinear', 'bicubic', or 'nearest' currently.
'trilinear', 'bicubic', 'area' or 'nearest' currently.
ValueError: 'linear' only support 3-D tensor.
ValueError: 'bilinear', 'bicubic' and 'nearest' only support 4-D tensor.
ValueError: 'trilinear' only support 5-D tensor.
......@@ -283,10 +292,11 @@ def interpolate(x,
'TRILINEAR',
'NEAREST',
'BICUBIC',
'AREA',
]
if resample not in resample_methods:
raise ValueError(
"The 'resample' of image_resize can only be 'linaer', 'bilinear', 'trilinear', "
"The 'resample' of image_resize can only be 'area', 'linear', 'bilinear', 'trilinear', "
" 'bicubic' or 'nearest' currently.")
if resample in ['LINEAR'] and len(x.shape) != 3:
......@@ -310,8 +320,17 @@ def interpolate(x,
raise ValueError(
"align_corners option can only be set with the interpolating modes: linear | bilinear | bicubic | trilinear"
)
if resample == 'AREA' and len(x.shape) == 3:
return paddle.nn.functional.adaptive_avg_pool1d(x, size)
if resample == 'AREA' and len(x.shape) == 4:
return paddle.nn.functional.adaptive_avg_pool2d(x, size)
if resample == 'AREA' and len(x.shape) == 5:
return paddle.nn.functional.adaptive_avg_pool3d(x, size)
helper = LayerHelper('{}_interp_v2'.format(resample_type), **locals())
dtype = helper.input_dtype()
dtype = helper.input_dtype(input_param_name='x')
if len(x.shape) == 3 and data_format not in ['NCW', 'NWC']:
raise ValueError(
"Got wrong value for param `data_format`: " + data_format +
......@@ -349,14 +368,15 @@ def interpolate(x,
out_shape = size
scale = scale_factor
if out_shape is not None and scale is not None:
raise ValueError("Only one of size or scale_factor should be defined.")
if out_shape is not None:
if isinstance(out_shape, Variable):
out_shape.stop_gradient = True
inputs['OutSize'] = out_shape
else:
if not (_is_list_or_turple_(out_shape)):
raise TypeError(
"out_shape should be a list or tuple or Variable.")
raise TypeError("size should be a list or tuple or Variable.")
# Validate the shape
contain_var = False
for dim_idx, dim_size in enumerate(out_shape):
......@@ -388,7 +408,7 @@ def interpolate(x,
if len(x.shape) == 3:
if len(out_shape) != 1:
raise ValueError(
"out_shape length should be 2 for input 3-D tensor")
"size length should be 2 for input 3-D tensor")
if contain_var:
attrs['out_w'] = size_list[0]
else:
......@@ -396,7 +416,7 @@ def interpolate(x,
attrs['out_w'] = out_shape[0]
if len(x.shape) == 4:
if len(out_shape) != 2:
raise ValueError("out_shape length should be 2 for "
raise ValueError("size length should be 2 for "
"input 4-D tensor.")
if contain_var:
attrs['out_h'] = size_list[0]
......@@ -407,7 +427,7 @@ def interpolate(x,
attrs['out_w'] = out_shape[1]
if len(x.shape) == 5:
if len(out_shape) != 3:
raise ValueError("out_shape length should be 3 for "
raise ValueError("size length should be 3 for "
"input 5-D tensor.")
if contain_var:
attrs['out_d'] = size_list[0]
......@@ -430,7 +450,7 @@ def interpolate(x,
for i in range(len(x.shape) - 2):
scale_list.append(scale)
attrs['scale'] = list(map(float, scale_list))
elif isinstance(scale, list):
elif isinstance(scale, list) or isinstance(scale, float):
if len(scale) != len(x.shape) - 2:
raise ValueError("scale_shape length should be {} for "
"input {}-D tensor.".format(
......@@ -441,7 +461,8 @@ def interpolate(x,
attrs['scale'] = list(map(float, scale))
else:
raise TypeError(
"Attr(scale)'s type should be float, int, list or Tensor.")
"Attr(scale)'s type should be float, int, list, tuple, or Tensor."
)
if in_dygraph_mode():
attr_list = []
......@@ -480,9 +501,12 @@ def upsample(x,
name=None):
"""
This op resizes a batch of images.
The input must be a 3-D Tensor of the shape (num_batches, channels, in_w)
or 4-D (num_batches, channels, in_h, in_w), or a 5-D Tensor of the shape
(num_batches, channels, in_d, in_h, in_w) or (num_batches, in_d, in_h, in_w, channels),
where in_w is the width of the input tensor, in_h is the height of the input tensor,
and in_d is the depth of the input tensor.
and the resizing only applies on the three dimensions(depth, height and width).
Supporting resample methods:
......@@ -507,12 +531,21 @@ def upsample(x,
data points on a two-dimensional regular grid. The interpolated surface is
smoother than corresponding surfaces obtained by bilinear interpolation or
nearest-neighbor interpolation.
Trilinear interpolation is an extension of linear interpolation for
interpolating functions of three variables (e.g. D-direction,
H-direction and W-direction in this op) on a rectilinear 3D grid.
The linear interpolation is performed on three directions.
align_corners and align_mode are optional parameters,the calculation method
of interpolation can be selected by them.
Area interpolation is to perform area interpolation
in the 3rd dimension(in height direction), the 4th dimension(in width
direction) and the 5th dimension(in depth direction) on input tensor. Set to
area will directly call `paddle.nn.functional.adaptive_avg_pool1d` or
`paddle.nn.functional.adaptive_avg_pool2d` or `paddle.nn.functional.adaptive_avg_pool3d`.
Example:
.. code-block:: text
For scale_factor:
......@@ -605,9 +638,10 @@ def upsample(x,
when input is a 4-D Tensor and is (out_d, out_h, out_w) when input is a 5-D Tensor.
Default: None. If a list, each element can be an integer or a Tensor Variable of shape: [1].
If a Tensor Variable, its dimensions size should be a 1.
scale_factor (float|Tensor|list|None): The multiplier for the input height or width. At
least one of :attr:`out_shape` or :attr:`scale_factor` must be set.
And :attr:`out_shape` has a higher priority than :attr:`scale_factor`.
scale_factor (float|Tensor|list|tuple|None): The multiplier for the input height or width. At
least one of :attr:`size` or :attr:`scale_factor` must be set.
And :attr:`size` has a higher priority than :attr:`scale_factor`.Has to match input size if
it is either a list or a tuple or a Tensor.
Default: None.
mode (str): The resample method. It supports 'linear', 'nearest', 'bilinear',
'bicubic' and 'trilinear' currently. Default: 'nearest'
......
......@@ -59,7 +59,7 @@ from .common import CosineSimilarity #DEFINE_ALIAS
from .common import Embedding #DEFINE_ALIAS
from .common import Linear #DEFINE_ALIAS
from .common import Flatten #DEFINE_ALIAS
from .common import UpSample #DEFINE_ALIAS
from .common import Upsample #DEFINE_ALIAS
from .common import UpsamplingNearest2d #DEFINE_ALIAS
from .common import UpsamplingBilinear2d #DEFINE_ALIAS
from .common import Dropout #DEFINE_ALIAS
......
......@@ -26,7 +26,7 @@ __all__ = [
'Pool2D',
'Embedding',
'Linear',
'UpSample',
'Upsample',
'Pad2D',
'UpsamplingNearest2d',
'UpsamplingBilinear2d',
......@@ -131,12 +131,15 @@ class Linear(layers.Layer):
return out
class UpSample(layers.Layer):
class Upsample(layers.Layer):
"""
This op resizes a batch of images.
The input must be a 3-D Tensor of the shape (num_batches, channels, in_w)
or 4-D (num_batches, channels, in_h, in_w), or a 5-D Tensor of the shape
(num_batches, channels, in_d, in_h, in_w) or (num_batches, in_d, in_h, in_w, channels),
where in_w is the width of the input tensor, in_h is the height of the input tensor,
and in_d is the depth of the input tensor.
and the resizing only applies on the three dimensions(depth, height and width).
Supporting resample methods:
......@@ -171,6 +174,12 @@ class UpSample(layers.Layer):
align_corners and align_mode are optional parameters,the calculation method
of interpolation can be selected by them.
Area interpolation is to perform area interpolation
in the 3rd dimension(in height direction), the 4th dimension(in width
direction) and the 5th dimension(in depth direction) on input tensor. Set to
area will directly call `paddle.nn.functional.adaptive_avg_pool1d` or
`paddle.nn.functional.adaptive_avg_pool2d` or `paddle.nn.functional.adaptive_avg_pool3d`.
Example:
.. code-block:: text
......@@ -273,9 +282,9 @@ class UpSample(layers.Layer):
when input is a 4-D Tensor and is (out_d, out_h, out_w) when input is a 5-D Tensor.
Default: None. If a list, each element can be an integer or a Tensor Variable of shape: [1].
If a Tensor Variable, its dimensions size should be a 1.
scale_factor (float|Tensor|list|None): The multiplier for the input height or width. At
least one of :attr:`out_shape` or :attr:`scale_factor` must be set.
And :attr:`out_shape` has a higher priority than :attr:`scale_factor`.Has to match input size if it is a list.
scale_factor (float|Tensor|list|tuple|None): The multiplier for the input height or width. At
least one of :attr:`size` or :attr:`scale_factor` must be set.
And :attr:`size` has a higher priority than :attr:`scale_factor`. Has to match input size if it is either a list or a tuple or a Tensor.
Default: None.
    mode (str): The resample method. It supports 'linear', 'nearest', 'bilinear',
'bicubic' and 'trilinear' currently. Default: 'nearest'
......@@ -322,7 +331,7 @@ class UpSample(layers.Layer):
paddle.disable_static()
input_data = np.random.rand(2,3,6,10).astype("float32")
upsample_out = paddle.nn.UpSample(size=[12,12])
upsample_out = paddle.nn.Upsample(size=[12,12])
input = paddle.to_tensor(input_data)
output = upsample_out(x=input)
......@@ -339,7 +348,7 @@ class UpSample(layers.Layer):
align_mode=0,
data_format='NCHW',
name=None):
super(UpSample, self).__init__()
super(Upsample, self).__init__()
self.size = size
self.scale_factor = scale_factor
self.mode = mode.lower()
......@@ -366,7 +375,8 @@ class UpsamplingNearest2d(layers.Layer):
"""
This op upsamples a batch of images, using nearest neighbours' pixel values.
The input must be a 4-D Tensor of the shape (num_batches, channels, in_h, in_w),
and the upsampling only applies on the two dimensions(height and width).
where in_w is width of the input tensor, in_h is the height of the input tensor.
And the upsampling only applies on the two dimensions(height and width).
Nearest neighbor interpolation is to perform nearest neighbor interpolation
in both the 3rd dimension(in height direction) and the 4th dimension(in width
......@@ -381,10 +391,11 @@ class UpsamplingNearest2d(layers.Layer):
layer, the shape is (out_h, out_w) when input is a 4-D Tensor.
Default: None. If a list, each element can be an integer or a Tensor Variable of shape: [1].
If a Tensor Variable, its dimensions size should be a 1.
scale_factor (float|int|list|Tensor|None): The multiplier for the input height or width. At
least one of :attr:`out_shape` or :attr:`scale_factor` must be set.
And :attr:`out_shape` has a higher priority than :attr:`scale_factor`.
Default: None. Has to match input size if it is a list.
scale_factor (float|int|list|tuple|Tensor|None): The multiplier for the input height or width. At
least one of :attr:`size` or :attr:`scale_factor` must be set.
And :attr:`size` has a higher priority than :attr:`scale_factor`.
Has to match input size if it is either a list or a tuple or a Tensor.
Default: None.
data_format (str, optional): Specify the data format of the input, and the data format of the output
will be consistent with that of the input. An optional string from:`NCW`, `NWC`, `"NCHW"`, `"NHWC"`, `"NCDHW"`,
`"NDHWC"`. The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
......@@ -449,7 +460,8 @@ class UpsamplingBilinear2d(layers.Layer):
"""
This op upsamples a batch of images, using bilinear' pixel values.
The input must be a 4-D Tensor of the shape (num_batches, channels, in_h, in_w),
and the upsampling only applies on the two dimensions(height and width).
where in_w is width of the input tensor, in_h is the height of the input tensor.
And the upsampling only applies on the two dimensions(height and width).
Bilinear interpolation is an extension of linear interpolation for
interpolating functions of two variables (e.g. H-direction and
......@@ -466,10 +478,11 @@ class UpsamplingBilinear2d(layers.Layer):
layer, the shape is (out_h, out_w) when input is a 4-D Tensor.
Default: None. If a list, each element can be an integer or a Tensor Variable of shape: [1].
If a Tensor Variable, its dimensions size should be a 1.
scale_factor (float|int|list|Tensor|None): The multiplier for the input height or width. At
least one of :attr:`out_shape` or :attr:`scale_factor` must be set.
And :attr:`out_shape` has a higher priority than :attr:`scale_factor`.
Default: None. Has to match input size if it is a list.
scale_factor (float|int|list|tuple|Tensor|None): The multiplier for the input height or width. At
least one of :attr:`size` or :attr:`scale_factor` must be set.
And :attr:`size` has a higher priority than :attr:`scale_factor`.
Has to match input size if it is either a list or a tuple or a Tensor.
Default: None.
data_format (str, optional): Specify the data format of the input, and the data format of the output
will be consistent with that of the input. An optional string from:`NCW`, `NWC`, `"NCHW"`, `"NHWC"`, `"NCDHW"`,
`"NDHWC"`. The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册