Unverified commit 63194d6e, authored by qingqing01, committed by GitHub

Enhance InferShape in deformable_conv and prior_box op (#20372)

Parent 0b321c8a
@@ -175,12 +175,20 @@ class DeformableConvOp : public framework::OperatorWithKernel {
std::vector<int64_t> output_shape({in_dims[0], filter_dims[0]});
for (size_t i = 0; i < strides.size(); ++i) {
output_shape.push_back(ConvOutputSize(in_dims[i + 2], filter_dims[i + 2],
dilations[i], paddings[i],
strides[i]));
if ((!ctx->IsRuntime()) &&
(in_dims[i + 2] <= 0 || filter_dims[i + 2] <= 0)) {
output_shape.push_back(-1);
} else {
output_shape.push_back(ConvOutputSize(in_dims[i + 2],
filter_dims[i + 2], dilations[i],
paddings[i], strides[i]));
}
}
PADDLE_ENFORCE_EQ(output_shape[1] % deformable_groups, 0U,
"output num_filter must divide deformable group size.");
if (ctx->IsRuntime()) {
PADDLE_ENFORCE_EQ(output_shape[2], offset_dims[2],
"output height must equal to offset map height.");
PADDLE_ENFORCE_EQ(output_shape[3], offset_dims[3],
@@ -194,11 +202,14 @@ class DeformableConvOp : public framework::OperatorWithKernel {
"output height must equal to mask map height.");
PADDLE_ENFORCE_EQ(output_shape[3], mask_dims[3],
"output width must equal to mask map width.");
PADDLE_ENFORCE_EQ(mask_dims[1] % (filter_dims[2] * filter_dims[3]), 0U,
"mask filter must divide deformable group size.");
PADDLE_ENFORCE_EQ(mask_dims[1] / (filter_dims[2] * filter_dims[3]),
deformable_groups,
"mask filter must divide deformable group size.");
}
ctx->SetOutputDim("Output", framework::make_ddim(output_shape));
}
......
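For context, the hunk above relaxes DeformableConvOp's compile-time shape inference: when an input or filter spatial dim is still non-positive (i.e. unknown before runtime), the op pushes -1 into the output shape instead of calling ConvOutputSize, and the offset/mask consistency checks are guarded by ctx->IsRuntime(). The following standalone sketch is not the Paddle source; the names ConvOutSize and InferSpatialDims are made up for illustration, but the formula is the standard convolution output-size computation and the -1 propagation mirrors the pattern in the diff.

```cpp
#include <cstdint>
#include <iostream>
#include <vector>

// Standard convolution output-size formula: dilated kernel extent, then the
// number of strided window positions.
inline int64_t ConvOutSize(int64_t in, int64_t filter, int64_t dilation,
                           int64_t padding, int64_t stride) {
  const int64_t dkernel = dilation * (filter - 1) + 1;
  return (in + 2 * padding - dkernel) / stride + 1;
}

// Hypothetical helper: compute output spatial dims, propagating -1 for any
// dim that is still unknown during compile-time shape inference.
std::vector<int64_t> InferSpatialDims(bool is_runtime,
                                      const std::vector<int64_t>& in_spatial,
                                      const std::vector<int64_t>& filter_spatial,
                                      const std::vector<int64_t>& dilations,
                                      const std::vector<int64_t>& paddings,
                                      const std::vector<int64_t>& strides) {
  std::vector<int64_t> out;
  for (size_t i = 0; i < strides.size(); ++i) {
    if (!is_runtime && (in_spatial[i] <= 0 || filter_spatial[i] <= 0)) {
      out.push_back(-1);  // defer the real size until a tensor is fed
    } else {
      out.push_back(ConvOutSize(in_spatial[i], filter_spatial[i], dilations[i],
                                paddings[i], strides[i]));
    }
  }
  return out;
}

int main() {
  // Compile time: H and W are unknown (-1), so the output dims stay -1.
  auto ct = InferSpatialDims(false, {-1, -1}, {3, 3}, {1, 1}, {1, 1}, {1, 1});
  // Runtime: 32x32 input, 3x3 filter, padding 1, stride 1 -> 32x32 output.
  auto rt = InferSpatialDims(true, {32, 32}, {3, 3}, {1, 1}, {1, 1}, {1, 1});
  std::cout << ct[0] << " " << ct[1] << "\n";  // prints: -1 -1
  std::cout << rt[0] << " " << rt[1] << "\n";  // prints: 32 32
  return 0;
}
```

The equality checks on the offset and mask maps follow the same idea: their heights and widths can only be compared against the output once concrete shapes exist, hence the if (ctx->IsRuntime()) guard in the hunk.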
@@ -36,12 +36,6 @@ class PriorBoxOp : public framework::OperatorWithKernel {
PADDLE_ENFORCE(image_dims.size() == 4, "The layout of image is NCHW.");
PADDLE_ENFORCE(input_dims.size() == 4, "The layout of input is NCHW.");
PADDLE_ENFORCE_LT(input_dims[2], image_dims[2],
"The height of input must smaller than image.");
PADDLE_ENFORCE_LT(input_dims[3], image_dims[3],
"The width of input must smaller than image.");
auto min_sizes = ctx->Attrs().Get<std::vector<float>>("min_sizes");
auto max_sizes = ctx->Attrs().Get<std::vector<float>>("max_sizes");
auto variances = ctx->Attrs().Get<std::vector<float>>("variances");
......
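The prior_box hunk above removes the strict height/width comparisons against the image: a feature map derived from a data layer declared as [None, 3, None, None] reports -1 for its unknown dims during compile-time InferShape, so the comparison is not meaningful before runtime. A minimal sketch follows (not the Paddle source; PriorBoxOutShape is a hypothetical helper) of how such -1 dims simply flow into the [H, W, num_priors, 4] output shape that the new TestPriorBox2 case below asserts on.

```cpp
#include <cstdint>
#include <iostream>
#include <vector>

// Hypothetical helper: prior_box emits num_priors boxes (4 coordinates each)
// per feature-map location, giving an output layout of [H, W, num_priors, 4].
std::vector<int64_t> PriorBoxOutShape(int64_t feat_h, int64_t feat_w,
                                      int64_t num_priors) {
  return {feat_h, feat_w, num_priors, 4};
}

int main() {
  // Compile time: unknown H/W are carried through as -1.
  for (auto d : PriorBoxOutShape(-1, -1, /*num_priors=*/2)) std::cout << d << " ";
  std::cout << "\n";  // prints: -1 -1 2 4
  // Runtime: concrete dims, e.g. a 16x16 feature map.
  for (auto d : PriorBoxOutShape(16, 16, /*num_priors=*/2)) std::cout << d << " ";
  std::cout << "\n";  // prints: 16 16 2 4
  return 0;
}
```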
@@ -152,6 +152,25 @@ class TestPriorBox(unittest.TestCase):
assert box.shape[3] == 4
class TestPriorBox2(unittest.TestCase):
def test_prior_box(self):
program = Program()
with program_guard(program):
data_shape = [None, 3, None, None]
images = fluid.data(name='pixel', shape=data_shape, dtype='float32')
conv1 = fluid.layers.conv2d(images, 3, 3, 2)
box, var = layers.prior_box(
input=conv1,
image=images,
min_sizes=[100.0],
aspect_ratios=[1.],
flip=True,
clip=True)
assert len(box.shape) == 4
assert box.shape == var.shape
assert box.shape[3] == 4
class TestDensityPriorBox(unittest.TestCase):
def test_density_prior_box(self):
program = Program()
......
@@ -2830,8 +2830,7 @@ class TestBook(LayerTest):
print(str(program))
def test_deformable_conv(self):
with program_guard(fluid.default_main_program(),
fluid.default_startup_program()):
with self.static_graph():
input = layers.data(
name='input',
append_batch_size=False,
@@ -2856,6 +2855,23 @@ class TestBook(LayerTest):
padding=1)
return (out)
def test_deformable_conv2(self):
with self.static_graph():
input = fluid.data(
name='input', shape=[None, 3, None, None], dtype="float32")
offset = fluid.data(
name='offset', shape=[None, 18, None, None], dtype="float32")
mask = fluid.data(
name='mask', shape=[None, 9, None, None], dtype="float32")
out = layers.deformable_conv(
input=input,
offset=offset,
mask=mask,
num_filters=2,
filter_size=3,
padding=1)
return (out)
def test_unfold(self):
with self.static_graph():
x = layers.data(name='x', shape=[3, 20, 20], dtype='float32')
......