From f67f0cae50a6f2d2801e645586dac2d53703255c Mon Sep 17 00:00:00 2001
From: wangyang59
Date: Wed, 21 Mar 2018 14:05:12 -0700
Subject: [PATCH] finished testing cpu bilinear_interp_op

---
 paddle/fluid/operators/bilinear_interp_op.cc  |  7 +-
 paddle/fluid/operators/bilinear_interp_op.h   | 10 +--
 .../unittests/test_bilinear_interp_op.py      | 88 +++++++++++++++++++
 3 files changed, 97 insertions(+), 8 deletions(-)
 create mode 100644 python/paddle/fluid/tests/unittests/test_bilinear_interp_op.py

diff --git a/paddle/fluid/operators/bilinear_interp_op.cc b/paddle/fluid/operators/bilinear_interp_op.cc
index 896ef7bed25..c8ccc47bedb 100644
--- a/paddle/fluid/operators/bilinear_interp_op.cc
+++ b/paddle/fluid/operators/bilinear_interp_op.cc
@@ -27,13 +27,13 @@ class BilinearInterpOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE(ctx->HasOutput("Out"),
                    "Output(Out) of BilinearInterpOp should not be null.");
 
-    auto dim_x = ctx->GetInputDim("Input");  // NCHW format
+    auto dim_x = ctx->GetInputDim("X");  // NCHW format
     int out_h = ctx->Attrs().Get<int>("out_h");
     int out_w = ctx->Attrs().Get<int>("out_w");
     PADDLE_ENFORCE_EQ(dim_x.size(), 4, "X's dimension must be 4");
 
     std::vector<int64_t> dim_out({dim_x[0], dim_x[1], out_h, out_w});
-    ctx->SetOutputDim("Output", framework::make_ddim(dim_out));
+    ctx->SetOutputDim("Out", framework::make_ddim(dim_out));
   }
 };
 
@@ -83,4 +83,5 @@ namespace ops = paddle::operators;
 REGISTER_OP(bilinear_interp, ops::BilinearInterpOp, ops::BilinearInterpOpMaker,
             bilinear_interp_grad, ops::BilinearInterpOpGrad);
 REGISTER_OP_CPU_KERNEL(bilinear_interp, ops::BilinearInterpKernel<float>);
-REGISTER_OP_CPU_KERNEL(bilinear_interp_grad, ops::BilinearInterpKernel<float>);
+REGISTER_OP_CPU_KERNEL(bilinear_interp_grad,
+                       ops::BilinearInterpGradKernel<float>);
diff --git a/paddle/fluid/operators/bilinear_interp_op.h b/paddle/fluid/operators/bilinear_interp_op.h
index 9571d8699c7..fe4cf0b6098 100644
--- a/paddle/fluid/operators/bilinear_interp_op.h
+++ b/paddle/fluid/operators/bilinear_interp_op.h
@@ -46,7 +46,7 @@ class BilinearInterpKernel : public framework::OpKernel<T> {
     T ratio_w = (out_w > 1) ? static_cast<T>(in_w - 1) / (out_w - 1) : 0.f;
 
     if (in_h == out_h && in_w == out_w) {
-      memcpy(output, input, product(input_t->dims()) * sizeof(T));
+      memcpy(output, input, input_t->numel() * sizeof(T));
     } else {
       for (int k = 0; k < batch_size; ++k) {  // loop for batches
         for (int i = 0; i < out_h; ++i) {     // loop for images
@@ -123,10 +123,10 @@ class BilinearInterpGradKernel : public framework::OpKernel<T> {
           const T* out_pos = &d_output[k * out_chw + i * out_w + j];
 
           for (int c = 0; c < channels; ++c) {  // loop for channels
-            in_pos[0] = h2lambda * w2lambda * out_pos[0];
-            in_pos[wid] = h2lambda * w1lambda * out_pos[0];
-            in_pos[hid * in_w] = h1lambda * w2lambda * out_pos[0];
-            in_pos[hid * in_w + wid] = h1lambda * w1lambda * out_pos[0];
+            in_pos[0] += h2lambda * w2lambda * out_pos[0];
+            in_pos[wid] += h2lambda * w1lambda * out_pos[0];
+            in_pos[hid * in_w] += h1lambda * w2lambda * out_pos[0];
+            in_pos[hid * in_w + wid] += h1lambda * w1lambda * out_pos[0];
             in_pos += in_hw;
             out_pos += out_hw;
           }
diff --git a/python/paddle/fluid/tests/unittests/test_bilinear_interp_op.py b/python/paddle/fluid/tests/unittests/test_bilinear_interp_op.py
new file mode 100644
index 00000000000..b5ec3942e8a
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/test_bilinear_interp_op.py
@@ -0,0 +1,88 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+import numpy as np
+from op_test import OpTest
+
+
+def bilinear_interp_np(input, out_h, out_w):
+    batch_size, channel, in_h, in_w = input.shape
+    if out_h > 1:
+        ratio_h = (in_h - 1.0) / (out_h - 1.0)
+    else:
+        ratio_h = 0.0
+    if out_w > 1:
+        ratio_w = (in_w - 1.0) / (out_w - 1.0)
+    else:
+        ratio_w = 0.0
+
+    out = np.zeros((batch_size, channel, out_h, out_w))
+    for i in range(out_h):
+        h = int(ratio_h * i)
+        hid = 1 if h < in_h - 1 else 0
+        h1lambda = ratio_h * i - h
+        h2lambda = 1.0 - h1lambda
+        for j in range(out_w):
+            w = int(ratio_w * j)
+            wid = 1 if w < in_w - 1 else 0
+            w1lambda = ratio_w * j - w
+            w2lambda = 1.0 - w1lambda
+
+            out[:, :, i, j] = h2lambda*(w2lambda*input[:, :, h, w] +
+                                        w1lambda*input[:, :, h, w+wid]) + \
+                h1lambda*(w2lambda*input[:, :, h+hid, w] +
+                          w1lambda*input[:, :, h+hid, w+wid])
+    return out.astype("float32")
+
+
+class TestBilinearInterpOp(OpTest):
+    def setUp(self):
+        self.init_test_case()
+        self.op_type = "bilinear_interp"
+        input_np = np.random.random(self.input_shape).astype("float32")
+        output_np = bilinear_interp_np(input_np, self.out_h, self.out_w)
+
+        self.inputs = {'X': input_np}
+        self.attrs = {'out_h': self.out_h, 'out_w': self.out_w}
+        self.outputs = {'Out': output_np}
+
+    def test_check_output(self):
+        self.check_output()
+
+    def test_check_grad(self):
+        self.check_grad(['X'], 'Out', in_place=True)
+
+    def init_test_case(self):
+        self.input_shape = [2, 3, 4, 4]
+        self.out_h = 2
+        self.out_w = 2
+
+
+class TestCase1(TestBilinearInterpOp):
+    def init_test_case(self):
+        self.input_shape = [4, 1, 7, 8]
+        self.out_h = 1
+        self.out_w = 1
+
+
+class TestCase2(TestBilinearInterpOp):
+    def init_test_case(self):
+        self.input_shape = [3, 3, 9, 6]
+        self.out_h = 12
+        self.out_w = 12
+
+
+if __name__ == "__main__":
+    unittest.main()
-- 
GitLab
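
The substantive fix in BilinearInterpGradKernel above is the switch from assignment (=) to accumulation (+=) when scattering each output gradient back to its four input neighbours: whenever out_h/out_w exceed in_h/in_w, several output pixels map to the same source cell, so their contributions must sum rather than overwrite one another. Below is a minimal NumPy sketch of that backward pass, assuming the same NCHW layout and corner-aligned ratios as the kernel; bilinear_interp_grad_np is an illustrative helper for this note, not part of the patch or the op_test API.

import numpy as np


def bilinear_interp_grad_np(d_out, in_h, in_w):
    """Reference gradient of bilinear interpolation w.r.t. the input.

    Mirrors the C++ BilinearInterpGradKernel: each output gradient is
    scattered to the four neighbouring input cells with the same lambda
    weights the forward pass used.
    """
    batch_size, channel, out_h, out_w = d_out.shape
    ratio_h = (in_h - 1.0) / (out_h - 1.0) if out_h > 1 else 0.0
    ratio_w = (in_w - 1.0) / (out_w - 1.0) if out_w > 1 else 0.0

    d_in = np.zeros((batch_size, channel, in_h, in_w), dtype=d_out.dtype)
    for i in range(out_h):
        h = int(ratio_h * i)
        hid = 1 if h < in_h - 1 else 0
        h1lambda = ratio_h * i - h
        h2lambda = 1.0 - h1lambda
        for j in range(out_w):
            w = int(ratio_w * j)
            wid = 1 if w < in_w - 1 else 0
            w1lambda = ratio_w * j - w
            w2lambda = 1.0 - w1lambda

            # Accumulate, never assign: when upsampling, multiple (i, j)
            # land on the same source cell (h, w), which is exactly the
            # bug the = -> += change in the C++ kernel fixes.
            d_in[:, :, h, w] += h2lambda * w2lambda * d_out[:, :, i, j]
            d_in[:, :, h, w + wid] += h2lambda * w1lambda * d_out[:, :, i, j]
            d_in[:, :, h + hid, w] += h1lambda * w2lambda * d_out[:, :, i, j]
            d_in[:, :, h + hid, w + wid] += \
                h1lambda * w1lambda * d_out[:, :, i, j]
    return d_in


if __name__ == "__main__":
    # Sanity check: the four lambda weights sum to 1 for every output
    # pixel, so the scatter conserves total gradient mass. The sums below
    # match only when contributions accumulate rather than overwrite.
    d_out = np.ones((2, 3, 8, 8), dtype="float32")
    d_in = bilinear_interp_grad_np(d_out, in_h=4, in_w=4)
    assert np.isclose(d_in.sum(), d_out.sum())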