#   Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest

import numpy as np

import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid import Program, program_guard
from op_test import OpTest

import gradient_checker
from decorator_helper import prog_scope
import paddle.fluid.layers as layers


class TestFlipOp_API(unittest.TestCase):
    """Test the paddle.flip API in static graph and dygraph modes."""

    def test_static_graph(self):
        startup_program = fluid.Program()
        train_program = fluid.Program()
        with fluid.program_guard(train_program, startup_program):
            axis = [0]
            input = fluid.data(name='input', dtype='float32', shape=[2, 3])
            output = paddle.flip(input, axis)
            output = paddle.flip(output, -1)
            output = output.flip(0)
            # Prefer the GPU when this build supports CUDA.
            place = fluid.CPUPlace()
            if fluid.core.is_compiled_with_cuda():
                place = fluid.CUDAPlace(0)
            exe = fluid.Executor(place)
            exe.run(startup_program)
            img = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32)
            res = exe.run(train_program,
                          feed={'input': img},
                          fetch_list=[output])
            out_np = np.array(res[0])
            # Flipping axes [0], -1, and 0 in sequence is equivalent to a
            # single flip along the last axis.
            out_ref = np.array([[3, 2, 1], [6, 5, 4]]).astype(np.float32)
            self.assertTrue((out_np == out_ref).all(),
                            msg='flip output is wrong, out =' + str(out_np))

    def test_dygraph(self):
        img = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32)
        with fluid.dygraph.guard():
            inputs = fluid.dygraph.to_variable(img)
            ret = paddle.flip(inputs, [0])
            ret = ret.flip(0)
            ret = paddle.flip(ret, 1)
            out_ref = np.array([[3, 2, 1], [6, 5, 4]]).astype(np.float32)

            self.assertTrue(
                (ret.numpy() == out_ref).all(),
                msg='flip output is wrong, out =' + str(ret.numpy()))


class TestFlipOp(OpTest):

    def setUp(self):
        self.op_type = 'flip'
        self.python_api = paddle.tensor.flip
        self.init_test_case()
        self.inputs = {'X': np.random.random(self.in_shape).astype('float64')}
        self.init_attrs()
        self.outputs = {'Out': self.calc_ref_res()}

    def init_attrs(self):
        self.attrs = {"axis": self.axis}

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(["X"], "Out", check_eager=True)

    def init_test_case(self):
        self.in_shape = (6, 4, 2, 3)
        self.axis = [0, 1]

    def calc_ref_res(self):
        # Compute the reference result with numpy, flipping one axis at a time.
        res = self.inputs['X']
        if isinstance(self.axis, int):
            return np.flip(res, self.axis)
        for axis in self.axis:
            res = np.flip(res, axis)
        return res


class TestFlipOpAxis1(TestFlipOp):

    def init_test_case(self):
        self.in_shape = (2, 4, 4)
        self.axis = [0]


class TestFlipOpAxis2(TestFlipOp):

    def init_test_case(self):
        self.in_shape = (4, 4, 6, 3)
        self.axis = [0, 2]


class TestFlipOpAxis3(TestFlipOp):

    def init_test_case(self):
        self.in_shape = (4, 3, 1)
        self.axis = [0, 1, 2]


class TestFlipOpAxis4(TestFlipOp):

    def init_test_case(self):
        self.in_shape = (6, 4, 2, 2)
        self.axis = [0, 1, 2, 3]


class TestFlipOpEmptyAxis(TestFlipOp):

    def init_test_case(self):
        self.in_shape = (6, 4, 2, 2)
        self.axis = []


class TestFlipOpNegAxis(TestFlipOp):

    def init_test_case(self):
        self.in_shape = (6, 4, 2, 2)
        self.axis = [-1]


class TestFlipDoubleGradCheck(unittest.TestCase):
    def flip_wrapper(self, x):
        return paddle.flip(x[0], [0, 1])

    @prog_scope()
    def func(self, place):
        # The shape of the input variable must be fully specified and must
        # not include -1.
        eps = 0.005
        dtype = np.float32

        data = layers.data('data', [3, 2, 2], False, dtype)
        data.persistable = True
        out = paddle.flip(data, [0, 1])
        data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)

        gradient_checker.double_grad_check([data],
                                           out,
                                           x_init=[data_arr],
                                           place=place,
                                           eps=eps)
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        gradient_checker.double_grad_check_for_dygraph(self.flip_wrapper,
                                                       [data],
                                                       out,
                                                       x_init=[data_arr],
                                                       place=place)

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


class TestFlipTripleGradCheck(unittest.TestCase):

    def flip_wrapper(self, x):
        return paddle.flip(x[0], [0, 1])

    @prog_scope()
    def func(self, place):
        # The shape of the input variable must be fully specified and must
        # not include -1.
        eps = 0.005
        dtype = np.float32

        data = layers.data('data', [3, 2, 2], False, dtype)
        data.persistable = True
        out = paddle.flip(data, [0, 1])
        data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)

        gradient_checker.triple_grad_check([data],
                                           out,
                                           x_init=[data_arr],
                                           place=place,
                                           eps=eps)
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        gradient_checker.triple_grad_check_for_dygraph(self.flip_wrapper,
                                                       [data],
                                                       out,
                                                       x_init=[data_arr],
                                                       place=place)

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


if __name__ == "__main__":
    paddle.enable_static()
    unittest.main()