diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_transpose_flatten_concat_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_transpose_flatten_concat_fuse_pass.py
index 5c91ceeb44a0352ea9a753fe7475496048042b1f..6444264f80fb52dd655a26cca627b26ea13a4d76 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_transpose_flatten_concat_fuse_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_transpose_flatten_concat_fuse_pass.py
@@ -22,10 +22,8 @@ import paddle.fluid.core as core
 class TransposeFlattenConcatFusePassTest(InferencePassTest):
     def setUp(self):
         with fluid.program_guard(self.main_program, self.startup_program):
-            data1 = fluid.data(
-                name="data1", shape=[8, 32, 128], dtype="float32")
-            data2 = fluid.data(
-                name="data2", shape=[8, 32, 128], dtype="float32")
+            data1 = fluid.data(name="data1", shape=[5, 5, 5], dtype="float32")
+            data2 = fluid.data(name="data2", shape=[5, 5, 5], dtype="float32")
             trans1 = fluid.layers.transpose(data1, perm=[2, 1, 0])
             trans2 = fluid.layers.transpose(data2, perm=[2, 1, 0])
             flatt1 = fluid.layers.flatten(trans1)
@@ -36,8 +34,8 @@ class TransposeFlattenConcatFusePassTest(InferencePassTest):
             out = fluid.layers.batch_norm(concat_out, is_test=True)
 
         self.feeds = {
-            "data1": np.random.random([8, 32, 128]).astype("float32"),
-            "data2": np.random.random([8, 32, 128]).astype("float32")
+            "data1": np.random.random([5, 5, 5]).astype("float32"),
+            "data2": np.random.random([5, 5, 5]).astype("float32")
         }
         self.fetch_list = [out]
 
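For reference, a minimal NumPy sketch (not part of the change) of the transpose → flatten → concat pattern this test exercises, reproduced on the reduced `[5, 5, 5]` input shape. The resulting shapes are assumptions based on `fluid.layers.flatten`'s default `axis=1` behavior:

```python
import numpy as np

# Two inputs with the new, smaller test shape.
data1 = np.random.random([5, 5, 5]).astype("float32")
data2 = np.random.random([5, 5, 5]).astype("float32")

# transpose(perm=[2, 1, 0]) -> shape stays [5, 5, 5] for a cubic input.
trans1 = np.transpose(data1, axes=[2, 1, 0])
trans2 = np.transpose(data2, axes=[2, 1, 0])

# flatten (default axis=1 keeps the leading dim) -> shape [5, 25].
flatt1 = trans1.reshape(trans1.shape[0], -1)
flatt2 = trans2.reshape(trans2.shape[0], -1)

# concat along axis=1 -> shape [5, 50], the tensor fed to batch_norm in the test.
concat_out = np.concatenate([flatt1, flatt2], axis=1)
assert concat_out.shape == (5, 50)
```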