diff --git a/x2paddle/op_mapper/tf_op_mapper.py b/x2paddle/op_mapper/tf_op_mapper.py
index 73b0da50d797e2eb9fca0c724499e0ed4fdbf367..d9e565a115c17d518de9757540825d391dd1feee 100644
--- a/x2paddle/op_mapper/tf_op_mapper.py
+++ b/x2paddle/op_mapper/tf_op_mapper.py
@@ -591,7 +591,7 @@ class TFOpMapper(OpMapper):
         # to change [192, -1]->[-1, 192], allways put -1 in the first dimension
         # optimization for Paddle-Lite
         in_shape = input.out_shapes[0]
-        if is_variable and in_shape.count(-1) < 1:
+        if not is_variable and in_shape.count(-1) < 1:
             total_size = 1
             for i in range(len(in_shape)):
                 total_size *= in_shape[i]
diff --git a/x2paddle/op_mapper/tf_op_mapper_nhwc.py b/x2paddle/op_mapper/tf_op_mapper_nhwc.py
index 9399c6d7a13f26fee17af9a76a52f6d5de5dba8b..cc1e355c866b90fb3cdc8e0d2e7065e6b357c12d 100644
--- a/x2paddle/op_mapper/tf_op_mapper_nhwc.py
+++ b/x2paddle/op_mapper/tf_op_mapper_nhwc.py
@@ -74,7 +74,7 @@ class TFOpMapperNHWC(OpMapper):
         unsupported_ops = set()
         sys.stderr.write("Total nodes: {}\n".format(len(self.graph.topo_sort)))
         for i, node_name in enumerate(self.graph.topo_sort):
-            sys.stderr.write("\rConverting node {} ... ".format(i))
+            sys.stderr.write("\rConverting node {} ... ".format(i + 1))
             node = self.graph.get_node(node_name)
             op = node.layer_type
             if op in self.directly_map_ops:
@@ -99,7 +99,7 @@ class TFOpMapperNHWC(OpMapper):
             for op in unsupported_ops:
                 print("========== {} ============".format(op))
             sys.exit(-1)
-        sys.stderr.write("\nDone\n")
+        sys.stderr.write("\nDone!\n")
 
     def add_omit_nodes(self, in_node_name, out_node_name):
         in_node = self.graph.get_node(in_node_name)