Commit 027fb18c authored by: W wjj19950828

resolve conflict

@@ -50,11 +50,11 @@ X2Paddle is a model conversion tool in the PaddlePaddle ecosystem, dedicated to helping users of other deep learning…
### Environment dependencies
- python >= 3.5
- paddlepaddle >= 2.0.0
- paddlepaddle >= 2.2.2
- tensorflow == 1.14 (required when converting TensorFlow models)
- onnx >= 1.6.0 (required when converting ONNX models)
- torch >= 1.5.0 (required when converting PyTorch models)
- paddlelite == 2.9.0 (required for one-click conversion to the Paddle-Lite format)
- paddlelite >= 2.9.0 (required for one-click conversion to the Paddle-Lite format; latest version recommended)
### pip installation (recommended)
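Since the paddlepaddle floor moves from 2.0.0 to 2.2.2, here is a quick illustrative check of the installed version; this snippet is not part of the commit and only mirrors the requirement listed above.

```python
# Illustrative only (not part of this commit): verify the installed
# paddlepaddle release against the ">= 2.2.2" requirement listed above.
from packaging import version  # assumed available in the environment

import paddle

required = "2.2.2"
if version.parse(paddle.__version__) < version.parse(required):
    raise RuntimeError(
        f"paddlepaddle >= {required} is required, found {paddle.__version__}")
print(f"paddlepaddle {paddle.__version__} satisfies the requirement")
```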
@@ -141,30 +141,35 @@ def tf2paddle(model_path,
version = tf.__version__
if version >= '2.0.0' or version < '1.0.0':
logging.info(
"[ERROR] 1.0.0<=tensorflow<2.0.0 is required, and v1.14.0 is recommended"
"[ERROR] 1.0.0<=TensorFlow<2.0.0 is required, and v1.14.0 is recommended"
)
return
except:
logging.info(
"[ERROR] Tensorflow is not installed, use \"pip install tensorflow\"."
"[ERROR] TensorFlow is not installed, use \"pip install TensorFlow\"."
)
return
from x2paddle.decoder.tf_decoder import TFDecoder
from x2paddle.op_mapper.tf2paddle.tf_op_mapper import TFOpMapper
logging.info("Now translating model from tensorflow to paddle.")
logging.info("Now translating model from TensorFlow to Paddle.")
model = TFDecoder(model_path, define_input_shape=define_input_shape)
mapper = TFOpMapper(model)
mapper.paddle_graph.build()
logging.info("Model optimizing ...")
from x2paddle.optimizer.optimizer import GraphOptimizer
graph_opt = GraphOptimizer(source_frame="tf")
graph_opt.optimize(mapper.paddle_graph)
logging.info("Model optimized!")
mapper.paddle_graph.gen_model(save_dir)
logging.info("Successfully exported Paddle static graph model!")
ConverterCheck(task="TensorFlow", convert_state="Success").start()
if convert_to_lite:
logging.info("Now translating model from Paddle to Paddle Lite ...")
ConverterCheck(task="TensorFlow", lite_state="Start").start()
convert2lite(save_dir, lite_valid_places, lite_model_type)
logging.info("Successfully exported Paddle Lite support model!")
ConverterCheck(task="TensorFlow", lite_state="Success").start()
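For orientation, a minimal sketch of calling this converter directly from Python; the `x2paddle.convert` import path and the argument defaults are assumptions based on the upstream layout and are not shown in this hunk.

```python
# Sketch only: drive the TensorFlow converter shown above from Python.
# The import path and the argument defaults are assumptions.
from x2paddle.convert import tf2paddle

tf2paddle(
    model_path="frozen_model.pb",  # frozen TF 1.x graph, matching the 1.0.0<=TensorFlow<2.0.0 check above
    save_dir="pd_model_tf",        # directory passed to paddle_graph.gen_model(save_dir)
    define_input_shape=False)      # forwarded to TFDecoder as in the hunk above
```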
@@ -193,12 +198,15 @@ def caffe2paddle(proto_file,
from x2paddle.optimizer.optimizer import GraphOptimizer
graph_opt = GraphOptimizer(source_frame="caffe")
graph_opt.optimize(mapper.paddle_graph)
logging.info("Model optimized.")
logging.info("Model optimized!")
mapper.paddle_graph.gen_model(save_dir)
logging.info("Successfully exported Paddle static graph model!")
ConverterCheck(task="Caffe", convert_state="Success").start()
if convert_to_lite:
logging.info("Now translating model from Paddle to Paddle Lite ...")
ConverterCheck(task="Caffe", lite_state="Start").start()
convert2lite(save_dir, lite_valid_places, lite_model_type)
logging.info("Successfully exported Paddle Lite support model!")
ConverterCheck(task="Caffe", lite_state="Success").start()
@@ -234,10 +242,13 @@ def onnx2paddle(model_path,
graph_opt.optimize(mapper.paddle_graph)
logging.info("Model optimized.")
mapper.paddle_graph.gen_model(save_dir)
logging.info("Successfully exported Paddle static graph model!")
ConverterCheck(task="ONNX", convert_state="Success").start()
if convert_to_lite:
logging.info("Now translating model from Paddle to Paddle Lite ...")
ConverterCheck(task="ONNX", lite_state="Start").start()
convert2lite(save_dir, lite_valid_places, lite_model_type)
logging.info("Successfully exported Paddle Lite support model!")
ConverterCheck(task="ONNX", lite_state="Success").start()
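The Caffe and ONNX branches gain the same optional Paddle Lite export as the TensorFlow path. A hedged sketch of triggering it through the ONNX entry point; only the parameter names appear in these hunks, so the argument values below are assumptions.

```python
# Sketch only: ONNX -> Paddle conversion followed by the optional
# convert2lite(...) step shown above; argument values are assumptions.
from x2paddle.convert import onnx2paddle

onnx2paddle(
    model_path="model.onnx",
    save_dir="pd_model_onnx",
    convert_to_lite=True,            # enables the Paddle Lite branch added in this commit
    lite_valid_places="arm",         # assumed value describing the target hardware places
    lite_model_type="naive_buffer")  # assumed Paddle Lite serialization format
```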
@@ -261,17 +272,17 @@ def pytorch2paddle(module,
version_sum = int(v0) * 100 + int(v1) * 10 + int(v2)
if version_sum < 150:
logging.info(
"[ERROR] pytorch>=1.5.0 is required, 1.6.0 is the most recommended"
"[ERROR] PyTorch>=1.5.0 is required, 1.6.0 is the most recommended"
)
return
if version_sum > 160:
logging.info("[WARNING] pytorch==1.6.0 is recommended")
logging.info("[WARNING] PyTorch==1.6.0 is recommended")
except:
logging.info(
"[ERROR] Pytorch is not installed, use \"pip install torch==1.6.0 torchvision\"."
"[ERROR] PyTorch is not installed, use \"pip install torch==1.6.0 torchvision\"."
)
return
logging.info("Now translating model from pytorch to paddle.")
logging.info("Now translating model from PyTorch to Paddle.")
from x2paddle.decoder.pytorch_decoder import ScriptDecoder, TraceDecoder
from x2paddle.op_mapper.pytorch2paddle.pytorch_op_mapper import PyTorchOpMapper
@@ -286,13 +297,16 @@ def pytorch2paddle(module,
from x2paddle.optimizer.optimizer import GraphOptimizer
graph_opt = GraphOptimizer(source_frame="pytorch", jit_type=jit_type)
graph_opt.optimize(mapper.paddle_graph)
logging.info("Model optimized.")
logging.info("Model optimized!")
mapper.paddle_graph.gen_model(
save_dir, jit_type=jit_type, enable_code_optim=enable_code_optim)
logging.info("Successfully exported Paddle static graph model!")
ConverterCheck(task="PyTorch", convert_state="Success").start()
if convert_to_lite:
logging.info("Now translating model from Paddle to Paddle Lite ...")
ConverterCheck(task="PyTorch", lite_state="Start").start()
convert2lite(save_dir, lite_valid_places, lite_model_type)
logging.info("Successfully exported Paddle Lite support model!")
ConverterCheck(task="PyTorch", lite_state="Success").start()
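For reference, a hedged sketch of driving the PyTorch entry point above; the `input_examples` argument is based on the upstream API and does not appear in this hunk, so treat it as an assumption.

```python
# Sketch only: trace-based PyTorch -> Paddle conversion via the function above.
# The import path and the input_examples argument are assumptions.
import torch
import torchvision
from x2paddle.convert import pytorch2paddle

model = torchvision.models.resnet18(pretrained=True)
model.eval()
dummy_input = torch.randn(1, 3, 224, 224)

pytorch2paddle(
    module=model,
    save_dir="pd_model_trace",
    jit_type="trace",              # "trace" or "script", handled by the decoders above
    input_examples=[dummy_input])  # assumed tracing inputs, not visible in this diff
```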
@@ -106,7 +106,8 @@ class NMS(object):
if bboxes.shape[0] == 1:
batch = paddle.zeros_like(clas, dtype="int64")
else:
bboxes_count = bboxes.shape[1]
bboxes_count = paddle.shape(bboxes)[1]
bboxes_count = paddle.cast(bboxes_count, dtype="int64")
batch = paddle.divide(index, bboxes_count)
index = paddle.mod(index, bboxes_count)
res = paddle.concat([batch, clas, index], axis=1)
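Using `paddle.shape(bboxes)[1]` instead of `bboxes.shape[1]` keeps the box count as a tensor, so the arithmetic that follows still works when that dimension is dynamic in the exported static graph. A standalone illustration of the same batch/index recovery, with toy values and `paddle.floor_divide` standing in for the integer division:

```python
# Standalone illustration (not the converter code): recover the batch id and
# the per-image box index from a flattened index with a tensor-valued count.
import paddle

bboxes = paddle.rand([2, 5, 4])                      # [batch, num_boxes, 4]
index = paddle.to_tensor([[3], [7]], dtype="int64")  # flattened indices across the batch

bboxes_count = paddle.shape(bboxes)[1]               # stays a tensor even if the dim is dynamic
bboxes_count = paddle.cast(bboxes_count, dtype="int64")
batch = paddle.floor_divide(index, bboxes_count)     # 3 // 5 = 0, 7 // 5 = 1
box_in_image = paddle.mod(index, bboxes_count)       # 3 % 5 = 3, 7 % 5 = 2
print(batch.numpy().ravel(), box_in_image.numpy().ravel())
```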
@@ -620,15 +620,23 @@ class OpSet9():
pads) # NCHW
if assume_pad:
paddle_op = 'paddle.nn.Pad2D'
# x1_begin,x2_begin,x3_begin,x4_begin,x1_end,x2_end,x3_end,x4_end->x1_begin,x1_end,x2_begin,x2_end,x3_begin,x3_end,x4_begin,x4_end
paddings = np.array(pads).reshape(
(2, -1)).transpose().astype("int32")
paddings = np.flip(paddings, axis=0).flatten().tolist()
if sum(paddings[:4]) == 0:
paddings = paddings[4:]
if mode == 'constant':
paddings = paddings.flatten().tolist()
layer_attrs['padding'] = paddings
else:
layer_attrs["pad"] = paddings
paddle_op = "custom_layer:PadAllDim4WithOneInput"
paddings = np.flip(paddings, axis=0).flatten().tolist()
if sum(paddings[:4]) == 0:
paddings = paddings[4:]
layer_attrs['padding'] = paddings
else:
layer_attrs["pad"] = paddings
paddle_op = "custom_layer:PadAllDim4WithOneInput"
else:
paddle_op = 'paddle.nn.functional.pad'
layer_attrs["pad"] = np.array(pads).tolist()
else:
pad_data_temp = pads[0::2]
pad_data_all = []
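The array manipulation above regroups the ONNX pad list (all begin values first, then all end values) into per-axis (begin, end) pairs and reverses the axis order, as the inline comment describes. A small NumPy illustration of just that reordering, with made-up values:

```python
# Illustration only: the pad reordering used by the Pad2D branch above.
import numpy as np

# ONNX layout: x1_begin, x2_begin, x3_begin, x4_begin, x1_end, x2_end, x3_end, x4_end
pads = [1, 2, 3, 4, 5, 6, 7, 8]

paddings = np.array(pads).reshape((2, -1)).transpose().astype("int32")
# rows are per-axis (begin, end) pairs: [[1, 5], [2, 6], [3, 7], [4, 8]]
paddings = np.flip(paddings, axis=0).flatten().tolist()
# axis order reversed, then flattened: [4, 8, 3, 7, 2, 6, 1, 5]
print(paddings)
```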
@@ -1464,11 +1472,18 @@ class OpSet9():
outputs_list.append("{}_p{}".format(node.layer_name, i))
else:
outputs_list.append(node.name)
self.paddle_graph.add_layer(
'paddle.split',
inputs={"x": val_x.name},
outputs=outputs_list,
**layer_attrs)
if len(split) > 1:
self.paddle_graph.add_layer(
'paddle.split',
inputs={"x": val_x.name},
outputs=outputs_list,
**layer_attrs)
else:
self.paddle_graph.add_layer(
"paddle.cast",
inputs={"x": val_x.name},
outputs=outputs_list,
dtype=string(val_x.dtype))
@print_mapping_info
def Reshape(self, node):
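The new branch only emits `paddle.split` when there is more than one output; a single-output Split is mapped to `paddle.cast` with the tensor's own dtype, which is effectively an identity copy onto the requested output name. A toy illustration of both paths:

```python
# Toy illustration of the two Split code paths above.
import paddle

x = paddle.rand([4, 6])  # float32

# len(split) > 1: a real split (axis chosen here just for the example)
a, b, c = paddle.split(x, num_or_sections=3, axis=1)

# len(split) == 1: casting to the tensor's own dtype is an identity copy
y = paddle.cast(x, dtype="float32")

print(a.shape, b.shape, c.shape, y.shape)  # [4, 2] [4, 2] [4, 2] [4, 6]
```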
@@ -2698,28 +2713,36 @@ class OpSet9():
layer_outputs = [nn_op_name, output_name]
boxes = self.graph.get_input_node(node, idx=0, copy=True)
scores = self.graph.get_input_node(node, idx=1, copy=True)
num_classes = scores.out_shapes[0][1]
inputs_len = len(node.layer.input)
layer_attrs = dict()
layer_attrs["keep_top_k"] = -1
layer_attrs["nms_threshold"] = 0.0
layer_attrs["score_threshold"] = 0.0
if inputs_len > 2:
max_output_boxes_per_class = self.graph.get_input_node(
node, idx=2, copy=True)
layer_attrs["keep_top_k"] = _const_weight_or_none(
max_output_boxes_per_class).tolist()[0] * num_classes
else:
layer_attrs["keep_top_k"] = 0
max_output_boxes_per_class = _const_weight_or_none(
max_output_boxes_per_class)
if len(scores.out_shapes[0]) != 0:
num_classes = scores.out_shapes[0][1]
else:
num_classes = 1
if max_output_boxes_per_class is not None:
max_output_boxes_per_class = max_output_boxes_per_class.tolist()
if isinstance(max_output_boxes_per_class, int):
layer_attrs[
"keep_top_k"] = max_output_boxes_per_class * num_classes
else:
layer_attrs["keep_top_k"] = max_output_boxes_per_class[
0] * num_classes
if inputs_len > 3:
iou_threshold = self.graph.get_input_node(node, idx=3, copy=True)
layer_attrs["nms_threshold"] = _const_weight_or_none(
iou_threshold).tolist()[0]
else:
layer_attrs["nms_threshold"] = 0.0
if inputs_len > 4:
score_threshold = self.graph.get_input_node(node, idx=4, copy=True)
layer_attrs["score_threshold"] = _const_weight_or_none(
score_threshold).tolist()[0]
else:
layer_attrs["score_threshold"] = 0.0
self.paddle_graph.add_layer(
"custom_layer:NMS",
inputs={"bboxes": boxes.name,
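The rewritten NonMaxSuppression mapping now sets safe defaults first and only overrides them when the optional ONNX inputs and a static `scores` shape are actually available. A simplified standalone sketch of that attribute assembly, with plain Python values standing in for graph nodes:

```python
# Simplified illustration of the NonMaxSuppression attribute mapping above;
# plain values stand in for the ONNX graph nodes and constant weights.
def build_nms_attrs(num_inputs, scores_shape, max_output_boxes_per_class=None,
                    iou_threshold=None, score_threshold=None):
    attrs = {"keep_top_k": -1, "nms_threshold": 0.0, "score_threshold": 0.0}
    num_classes = scores_shape[1] if len(scores_shape) != 0 else 1
    if num_inputs > 2 and max_output_boxes_per_class is not None:
        attrs["keep_top_k"] = max_output_boxes_per_class * num_classes
    if num_inputs > 3 and iou_threshold is not None:
        attrs["nms_threshold"] = iou_threshold
    if num_inputs > 4 and score_threshold is not None:
        attrs["score_threshold"] = score_threshold
    return attrs

print(build_nms_attrs(5, [1, 80, 1000],
                      max_output_boxes_per_class=100,
                      iou_threshold=0.5, score_threshold=0.05))
# keep_top_k = 100 * 80 = 8000
```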