Commit 0edba41b authored by wjj19950828

deal with comments

Parent: f8666204
@@ -121,7 +121,7 @@ x2paddle --framework=caffe --prototxt=deploy.prototxt --weight=deploy.caffemodel
 | --to_lite | **[optional]** Whether to use the opt tool to convert the model into the Paddle-Lite supported format; defaults to False |
 | --lite_valid_places | **[optional]** Conversion target(s); several backends may be given at once (comma-separated) and opt will automatically pick the best one; defaults to arm |
 | --lite_model_type | **[optional]** Output model type; two types are currently supported, protobuf and naive_buffer; defaults to naive_buffer |
-| --disable_feedback | **[optional]** Whether to turn off user-info feedback, which includes the source framework, whether the conversion succeeded, the user's IP, and so on; defaults to False |
+| --disable_feedback | **[optional]** Whether to turn off X2Paddle usage feedback. By default, X2Paddle records whether each conversion succeeds and which source framework it comes from, so the project can be improved according to user needs; model files are never uploaded. If you prefer not to send feedback, pass this parameter as False |

 #### X2Paddle API
 X2Paddle currently provides an API-based way to convert models; see [X2PaddleAPI](docs/inference_model_convertor/x2paddle_api.md)
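For quick reference, here is a sketch of driving the same options through the Python API mentioned above. The two positional arguments and the import path x2paddle.convert are assumptions (this commit's diff only shows the trailing keyword parameters of onnx2paddle); see x2paddle_api.md for the authoritative signature.

```python
# Usage sketch for the flags in the table above, via the Python API.
# The input/output paths below are hypothetical placeholders.
from x2paddle.convert import onnx2paddle

onnx2paddle(
    "model.onnx",                    # assumed: source model path
    "inference_model",               # assumed: output directory
    convert_to_lite=True,            # --to_lite
    lite_valid_places="arm",         # --lite_valid_places
    lite_model_type="naive_buffer",  # --lite_model_type
    disable_feedback=True)           # opt out of usage feedback
```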
...
@@ -18,6 +18,7 @@ from x2paddle.utils import ConverterCheck
 import argparse
 import sys
 import logging
+import time


 def arg_parser():
@@ -98,7 +99,7 @@ def arg_parser():
         "--disable_feedback",
         "-df",
         default=False,
-        help="Turn off user info feedback")
+        help="Turn off feedback of model conversion.")
     parser.add_argument(
         "--to_lite", "-tl", default=False, help="convert to Paddle-Lite format")
     parser.add_argument(
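A parsing note on this flag, sketched with argparse directly: because it is declared with default=False and no type= converter, any value supplied on the command line is stored as a string, and every non-empty string is truthy. Passing the literal text False therefore still disables feedback, which is why the README wording above works.

```python
# Minimal reproduction of the flag's parsing behavior (assumes the
# add_argument call exactly as declared in the diff above).
import argparse

parser = argparse.ArgumentParser()
parser.add_argument(
    "--disable_feedback",
    "-df",
    default=False,
    help="Turn off feedback of model conversion.")

args = parser.parse_args(["--disable_feedback", "False"])
print(repr(args.disable_feedback))  # 'False' (a string, not the bool)
print(bool(args.disable_feedback))  # True: any non-empty string is truthy
```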
@@ -138,8 +139,12 @@ def tf2paddle(model_path,
               lite_valid_places="arm",
               lite_model_type="naive_buffer",
               disable_feedback=False):
+    # for convert_id
+    time_info = int(time.time())
     if not disable_feedback:
-        ConverterCheck(task="TensorFlow", convert_state="Start").start()
+        ConverterCheck(
+            task="TensorFlow", time_info=time_info,
+            convert_state="Start").start()
     # check tensorflow installation and version
     try:
         import os
@@ -172,15 +177,21 @@ def tf2paddle(model_path,
     mapper.paddle_graph.gen_model(save_dir)
     logging.info("Successfully exported Paddle static graph model!")
     if not disable_feedback:
-        ConverterCheck(task="TensorFlow", convert_state="Success").start()
+        ConverterCheck(
+            task="TensorFlow", time_info=time_info,
+            convert_state="Success").start()
     if convert_to_lite:
         logging.info("Now translating model from Paddle to Paddle Lite ...")
         if not disable_feedback:
-            ConverterCheck(task="TensorFlow", lite_state="Start").start()
+            ConverterCheck(
+                task="TensorFlow", time_info=time_info,
+                lite_state="Start").start()
         convert2lite(save_dir, lite_valid_places, lite_model_type)
         logging.info("Successfully exported Paddle Lite support model!")
         if not disable_feedback:
-            ConverterCheck(task="TensorFlow", lite_state="Success").start()
+            ConverterCheck(
+                task="TensorFlow", time_info=time_info,
+                lite_state="Success").start()


 def caffe2paddle(proto_file,
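The "# for convert_id" comment refers to how this timestamp is consumed in x2paddle.utils further down: each ConverterCheck instance builds its own random id prefix, and the shared time_info suffix is what pairs the Start and Success events of one run. A minimal sketch, not the shipped class:

```python
# Sketch of the convert_id pairing introduced by this commit. The helper
# names mirror the diff; everything else is illustrative.
import hashlib
import time
import uuid


def _md5(text):
    return hashlib.md5(text.encode("utf-8")).hexdigest()


def make_convert_id(time_info):
    # Random per-event prefix + per-run timestamp suffix.
    return _md5(str(uuid.uuid1())[-12:]) + "-" + str(time_info)


time_info = int(time.time())             # captured once, at function entry
start_id = make_convert_id(time_info)    # sent with convert_state="Start"
success_id = make_convert_id(time_info)  # sent with convert_state="Success"

# The prefixes differ, but the shared suffix lets the stats backend group
# the two events as a single conversion run.
assert start_id.rsplit("-", 1)[1] == success_id.rsplit("-", 1)[1]
```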
@@ -191,8 +202,11 @@ def caffe2paddle(proto_file,
                  lite_valid_places="arm",
                  lite_model_type="naive_buffer",
                  disable_feedback=False):
+    # for convert_id
+    time_info = int(time.time())
     if not disable_feedback:
-        ConverterCheck(task="Caffe", convert_state="Start").start()
+        ConverterCheck(
+            task="Caffe", time_info=time_info, convert_state="Start").start()
     from x2paddle.decoder.caffe_decoder import CaffeDecoder
     from x2paddle.op_mapper.caffe2paddle.caffe_op_mapper import CaffeOpMapper
     import google.protobuf as gpb
@@ -214,15 +228,18 @@ def caffe2paddle(proto_file,
     mapper.paddle_graph.gen_model(save_dir)
     logging.info("Successfully exported Paddle static graph model!")
     if not disable_feedback:
-        ConverterCheck(task="Caffe", convert_state="Success").start()
+        ConverterCheck(
+            task="Caffe", time_info=time_info, convert_state="Success").start()
     if convert_to_lite:
         logging.info("Now translating model from Paddle to Paddle Lite ...")
         if not disable_feedback:
-            ConverterCheck(task="Caffe", lite_state="Start").start()
+            ConverterCheck(
+                task="Caffe", time_info=time_info, lite_state="Start").start()
         convert2lite(save_dir, lite_valid_places, lite_model_type)
         logging.info("Successfully exported Paddle Lite support model!")
         if not disable_feedback:
-            ConverterCheck(task="Caffe", lite_state="Success").start()
+            ConverterCheck(
+                task="Caffe", time_info=time_info, lite_state="Success").start()


 def onnx2paddle(model_path,
@@ -231,8 +248,11 @@ def onnx2paddle(model_path,
                 lite_valid_places="arm",
                 lite_model_type="naive_buffer",
                 disable_feedback=False):
+    # for convert_id
+    time_info = int(time.time())
     if not disable_feedback:
-        ConverterCheck(task="ONNX", convert_state="Start").start()
+        ConverterCheck(
+            task="ONNX", time_info=time_info, convert_state="Start").start()
     # check onnx installation and version
     try:
         import onnx
@@ -261,15 +281,18 @@ def onnx2paddle(model_path,
     mapper.paddle_graph.gen_model(save_dir)
     logging.info("Successfully exported Paddle static graph model!")
     if not disable_feedback:
-        ConverterCheck(task="ONNX", convert_state="Success").start()
+        ConverterCheck(
+            task="ONNX", time_info=time_info, convert_state="Success").start()
     if convert_to_lite:
         logging.info("Now translating model from Paddle to Paddle Lite ...")
         if not disable_feedback:
-            ConverterCheck(task="ONNX", lite_state="Start").start()
+            ConverterCheck(
+                task="ONNX", time_info=time_info, lite_state="Start").start()
         convert2lite(save_dir, lite_valid_places, lite_model_type)
         logging.info("Successfully exported Paddle Lite support model!")
         if not disable_feedback:
-            ConverterCheck(task="ONNX", lite_state="Success").start()
+            ConverterCheck(
+                task="ONNX", time_info=time_info, lite_state="Success").start()


 def pytorch2paddle(module,
@@ -281,8 +304,11 @@ def pytorch2paddle(module,
                    lite_valid_places="arm",
                    lite_model_type="naive_buffer",
                    disable_feedback=False):
+    # for convert_id
+    time_info = int(time.time())
     if not disable_feedback:
-        onverterCheck(task="PyTorch", convert_state="Start").start()
+        ConverterCheck(
+            task="PyTorch", time_info=time_info, convert_state="Start").start()
     # check pytorch installation and version
     try:
         import torch
@@ -324,15 +350,20 @@ def pytorch2paddle(module,
         save_dir, jit_type=jit_type, enable_code_optim=enable_code_optim)
     logging.info("Successfully exported Paddle static graph model!")
     if not disable_feedback:
-        ConverterCheck(task="PyTorch", convert_state="Success").start()
+        ConverterCheck(
+            task="PyTorch", time_info=time_info,
+            convert_state="Success").start()
     if convert_to_lite:
         logging.info("Now translating model from Paddle to Paddle Lite ...")
         if not disable_feedback:
-            ConverterCheck(task="PyTorch", lite_state="Start").start()
+            ConverterCheck(
+                task="PyTorch", time_info=time_info, lite_state="Start").start()
         convert2lite(save_dir, lite_valid_places, lite_model_type)
         logging.info("Successfully exported Paddle Lite support model!")
         if not disable_feedback:
-            ConverterCheck(task="PyTorch", lite_state="Success").start()
+            ConverterCheck(
+                task="PyTorch", time_info=time_info,
+                lite_state="Success").start()


 def main():
...
@@ -18,7 +18,6 @@ import x2paddle
 import hashlib
 import requests
 import threading
-import time
 import uuid

 stats_api = "http://paddlepaddle.org.cn/paddlehub/stat"
@@ -53,6 +52,7 @@ class ConverterCheck(threading.Thread):
     def __init__(self,
                  task="onnx",
+                 time_info=None,
                  convert_state=None,
                  lite_state=None,
                  extra_info=None):
@@ -62,9 +62,7 @@ class ConverterCheck(threading.Thread):
         self._convert_state = convert_state
         self._lite_state = lite_state
         self._extra_info = extra_info
-        self._convert_id = _md5(str(uuid.uuid1())[-12:])
-        self._hash_flag = _md5(str(uuid.uuid1())[-12:]) + "-" + str(
-            int(time.time()))
+        self._convert_id = _md5(str(uuid.uuid1())[-12:]) + "-" + str(time_info)

     def run(self):
         params = {
@@ -73,7 +71,6 @@ class ConverterCheck(threading.Thread):
             'paddle_version': paddle.__version__,
             'convert_state': self._convert_state,
             'convert_id': self._convert_id,
-            'cache_info': self._hash_flag,
             'from': 'x2paddle'
         }
         if self._lite_state is not None:
@@ -82,7 +79,7 @@ class ConverterCheck(threading.Thread):
             params.update(self._extra_info)
         try:
-            requests.get(stats_api, params)
+            requests.get(stats_api, params, timeout=2)
         except Exception:
             pass
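Taken together with ConverterCheck being a threading.Thread, the added timeout keeps feedback entirely off the critical path: the request runs on its own thread, is capped at 2 seconds, and swallows every error. A minimal sketch of that design (the class name StatsPing and the daemon setting are assumptions, not the shipped class):

```python
# Fire-and-forget telemetry sketch; mirrors the pattern above without
# reproducing the full ConverterCheck class.
import threading

import requests

stats_api = "http://paddlepaddle.org.cn/paddlehub/stat"


class StatsPing(threading.Thread):
    def __init__(self, params):
        super().__init__()
        self.daemon = True      # assumed: never keep the process alive for telemetry
        self._params = params

    def run(self):
        try:
            requests.get(stats_api, self._params, timeout=2)
        except Exception:
            pass                # a failed or slow ping is silently dropped


StatsPing({"task": "ONNX", "convert_state": "Start", "from": "x2paddle"}).start()
```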
...