diff --git a/README.md b/README.md
index 5c318f405f6c9609ef5e440d39a9a18238e9aa58..6130831c68a36966996c93e16e9f5f13b6c70ee6 100644
--- a/README.md
+++ b/README.md
@@ -121,7 +121,7 @@ x2paddle --framework=caffe --prototxt=deploy.prototxt --weight=deploy.caffemodel
 | --to_lite | **[optional]** Whether to convert the model to the Paddle-Lite format with the opt tool; defaults to False |
 | --lite_valid_places | **[optional]** Specifies the conversion target; several backends may be given at once (comma-separated) and opt will automatically choose the best one; defaults to arm |
 | --lite_model_type | **[optional]** Specifies the converted model type; two types are currently supported, protobuf and naive_buffer; defaults to naive_buffer |
-| --disable_feedback | **[optional]** Whether to turn off user-info feedback, including the source framework, whether the conversion succeeded, the user's IP, etc.; defaults to False |
+| --disable_feedback | **[optional]** Whether to turn off X2Paddle usage feedback. By default X2Paddle reports the success rate of model conversions and the source framework, which helps X2Paddle iterate on user needs; user model files are never uploaded. If you prefer not to participate, set this parameter to True |

 #### X2Paddle API
 X2Paddle also supports model conversion through its API; see [X2PaddleAPI](docs/inference_model_convertor/x2paddle_api.md)
diff --git a/x2paddle/convert.py b/x2paddle/convert.py
index 7c5489cb1710c34c29bba96a2353ecba3fea873a..5f660a63f438a3556a2ecce193b1b5ab51b69b45 100644
--- a/x2paddle/convert.py
+++ b/x2paddle/convert.py
@@ -18,6 +18,7 @@ from x2paddle.utils import ConverterCheck
 import argparse
 import sys
 import logging
+import time


 def arg_parser():
@@ -98,7 +99,7 @@ def arg_parser():
         "--disable_feedback",
         "-df",
         default=False,
-        help="Turn off user info feedback")
+        help="Turn off feedback of model conversion.")
     parser.add_argument(
         "--to_lite", "-tl", default=False, help="convert to Paddle-Lite format")
     parser.add_argument(
@@ -138,8 +139,12 @@ def tf2paddle(model_path,
               lite_valid_places="arm",
               lite_model_type="naive_buffer",
               disable_feedback=False):
+    # for convert_id
+    time_info = int(time.time())
     if not disable_feedback:
-        ConverterCheck(task="TensorFlow", convert_state="Start").start()
+        ConverterCheck(
+            task="TensorFlow", time_info=time_info,
+            convert_state="Start").start()
     # check tensorflow installation and version
     try:
         import os
@@ -172,15 +177,21 @@ def tf2paddle(model_path,
     mapper.paddle_graph.gen_model(save_dir)
     logging.info("Successfully exported Paddle static graph model!")
     if not disable_feedback:
-        ConverterCheck(task="TensorFlow", convert_state="Success").start()
+        ConverterCheck(
+            task="TensorFlow", time_info=time_info,
+            convert_state="Success").start()
     if convert_to_lite:
         logging.info("Now translating model from Paddle to Paddle Lite ...")
         if not disable_feedback:
-            ConverterCheck(task="TensorFlow", lite_state="Start").start()
+            ConverterCheck(
+                task="TensorFlow", time_info=time_info,
+                lite_state="Start").start()
         convert2lite(save_dir, lite_valid_places, lite_model_type)
         logging.info("Successfully exported Paddle Lite support model!")
         if not disable_feedback:
-            ConverterCheck(task="TensorFlow", lite_state="Success").start()
+            ConverterCheck(
+                task="TensorFlow", time_info=time_info,
+                lite_state="Success").start()


 def caffe2paddle(proto_file,
@@ -191,8 +202,11 @@ def caffe2paddle(proto_file,
                  lite_valid_places="arm",
                  lite_model_type="naive_buffer",
                  disable_feedback=False):
+    # for convert_id
+    time_info = int(time.time())
     if not disable_feedback:
-        ConverterCheck(task="Caffe", convert_state="Start").start()
+        ConverterCheck(
+            task="Caffe", time_info=time_info, convert_state="Start").start()
     from x2paddle.decoder.caffe_decoder import CaffeDecoder
     from x2paddle.op_mapper.caffe2paddle.caffe_op_mapper import CaffeOpMapper
     import google.protobuf as gpb
@@ -214,15 +228,18 @@ def caffe2paddle(proto_file,
     mapper.paddle_graph.gen_model(save_dir)
     logging.info("Successfully exported Paddle static graph model!")
     if not disable_feedback:
-        ConverterCheck(task="Caffe", convert_state="Success").start()
convert_state="Success").start() + ConverterCheck( + task="Caffe", time_info=time_info, convert_state="Success").start() if convert_to_lite: logging.info("Now translating model from Paddle to Paddle Lite ...") if not disable_feedback: - ConverterCheck(task="Caffe", lite_state="Start").start() + ConverterCheck( + task="Caffe", time_info=time_info, lite_state="Start").start() convert2lite(save_dir, lite_valid_places, lite_model_type) logging.info("Successfully exported Paddle Lite support model!") if not disable_feedback: - ConverterCheck(task="Caffe", lite_state="Success").start() + ConverterCheck( + task="Caffe", time_info=time_info, lite_state="Success").start() def onnx2paddle(model_path, @@ -231,8 +248,11 @@ def onnx2paddle(model_path, lite_valid_places="arm", lite_model_type="naive_buffer", disable_feedback=False): + # for convert_id + time_info = int(time.time()) if not disable_feedback: - ConverterCheck(task="ONNX", convert_state="Start").start() + ConverterCheck( + task="ONNX", time_info=time_info, convert_state="Start").start() # check onnx installation and version try: import onnx @@ -261,15 +281,18 @@ def onnx2paddle(model_path, mapper.paddle_graph.gen_model(save_dir) logging.info("Successfully exported Paddle static graph model!") if not disable_feedback: - ConverterCheck(task="ONNX", convert_state="Success").start() + ConverterCheck( + task="ONNX", time_info=time_info, convert_state="Success").start() if convert_to_lite: logging.info("Now translating model from Paddle to Paddle Lite ...") if not disable_feedback: - ConverterCheck(task="ONNX", lite_state="Start").start() + ConverterCheck( + task="ONNX", time_info=time_info, lite_state="Start").start() convert2lite(save_dir, lite_valid_places, lite_model_type) logging.info("Successfully exported Paddle Lite support model!") if not disable_feedback: - ConverterCheck(task="ONNX", lite_state="Success").start() + ConverterCheck( + task="ONNX", time_info=time_info, lite_state="Success").start() def pytorch2paddle(module, @@ -281,8 +304,11 @@ def pytorch2paddle(module, lite_valid_places="arm", lite_model_type="naive_buffer", disable_feedback=False): + # for convert_id + time_info = int(time.time()) if not disable_feedback: - onverterCheck(task="PyTorch", convert_state="Start").start() + ConverterCheck( + task="PyTorch", time_info=time_info, convert_state="Start").start() # check pytorch installation and version try: import torch @@ -324,15 +350,20 @@ def pytorch2paddle(module, save_dir, jit_type=jit_type, enable_code_optim=enable_code_optim) logging.info("Successfully exported Paddle static graph model!") if not disable_feedback: - ConverterCheck(task="PyTorch", convert_state="Success").start() + ConverterCheck( + task="PyTorch", time_info=time_info, + convert_state="Success").start() if convert_to_lite: logging.info("Now translating model from Paddle to Paddle Lite ...") if not disable_feedback: - ConverterCheck(task="PyTorch", lite_state="Start").start() + ConverterCheck( + task="PyTorch", time_info=time_info, lite_state="Start").start() convert2lite(save_dir, lite_valid_places, lite_model_type) logging.info("Successfully exported Paddle Lite support model!") if not disable_feedback: - ConverterCheck(task="PyTorch", lite_state="Success").start() + ConverterCheck( + task="PyTorch", time_info=time_info, + lite_state="Success").start() def main(): diff --git a/x2paddle/utils.py b/x2paddle/utils.py index 9d588ec1d984783471718d590e1bc784338c2385..f1dbda0c2395362390f0d2eb422cbf4dae04aafb 100644 --- a/x2paddle/utils.py +++ 
@@ -18,7 +18,6 @@ import x2paddle
 import hashlib
 import requests
 import threading
-import time
 import uuid

 stats_api = "http://paddlepaddle.org.cn/paddlehub/stat"
@@ -53,6 +52,7 @@ class ConverterCheck(threading.Thread):

     def __init__(self,
                  task="onnx",
+                 time_info=None,
                  convert_state=None,
                  lite_state=None,
                  extra_info=None):
@@ -62,9 +62,7 @@ class ConverterCheck(threading.Thread):
         self._convert_state = convert_state
         self._lite_state = lite_state
         self._extra_info = extra_info
-        self._convert_id = _md5(str(uuid.uuid1())[-12:])
-        self._hash_flag = _md5(str(uuid.uuid1())[-12:]) + "-" + str(
-            int(time.time()))
+        self._convert_id = _md5(str(uuid.uuid1())[-12:]) + "-" + str(time_info)

     def run(self):
         params = {
@@ -73,7 +71,6 @@ class ConverterCheck(threading.Thread):
             'paddle_version': paddle.__version__,
             'convert_state': self._convert_state,
             'convert_id': self._convert_id,
-            'cache_info': self._hash_flag,
             'from': 'x2paddle'
         }
         if self._lite_state is not None:
@@ -82,7 +79,7 @@ class ConverterCheck(threading.Thread):
             params.update(self._extra_info)

         try:
-            requests.get(stats_api, params)
+            requests.get(stats_api, params, timeout=2)
         except Exception:
             pass
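
A minimal usage sketch of the reworked telemetry flow (not part of the patch; the import path, `task` value, and `time_info` parameter are taken from the diff above): each converter now takes a single timestamp per run and threads it through every `ConverterCheck` event, so the "Start" and "Success" records of one conversion share the same `convert_id` suffix and can be paired server-side.

```python
import time

from x2paddle.utils import ConverterCheck

# Taken once per conversion run and reused for every event, so all events
# of this run carry the same time_info suffix in their convert_id.
time_info = int(time.time())

ConverterCheck(task="ONNX", time_info=time_info, convert_state="Start").start()
# ... the actual model conversion would run here ...
ConverterCheck(task="ONNX", time_info=time_info, convert_state="Success").start()
```

Each `ConverterCheck` is a `threading.Thread` whose `run()` issues a single stats request; with the added `timeout=2`, a slow or unreachable stats endpoint can no longer stall the conversion itself.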