diff --git a/README.md b/README.md index acc08fa3bb815e33d39b5121d172830211d5ec95..6130831c68a36966996c93e16e9f5f13b6c70ee6 100644 --- a/README.md +++ b/README.md @@ -121,6 +121,7 @@ x2paddle --framework=caffe --prototxt=deploy.prototxt --weight=deploy.caffemodel | --to_lite | **[可选]** 是否使用opt工具转成Paddle-Lite支持格式,默认为False | | --lite_valid_places | **[可选]** 指定转换类型,可以同时指定多个backend(以逗号分隔),opt将会自动选择最佳方式,默认为arm | | --lite_model_type | **[可选]** 指定模型转化类型,目前支持两种类型:protobuf和naive_buffer,默认为naive_buffer | +| --disable_feedback | **[可选]** 是否关闭X2Paddle使用反馈;X2Paddle默认会统计用户在进行模型转换时的成功率,以及转换框架来源等信息,以便于帮忙X2Paddle根据用户需求进行迭代,不会上传用户的模型文件。如若不想参与反馈,可指定此参数为False即可 | #### X2Paddle API 目前X2Paddle提供API方式转换模型,可参考[X2PaddleAPI](docs/inference_model_convertor/x2paddle_api.md) diff --git a/requirements.txt b/requirements.txt index 9d7748adf401214b95099980aaaecb875e4f153e..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +0,0 @@ -pre-commit -yapf == 0.28.0 -pandas -treelib diff --git a/setup.py b/setup.py index 490c42c95970cdd333de84e458eef910342076f1..eb19c180e290d2027047b8480281e92d0e86b273 100644 --- a/setup.py +++ b/setup.py @@ -6,6 +6,9 @@ long_description += "Usage: x2paddle --framework tensorflow --model tf_model.pb long_description += "GitHub: https://github.com/PaddlePaddle/X2Paddle\n" long_description += "Email: dltp-sz@baidu.com" +with open("requirements.txt") as fin: + REQUIRED_PACKAGES = fin.read() + setuptools.setup( name="x2paddle", version=x2paddle.__version__, @@ -16,6 +19,7 @@ setuptools.setup( long_description_content_type="text/plain", url="https://github.com/PaddlePaddle/x2paddle", packages=setuptools.find_packages(), + install_requires=REQUIRED_PACKAGES, classifiers=[ "Programming Language :: Python :: 3", "License :: OSI Approved :: Apache Software License", diff --git a/x2paddle/convert.py b/x2paddle/convert.py index fad394129c65ea33e1a2bd0733da1665dcaad3c6..5f660a63f438a3556a2ecce193b1b5ab51b69b45 100644 --- 
a/x2paddle/convert.py +++ b/x2paddle/convert.py @@ -14,9 +14,11 @@ from six import text_type as _text_type from x2paddle import program +from x2paddle.utils import ConverterCheck import argparse import sys import logging +import time def arg_parser(): @@ -93,6 +95,11 @@ def arg_parser(): "-co", default=True, help="Turn on code optimization") + parser.add_argument( + "--disable_feedback", + "-df", + default=False, + help="Turn off feedback of model conversion.") parser.add_argument( "--to_lite", "-tl", default=False, help="convert to Paddle-Lite format") parser.add_argument( @@ -130,7 +137,14 @@ def tf2paddle(model_path, define_input_shape=False, convert_to_lite=False, lite_valid_places="arm", - lite_model_type="naive_buffer"): + lite_model_type="naive_buffer", + disable_feedback=False): + # for convert_id + time_info = int(time.time()) + if not disable_feedback: + ConverterCheck( + task="TensorFlow", time_info=time_info, + convert_state="Start").start() # check tensorflow installation and version try: import os @@ -162,10 +176,22 @@ def tf2paddle(model_path, logging.info("Model optimized!") mapper.paddle_graph.gen_model(save_dir) logging.info("Successfully exported Paddle static graph model!") + if not disable_feedback: + ConverterCheck( + task="TensorFlow", time_info=time_info, + convert_state="Success").start() if convert_to_lite: logging.info("Now translating model from Paddle to Paddle Lite ...") + if not disable_feedback: + ConverterCheck( + task="TensorFlow", time_info=time_info, + lite_state="Start").start() convert2lite(save_dir, lite_valid_places, lite_model_type) logging.info("Successfully exported Paddle Lite support model!") + if not disable_feedback: + ConverterCheck( + task="TensorFlow", time_info=time_info, + lite_state="Success").start() def caffe2paddle(proto_file, @@ -174,7 +200,13 @@ def caffe2paddle(proto_file, caffe_proto, convert_to_lite=False, lite_valid_places="arm", - lite_model_type="naive_buffer"): + lite_model_type="naive_buffer", + 
disable_feedback=False): + # for convert_id + time_info = int(time.time()) + if not disable_feedback: + ConverterCheck( + task="Caffe", time_info=time_info, convert_state="Start").start() from x2paddle.decoder.caffe_decoder import CaffeDecoder from x2paddle.op_mapper.caffe2paddle.caffe_op_mapper import CaffeOpMapper import google.protobuf as gpb @@ -195,17 +227,32 @@ def caffe2paddle(proto_file, logging.info("Model optimized!") mapper.paddle_graph.gen_model(save_dir) logging.info("Successfully exported Paddle static graph model!") + if not disable_feedback: + ConverterCheck( + task="Caffe", time_info=time_info, convert_state="Success").start() if convert_to_lite: logging.info("Now translating model from Paddle to Paddle Lite ...") + if not disable_feedback: + ConverterCheck( + task="Caffe", time_info=time_info, lite_state="Start").start() convert2lite(save_dir, lite_valid_places, lite_model_type) logging.info("Successfully exported Paddle Lite support model!") + if not disable_feedback: + ConverterCheck( + task="Caffe", time_info=time_info, lite_state="Success").start() def onnx2paddle(model_path, save_dir, convert_to_lite=False, lite_valid_places="arm", - lite_model_type="naive_buffer"): + lite_model_type="naive_buffer", + disable_feedback=False): + # for convert_id + time_info = int(time.time()) + if not disable_feedback: + ConverterCheck( + task="ONNX", time_info=time_info, convert_state="Start").start() # check onnx installation and version try: import onnx @@ -233,10 +280,19 @@ def onnx2paddle(model_path, logging.info("Model optimized.") mapper.paddle_graph.gen_model(save_dir) logging.info("Successfully exported Paddle static graph model!") + if not disable_feedback: + ConverterCheck( + task="ONNX", time_info=time_info, convert_state="Success").start() if convert_to_lite: logging.info("Now translating model from Paddle to Paddle Lite ...") + if not disable_feedback: + ConverterCheck( + task="ONNX", time_info=time_info, lite_state="Start").start() 
convert2lite(save_dir, lite_valid_places, lite_model_type) logging.info("Successfully exported Paddle Lite support model!") + if not disable_feedback: + ConverterCheck( + task="ONNX", time_info=time_info, lite_state="Success").start() def pytorch2paddle(module, @@ -246,7 +302,13 @@ def pytorch2paddle(module, enable_code_optim=True, convert_to_lite=False, lite_valid_places="arm", - lite_model_type="naive_buffer"): + lite_model_type="naive_buffer", + disable_feedback=False): + # for convert_id + time_info = int(time.time()) + if not disable_feedback: + ConverterCheck( + task="PyTorch", time_info=time_info, convert_state="Start").start() # check pytorch installation and version try: import torch @@ -287,10 +349,21 @@ def pytorch2paddle(module, mapper.paddle_graph.gen_model( save_dir, jit_type=jit_type, enable_code_optim=enable_code_optim) logging.info("Successfully exported Paddle static graph model!") + if not disable_feedback: + ConverterCheck( + task="PyTorch", time_info=time_info, + convert_state="Success").start() if convert_to_lite: logging.info("Now translating model from Paddle to Paddle Lite ...") + if not disable_feedback: + ConverterCheck( + task="PyTorch", time_info=time_info, lite_state="Start").start() convert2lite(save_dir, lite_valid_places, lite_model_type) logging.info("Successfully exported Paddle Lite support model!") + if not disable_feedback: + ConverterCheck( + task="PyTorch", time_info=time_info, + lite_state="Success").start() def main(): @@ -351,7 +424,8 @@ def main(): define_input_shape, convert_to_lite=args.to_lite, lite_valid_places=args.lite_valid_places, - lite_model_type=args.lite_model_type) + lite_model_type=args.lite_model_type, + disable_feedback=args.disable_feedback) elif args.framework == "caffe": assert args.prototxt is not None and args.weight is not None, "--prototxt and --weight should be defined while translating caffe model" @@ -362,7 +436,8 @@ def main(): args.caffe_proto, convert_to_lite=args.to_lite, 
lite_valid_places=args.lite_valid_places, - lite_model_type=args.lite_model_type) + lite_model_type=args.lite_model_type, + disable_feedback=args.disable_feedback) elif args.framework == "onnx": assert args.model is not None, "--model should be defined while translating onnx model" onnx2paddle( @@ -370,7 +445,8 @@ def main(): args.save_dir, convert_to_lite=args.to_lite, lite_valid_places=args.lite_valid_places, - lite_model_type=args.lite_model_type) + lite_model_type=args.lite_model_type, + disable_feedback=args.disable_feedback) elif args.framework == "paddle2onnx": logging.info( "Paddle to ONNX tool has been migrated to the new github: https://github.com/PaddlePaddle/paddle2onnx" diff --git a/x2paddle/optimizer/optimizer.py b/x2paddle/optimizer/optimizer.py index 25e496f284008062e42edf8a4079e0ff2a1e0493..49c62388a3be26d3bcac486ecdc0e5775414b953 100644 --- a/x2paddle/optimizer/optimizer.py +++ b/x2paddle/optimizer/optimizer.py @@ -42,17 +42,22 @@ class GraphOptimizer(object): self.passes = [] def optimize(self, graph): + show_pass_log = False for pass_name in self.passes: pass_ = PassManager.lookup(pass_name)() if pass_name.endswith("_eliminate_pass") or pass_name.endswith( "conv2d_add_fuse_pass"): pass_.apply(graph) + show_pass_log = True else: while True: before_len = len(graph.layers) pass_.apply(graph) after_len = len(graph.layers) + if after_len < before_len: + show_pass_log = True if before_len == after_len: break - print("{} done!".format(pass_name)) + if show_pass_log: + print("{} done!".format(pass_name)) return graph diff --git a/x2paddle/utils.py b/x2paddle/utils.py index 3747916bd8b1de40c0b66b2878b5b6bd2bcb6847..c3a6d1449867288d8c3df45d9121e518e538e369 100644 --- a/x2paddle/utils.py +++ b/x2paddle/utils.py @@ -14,6 +14,14 @@ # limitations under the License. 
import paddle +import x2paddle +import hashlib +import requests +import threading +import uuid +import json + +stats_api = "http://paddlepaddle.org.cn/paddlehub/stat" def string(param): @@ -32,6 +40,56 @@ def check_version(): return True +def _md5(text: str): + '''Calculate the md5 value of the input text.''' + md5code = hashlib.md5(text.encode()) + return md5code.hexdigest() + + +class ConverterCheck(threading.Thread): + """ + Count the number of calls to model conversion + """ + + def __init__(self, + task="ONNX", + time_info=None, + convert_state=None, + lite_state=None, + extra_info=None): + threading.Thread.__init__(self) + self._task = task + self._version = x2paddle.__version__ + self._convert_state = convert_state + self._lite_state = lite_state + self._extra_info = extra_info + self._convert_id = _md5(str(uuid.uuid1())[-12:]) + "-" + str(time_info) + + def run(self): + params = { + 'task': self._task, + 'x2paddle_version': self._version, + 'paddle_version': paddle.__version__, + 'from': 'x2paddle' + } + extra = { + 'convert_state': self._convert_state, + 'convert_id': self._convert_id, + } + if self._lite_state is not None: + extra.update({'lite_state': self._lite_state}) + if self._extra_info is not None: + extra.update(self._extra_info) + + params.update({"extra": json.dumps(extra)}) + try: + requests.get(stats_api, params, timeout=2) + except Exception: + pass + + return + + class PaddleDtypes(): def __init__(self, is_new_version=True): if is_new_version: