Unverified commit 9e22b3f7, authored by Jason, committed by GitHub

Merge pull request #766 from wjj19950828/download_utils

Add convert states statistics
......@@ -121,6 +121,7 @@ x2paddle --framework=caffe --prototxt=deploy.prototxt --weight=deploy.caffemodel
| --to_lite | **[Optional]** Whether to use the opt tool to convert the model into the Paddle-Lite supported format; defaults to False |
| --lite_valid_places | **[Optional]** Specifies the conversion targets; multiple backends may be given at once (comma-separated), and opt will automatically choose the best one; defaults to arm |
| --lite_model_type | **[Optional]** Specifies the converted model type; two types are currently supported: protobuf and naive_buffer; defaults to naive_buffer |
| --disable_feedback | **[Optional]** Whether to disable X2Paddle usage feedback. By default, X2Paddle collects the success rate of model conversions and the source framework being converted, to help X2Paddle iterate according to user needs; user model files are never uploaded. To opt out of feedback, set this parameter to True |
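For example, the following end-to-end invocation (hypothetical paths, reusing the Caffe command shown above) converts a model, additionally exports the Paddle-Lite naive_buffer format for an arm target, and opts out of usage feedback:

```shell
x2paddle --framework=caffe --prototxt=deploy.prototxt --weight=deploy.caffemodel \
         --save_dir=pd_model --to_lite=True --lite_valid_places=arm \
         --lite_model_type=naive_buffer --disable_feedback=True
```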
#### X2Paddle API
X2Paddle also provides an API for model conversion; see [X2PaddleAPI](docs/inference_model_convertor/x2paddle_api.md) for details.
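As a rough sketch of the API route, here is a hypothetical PyTorch conversion; the parameter names mirror the pytorch2paddle entry point changed in this PR, while MyModel and the example input are placeholders (input_examples is assumed from the X2Paddle API docs linked above):

```python
import torch
from x2paddle.convert import pytorch2paddle

torch_module = MyModel()  # placeholder: your torch.nn.Module
torch_module.eval()

pytorch2paddle(
    module=torch_module,
    save_dir="pd_model",
    jit_type="trace",  # trace-based conversion
    input_examples=[torch.randn(1, 3, 224, 224)],  # assumed API; see docs above
    disable_feedback=True)  # added in this PR: opt out of conversion statistics
```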
......
......@@ -6,6 +6,9 @@ long_description += "Usage: x2paddle --framework tensorflow --model tf_model.pb
long_description += "GitHub: https://github.com/PaddlePaddle/X2Paddle\n"
long_description += "Email: dltp-sz@baidu.com"
with open("requirements.txt") as fin:
    REQUIRED_PACKAGES = fin.read()
setuptools.setup(
    name="x2paddle",
    version=x2paddle.__version__,
......@@ -16,6 +19,7 @@ setuptools.setup(
    long_description_content_type="text/plain",
    url="https://github.com/PaddlePaddle/x2paddle",
    packages=setuptools.find_packages(),
    install_requires=REQUIRED_PACKAGES,
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: Apache Software License",
......
......@@ -14,9 +14,11 @@
from six import text_type as _text_type
from x2paddle import program
from x2paddle.utils import ConverterCheck
import argparse
import sys
import logging
import time
def arg_parser():
......@@ -93,6 +95,11 @@ def arg_parser():
"-co",
default=True,
help="Turn on code optimization")
parser.add_argument(
"--disable_feedback",
"-df",
default=False,
help="Tune off feedback of model conversion.")
    parser.add_argument(
        "--to_lite", "-tl", default=False, help="convert to Paddle-Lite format")
    parser.add_argument(
......@@ -130,7 +137,14 @@ def tf2paddle(model_path,
              define_input_shape=False,
              convert_to_lite=False,
              lite_valid_places="arm",
              lite_model_type="naive_buffer"):
              lite_model_type="naive_buffer",
              disable_feedback=False):
    # timestamp used to build the feedback convert_id
    time_info = int(time.time())
    if not disable_feedback:
        ConverterCheck(
            task="TensorFlow", time_info=time_info,
            convert_state="Start").start()
    # check tensorflow installation and version
    try:
        import os
......@@ -162,10 +176,22 @@ def tf2paddle(model_path,
    logging.info("Model optimized!")
    mapper.paddle_graph.gen_model(save_dir)
    logging.info("Successfully exported Paddle static graph model!")
    if not disable_feedback:
        ConverterCheck(
            task="TensorFlow", time_info=time_info,
            convert_state="Success").start()
    if convert_to_lite:
        logging.info("Now translating model from Paddle to Paddle Lite ...")
        if not disable_feedback:
            ConverterCheck(
                task="TensorFlow", time_info=time_info,
                lite_state="Start").start()
        convert2lite(save_dir, lite_valid_places, lite_model_type)
        logging.info("Successfully exported Paddle Lite supported model!")
        if not disable_feedback:
            ConverterCheck(
                task="TensorFlow", time_info=time_info,
                lite_state="Success").start()
def caffe2paddle(proto_file,
......@@ -174,7 +200,13 @@ def caffe2paddle(proto_file,
                 caffe_proto,
                 convert_to_lite=False,
                 lite_valid_places="arm",
                 lite_model_type="naive_buffer"):
                 lite_model_type="naive_buffer",
                 disable_feedback=False):
    # timestamp used to build the feedback convert_id
    time_info = int(time.time())
    if not disable_feedback:
        ConverterCheck(
            task="Caffe", time_info=time_info, convert_state="Start").start()
    from x2paddle.decoder.caffe_decoder import CaffeDecoder
    from x2paddle.op_mapper.caffe2paddle.caffe_op_mapper import CaffeOpMapper
    import google.protobuf as gpb
......@@ -195,17 +227,32 @@
    logging.info("Model optimized!")
    mapper.paddle_graph.gen_model(save_dir)
    logging.info("Successfully exported Paddle static graph model!")
    if not disable_feedback:
        ConverterCheck(
            task="Caffe", time_info=time_info, convert_state="Success").start()
    if convert_to_lite:
        logging.info("Now translating model from Paddle to Paddle Lite ...")
        if not disable_feedback:
            ConverterCheck(
                task="Caffe", time_info=time_info, lite_state="Start").start()
        convert2lite(save_dir, lite_valid_places, lite_model_type)
        logging.info("Successfully exported Paddle Lite supported model!")
        if not disable_feedback:
            ConverterCheck(
                task="Caffe", time_info=time_info, lite_state="Success").start()
def onnx2paddle(model_path,
                save_dir,
                convert_to_lite=False,
                lite_valid_places="arm",
                lite_model_type="naive_buffer"):
                lite_model_type="naive_buffer",
                disable_feedback=False):
    # timestamp used to build the feedback convert_id
    time_info = int(time.time())
    if not disable_feedback:
        ConverterCheck(
            task="ONNX", time_info=time_info, convert_state="Start").start()
    # check onnx installation and version
    try:
        import onnx
......@@ -233,10 +280,19 @@
    logging.info("Model optimized.")
    mapper.paddle_graph.gen_model(save_dir)
    logging.info("Successfully exported Paddle static graph model!")
    if not disable_feedback:
        ConverterCheck(
            task="ONNX", time_info=time_info, convert_state="Success").start()
    if convert_to_lite:
        logging.info("Now translating model from Paddle to Paddle Lite ...")
        if not disable_feedback:
            ConverterCheck(
                task="ONNX", time_info=time_info, lite_state="Start").start()
        convert2lite(save_dir, lite_valid_places, lite_model_type)
        logging.info("Successfully exported Paddle Lite supported model!")
        if not disable_feedback:
            ConverterCheck(
                task="ONNX", time_info=time_info, lite_state="Success").start()
def pytorch2paddle(module,
......@@ -246,7 +302,13 @@ def pytorch2paddle(module,
                   enable_code_optim=True,
                   convert_to_lite=False,
                   lite_valid_places="arm",
                   lite_model_type="naive_buffer"):
                   lite_model_type="naive_buffer",
                   disable_feedback=False):
    # timestamp used to build the feedback convert_id
    time_info = int(time.time())
    if not disable_feedback:
        ConverterCheck(
            task="PyTorch", time_info=time_info, convert_state="Start").start()
    # check pytorch installation and version
    try:
        import torch
......@@ -287,10 +349,21 @@
    mapper.paddle_graph.gen_model(
        save_dir, jit_type=jit_type, enable_code_optim=enable_code_optim)
    logging.info("Successfully exported Paddle static graph model!")
    if not disable_feedback:
        ConverterCheck(
            task="PyTorch", time_info=time_info,
            convert_state="Success").start()
    if convert_to_lite:
        logging.info("Now translating model from Paddle to Paddle Lite ...")
        if not disable_feedback:
            ConverterCheck(
                task="PyTorch", time_info=time_info, lite_state="Start").start()
        convert2lite(save_dir, lite_valid_places, lite_model_type)
        logging.info("Successfully exported Paddle Lite supported model!")
        if not disable_feedback:
            ConverterCheck(
                task="PyTorch", time_info=time_info,
                lite_state="Success").start()
def main():
......@@ -351,7 +424,8 @@ def main():
            define_input_shape,
            convert_to_lite=args.to_lite,
            lite_valid_places=args.lite_valid_places,
            lite_model_type=args.lite_model_type)
            lite_model_type=args.lite_model_type,
            disable_feedback=args.disable_feedback)
elif args.framework == "caffe":
assert args.prototxt is not None and args.weight is not None, "--prototxt and --weight should be defined while translating caffe model"
......@@ -362,7 +436,8 @@ def main():
args.caffe_proto,
convert_to_lite=args.to_lite,
lite_valid_places=args.lite_valid_places,
lite_model_type=args.lite_model_type)
lite_model_type=args.lite_model_type,
disable_feedback=args.disable_feedback)
elif args.framework == "onnx":
assert args.model is not None, "--model should be defined while translating onnx model"
onnx2paddle(
......@@ -370,7 +445,8 @@ def main():
args.save_dir,
convert_to_lite=args.to_lite,
lite_valid_places=args.lite_valid_places,
lite_model_type=args.lite_model_type)
lite_model_type=args.lite_model_type,
disable_feedback=args.disable_feedback)
elif args.framework == "paddle2onnx":
logging.info(
"Paddle to ONNX tool has been migrated to the new github: https://github.com/PaddlePaddle/paddle2onnx"
......
......@@ -42,17 +42,22 @@ class GraphOptimizer(object):
        self.passes = []

    def optimize(self, graph):
        show_pass_log = False
        for pass_name in self.passes:
            pass_ = PassManager.lookup(pass_name)()
            if pass_name.endswith("_eliminate_pass") or pass_name.endswith(
                    "conv2d_add_fuse_pass"):
                pass_.apply(graph)
                show_pass_log = True
            else:
                # re-apply the pass until the graph stops shrinking
                while True:
                    before_len = len(graph.layers)
                    pass_.apply(graph)
                    after_len = len(graph.layers)
                    if after_len < before_len:
                        show_pass_log = True
                    if before_len == after_len:
                        break
            print("{} done!".format(pass_name))
            # log a pass only once some pass has actually changed the graph
            if show_pass_log:
                print("{} done!".format(pass_name))
        return graph
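Each non-fusion pass above is applied to a fixed point: it is re-run until the layer count stops shrinking, and the pass name is printed only once some pass has actually removed layers. A minimal self-contained sketch of the same fixed-point pattern, with illustrative names that are not X2Paddle APIs:

```python
# Minimal sketch of the fixed-point pass loop; all names are illustrative.
def run_to_fixed_point(apply_pass, layers):
    """Re-apply a shrinking pass until the layer list stops changing."""
    changed = False
    while True:
        before = len(layers)
        layers = apply_pass(layers)
        if len(layers) < before:
            changed = True  # this application removed at least one layer
        if len(layers) == before:
            break  # fixed point reached
    return layers, changed

# usage: collapse consecutive duplicate layers until stable
dedup = lambda ls: [x for i, x in enumerate(ls) if i == 0 or x != ls[i - 1]]
print(run_to_fixed_point(dedup, ["a", "a", "b", "b", "a"]))  # (['a', 'b', 'a'], True)
```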
......@@ -14,6 +14,14 @@
# limitations under the License.
import paddle
import x2paddle
import hashlib
import requests
import threading
import uuid
import json
stats_api = "http://paddlepaddle.org.cn/paddlehub/stat"
def string(param):
......@@ -32,6 +40,56 @@ def check_version():
    return True


def _md5(text: str):
    '''Calculate the md5 value of the input text.'''
    md5code = hashlib.md5(text.encode())
    return md5code.hexdigest()
class ConverterCheck(threading.Thread):
    """
    Count the number of calls to model conversion
    """

    def __init__(self,
                 task="ONNX",
                 time_info=None,
                 convert_state=None,
                 lite_state=None,
                 extra_info=None):
        threading.Thread.__init__(self)
        self._task = task
        self._version = x2paddle.__version__
        self._convert_state = convert_state
        self._lite_state = lite_state
        self._extra_info = extra_info
        self._convert_id = _md5(str(uuid.uuid1())[-12:]) + "-" + str(time_info)
    def run(self):
        params = {
            'task': self._task,
            'x2paddle_version': self._version,
            'paddle_version': paddle.__version__,
            'from': 'x2paddle'
        }
        extra = {
            'convert_state': self._convert_state,
            'convert_id': self._convert_id,
        }
        if self._lite_state is not None:
            extra.update({'lite_state': self._lite_state})
        if self._extra_info is not None:
            extra.update(self._extra_info)
        params.update({"extra": json.dumps(extra)})

        # fail silently: feedback must never break or delay a conversion
        try:
            requests.get(stats_api, params, timeout=2)
        except Exception:
            pass
        return
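The converters above use this class as fire-and-forget telemetry: each call spawns a short-lived thread that issues a single GET with a two-second timeout and swallows any failure. A minimal sketch of the call pattern, mirroring the convert.py changes in this PR (run_conversion is a placeholder for the real conversion work):

```python
import time
from x2paddle.utils import ConverterCheck

# the timestamp ties together the Start and Success events of one run
time_info = int(time.time())
ConverterCheck(task="ONNX", time_info=time_info, convert_state="Start").start()
run_conversion()  # placeholder: decode, map, and export the model
ConverterCheck(task="ONNX", time_info=time_info, convert_state="Success").start()
```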
class PaddleDtypes():
    def __init__(self, is_new_version=True):
        if is_new_version:
......