diff --git a/x2paddle/convert.py b/x2paddle/convert.py index 1dc83e55af5cc8105520baf2123499370e11ae1f..7a522b71571728ab98e1aeba6ccb929b24d61871 100644 --- a/x2paddle/convert.py +++ b/x2paddle/convert.py @@ -139,17 +139,6 @@ def caffe2paddle(proto, weight, save_dir, caffe_proto): def onnx2paddle(model_path, save_dir): # check onnx installation and version - try: - import torch - version = torch.__version__ - if '1.2.0' not in version: - print("torch==1.2.0 is required") - return - except: - print( - "we use caffe2 to inference graph, please use \"pip install torch==1.2.0\"." - ) - return try: import onnx version = onnx.version.version @@ -193,6 +182,17 @@ def main(): assert args.framework is not None, "--framework is not defined(support tensorflow/caffe/onnx)" assert args.save_dir is not None, "--save_dir is not defined" + try: + import paddle + v0, v1, v2 = paddle.__version__.split('.') + if int(v0) != 1 or int(v1) < 5: + print("paddlepaddle>=1.5.0 is required") + return + except: + print("paddlepaddle not installed, use \"pip install paddlepaddle\"") + assert args.framework is not None, "--framework is not defined(support tensorflow/caffe/onnx)" + assert args.save_dir is not None, "--save_dir is not defined" + if args.framework == "tensorflow": assert args.model is not None, "--model should be defined while translating tensorflow model" without_data_format_optimization = False diff --git a/x2paddle/core/op_mapper.py b/x2paddle/core/op_mapper.py index a8024b1e7ea8f785b8ea87b83a2edef4476287ff..34aebb0905dc1888cd037f51a4d3864005e6e141 100644 --- a/x2paddle/core/op_mapper.py +++ b/x2paddle/core/op_mapper.py @@ -11,6 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+import paddle.fluid as fluid from paddle.fluid.proto import framework_pb2 from x2paddle.core.util import * import inspect @@ -46,6 +47,28 @@ def export_paddle_param(param, param_name, dir): fp.close() +# This function will be copied into the generated code file +def run_net(param_dir="./"): + import os + inputs, outputs = x2paddle_net() + for i, out in enumerate(outputs): + if isinstance(out, list): + for out_part in out: + outputs.append(out_part) + del outputs[i] + exe = fluid.Executor(fluid.CPUPlace()) + exe.run(fluid.default_startup_program()) + + def if_exist(var): + b = os.path.exists(os.path.join(param_dir, var.name)) + return b + + fluid.io.load_vars(exe, + param_dir, + fluid.default_main_program(), + predicate=if_exist) + + class OpMapper(object): def __init__(self): self.paddle_codes = "" diff --git a/x2paddle/core/util.py b/x2paddle/core/util.py index faafe83288b00938bf0b1b39d8f079111a4e6545..d95aec56d5ed9925ced6a1c71dabe871c3dd1830 100644 --- a/x2paddle/core/util.py +++ b/x2paddle/core/util.py @@ -11,8 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- -import paddle.fluid as fluid import numpy import math import os @@ -20,25 +18,3 @@ import os def string(param): return "\'{}\'".format(param) - - -# This func will copy to generate code file -def run_net(param_dir="./"): - import os - inputs, outputs = x2paddle_net() - for i, out in enumerate(outputs): - if isinstance(out, list): - for out_part in out: - outputs.append(out_part) - del outputs[i] - exe = fluid.Executor(fluid.CPUPlace()) - exe.run(fluid.default_startup_program()) - - def if_exist(var): - b = os.path.exists(os.path.join(param_dir, var.name)) - return b - - fluid.io.load_vars(exe, - param_dir, - fluid.default_main_program(), - predicate=if_exist) diff --git a/x2paddle/decoder/onnx_decoder.py b/x2paddle/decoder/onnx_decoder.py index 15431380e1772940d54227848988bdd4455fd82a..959b5b12aeaea4c6a84aca84b79bfe712423d9b5 100644 --- a/x2paddle/decoder/onnx_decoder.py +++ b/x2paddle/decoder/onnx_decoder.py @@ -271,6 +271,17 @@ class ONNXGraph(Graph): return value_info def get_results_of_inference(self, model, shape): + try: + import torch + version = torch.__version__ + if '1.1.0' not in version: + print("your model has a dynamic graph, torch==1.1.0 is required") + return + except: + print( + "your model has a dynamic graph, we use caffe2 to inference the graph, please use \"pip install torch==1.1.0\"." + ) + return from x2paddle.decoder.onnx_backend import prepare np_images = np.random.rand(shape[0], shape[1], shape[2],