diff --git a/modules/image/Image_gan/style_transfer/UGATIT_100w/model.py b/modules/image/Image_gan/style_transfer/UGATIT_100w/model.py index 4c691d936e09d16aeadb98fc66cc2fa3fcb0c060..3adadc9232afaaafe31b3520ad54db406bc42229 100644 --- a/modules/image/Image_gan/style_transfer/UGATIT_100w/model.py +++ b/modules/image/Image_gan/style_transfer/UGATIT_100w/model.py @@ -1,51 +1,56 @@ import os import numpy as np -from paddle.fluid.core import AnalysisConfig, create_paddle_predictor +from paddle.inference import create_predictor, Config __all__ = ['Model'] - class Model(): # 初始化函数 - def __init__(self, modelpath, use_gpu): + def __init__(self, modelpath, use_gpu=False, use_mkldnn=True, combined=True): # 加载模型预测器 - self.predictor = self.load_model(modelpath, use_gpu) + self.predictor = self.load_model(modelpath, use_gpu, use_mkldnn, combined) # 获取模型的输入输出 self.input_names = self.predictor.get_input_names() self.output_names = self.predictor.get_output_names() - self.input_tensor = self.predictor.get_input_tensor(self.input_names[0]) - self.output_tensor = self.predictor.get_output_tensor(self.output_names[0]) + self.input_handle = self.predictor.get_input_handle(self.input_names[0]) + self.output_handle = self.predictor.get_output_handle(self.output_names[0]) # 模型加载函数 - def load_model(self, modelpath, use_gpu): + def load_model(self, modelpath, use_gpu, use_mkldnn, combined): # 对运行位置进行配置 if use_gpu: try: - places = os.environ["CUDA_VISIBLE_DEVICES"] - places = int(places[0]) - except Exception as e: - print('Error: %s. Please set the environment variables "CUDA_VISIBLE_DEVICES".' % e) + int(os.environ.get('CUDA_VISIBLE_DEVICES')) + except Exception: + print('Error! Unable to use GPU. Please set the environment variables "CUDA_VISIBLE_DEVICES=GPU_id" to use GPU.') use_gpu = False - + # 加载模型参数 - config = AnalysisConfig(modelpath) + if combined: + model = os.path.join(modelpath, "__model__") + params = os.path.join(modelpath, "__params__") + config = Config(model, params) + else: + config = Config(modelpath) # 设置参数 - if use_gpu: - config.enable_use_gpu(100, places) + if use_gpu: + config.enable_use_gpu(100, 0) else: config.disable_gpu() + if use_mkldnn: + config.enable_mkldnn() config.disable_glog_info() config.switch_ir_optim(True) config.enable_memory_optim() config.switch_use_feed_fetch_ops(False) config.switch_specify_input_names(True) - + # 通过参数加载模型预测器 - predictor = create_paddle_predictor(config) - + predictor = create_predictor(config) + # 返回预测器 return predictor @@ -56,13 +61,13 @@ class Model(): # 遍历输入数据进行预测 for input_data in input_datas: inputs = input_data.copy() - self.input_tensor.copy_from_cpu(inputs) - self.predictor.zero_copy_run() - output = self.output_tensor.copy_to_cpu() + self.input_handle.copy_from_cpu(inputs) + self.predictor.run() + output = self.output_handle.copy_to_cpu() outputs.append(output) - + # 预测结果合并 outputs = np.concatenate(outputs, 0) # 返回预测结果 - return outputs + return outputs \ No newline at end of file diff --git a/modules/image/Image_gan/style_transfer/UGATIT_100w/module.py b/modules/image/Image_gan/style_transfer/UGATIT_100w/module.py index f916c3f9dd5b7480b47ecffea268135fdd5efba2..6ded624c14170e401971ec21e5abff50d8f43882 100644 --- a/modules/image/Image_gan/style_transfer/UGATIT_100w/module.py +++ b/modules/image/Image_gan/style_transfer/UGATIT_100w/module.py @@ -13,16 +13,16 @@ from UGATIT_100w.processor import base64_to_cv2, cv2_to_base64, Processor author="jm12138", # 作者名称 author_email="jm12138@qq.com", # 作者邮箱 summary="UGATIT_100w", # 模型介绍 - version="1.0.0" # 
版本号 + version="1.0.1" # 版本号 ) class UGATIT_100w(Module): # 初始化函数 - def _initialize(self, use_gpu=False): + def __init__(self, name=None, use_gpu=False): # 设置模型路径 self.model_path = os.path.join(self.directory, "UGATIT_100w") # 加载模型 - self.model = Model(self.model_path, use_gpu) + self.model = Model(modelpath=self.model_path, use_gpu=use_gpu, use_mkldnn=False, combined=False) # 关键点检测函数 def style_transfer(self, images=None, paths=None, batch_size=1, output_dir='output', visualization=False): diff --git a/modules/image/Image_gan/style_transfer/UGATIT_83w/model.py b/modules/image/Image_gan/style_transfer/UGATIT_83w/model.py index 4c691d936e09d16aeadb98fc66cc2fa3fcb0c060..3adadc9232afaaafe31b3520ad54db406bc42229 100644 --- a/modules/image/Image_gan/style_transfer/UGATIT_83w/model.py +++ b/modules/image/Image_gan/style_transfer/UGATIT_83w/model.py @@ -1,51 +1,56 @@ import os import numpy as np -from paddle.fluid.core import AnalysisConfig, create_paddle_predictor +from paddle.inference import create_predictor, Config __all__ = ['Model'] - class Model(): # 初始化函数 - def __init__(self, modelpath, use_gpu): + def __init__(self, modelpath, use_gpu=False, use_mkldnn=True, combined=True): # 加载模型预测器 - self.predictor = self.load_model(modelpath, use_gpu) + self.predictor = self.load_model(modelpath, use_gpu, use_mkldnn, combined) # 获取模型的输入输出 self.input_names = self.predictor.get_input_names() self.output_names = self.predictor.get_output_names() - self.input_tensor = self.predictor.get_input_tensor(self.input_names[0]) - self.output_tensor = self.predictor.get_output_tensor(self.output_names[0]) + self.input_handle = self.predictor.get_input_handle(self.input_names[0]) + self.output_handle = self.predictor.get_output_handle(self.output_names[0]) # 模型加载函数 - def load_model(self, modelpath, use_gpu): + def load_model(self, modelpath, use_gpu, use_mkldnn, combined): # 对运行位置进行配置 if use_gpu: try: - places = os.environ["CUDA_VISIBLE_DEVICES"] - places = int(places[0]) - except Exception as e: - print('Error: %s. Please set the environment variables "CUDA_VISIBLE_DEVICES".' % e) + int(os.environ.get('CUDA_VISIBLE_DEVICES')) + except Exception: + print('Error! Unable to use GPU. 
Please set the environment variables "CUDA_VISIBLE_DEVICES=GPU_id" to use GPU.') use_gpu = False - + # 加载模型参数 - config = AnalysisConfig(modelpath) + if combined: + model = os.path.join(modelpath, "__model__") + params = os.path.join(modelpath, "__params__") + config = Config(model, params) + else: + config = Config(modelpath) # 设置参数 - if use_gpu: - config.enable_use_gpu(100, places) + if use_gpu: + config.enable_use_gpu(100, 0) else: config.disable_gpu() + if use_mkldnn: + config.enable_mkldnn() config.disable_glog_info() config.switch_ir_optim(True) config.enable_memory_optim() config.switch_use_feed_fetch_ops(False) config.switch_specify_input_names(True) - + # 通过参数加载模型预测器 - predictor = create_paddle_predictor(config) - + predictor = create_predictor(config) + # 返回预测器 return predictor @@ -56,13 +61,13 @@ class Model(): # 遍历输入数据进行预测 for input_data in input_datas: inputs = input_data.copy() - self.input_tensor.copy_from_cpu(inputs) - self.predictor.zero_copy_run() - output = self.output_tensor.copy_to_cpu() + self.input_handle.copy_from_cpu(inputs) + self.predictor.run() + output = self.output_handle.copy_to_cpu() outputs.append(output) - + # 预测结果合并 outputs = np.concatenate(outputs, 0) # 返回预测结果 - return outputs + return outputs \ No newline at end of file diff --git a/modules/image/Image_gan/style_transfer/UGATIT_83w/module.py b/modules/image/Image_gan/style_transfer/UGATIT_83w/module.py index 50fbf560417269184982a7169da565aa0cad890e..fb18738a5387fced542400768712ad648950d7d9 100644 --- a/modules/image/Image_gan/style_transfer/UGATIT_83w/module.py +++ b/modules/image/Image_gan/style_transfer/UGATIT_83w/module.py @@ -13,16 +13,16 @@ from UGATIT_83w.processor import base64_to_cv2, cv2_to_base64, Processor author="jm12138", # 作者名称 author_email="jm12138@qq.com", # 作者邮箱 summary="UGATIT", # 模型介绍 - version="1.0.0" # 版本号 + version="1.0.1" # 版本号 ) class UGATIT_83w(Module): # 初始化函数 - def _initialize(self, use_gpu=False): + def __init__(self, name=None, use_gpu=False): # 设置模型路径 self.model_path = os.path.join(self.directory, "UGATIT_83w") # 加载模型 - self.model = Model(self.model_path, use_gpu) + self.model = Model(modelpath=self.model_path, use_gpu=use_gpu, use_mkldnn=False, combined=False) # 关键点检测函数 def style_transfer(self, images=None, paths=None, batch_size=1, output_dir='output', visualization=False): diff --git a/modules/image/Image_gan/style_transfer/UGATIT_92w/model.py b/modules/image/Image_gan/style_transfer/UGATIT_92w/model.py index 4c691d936e09d16aeadb98fc66cc2fa3fcb0c060..3adadc9232afaaafe31b3520ad54db406bc42229 100644 --- a/modules/image/Image_gan/style_transfer/UGATIT_92w/model.py +++ b/modules/image/Image_gan/style_transfer/UGATIT_92w/model.py @@ -1,51 +1,56 @@ import os import numpy as np -from paddle.fluid.core import AnalysisConfig, create_paddle_predictor +from paddle.inference import create_predictor, Config __all__ = ['Model'] - class Model(): # 初始化函数 - def __init__(self, modelpath, use_gpu): + def __init__(self, modelpath, use_gpu=False, use_mkldnn=True, combined=True): # 加载模型预测器 - self.predictor = self.load_model(modelpath, use_gpu) + self.predictor = self.load_model(modelpath, use_gpu, use_mkldnn, combined) # 获取模型的输入输出 self.input_names = self.predictor.get_input_names() self.output_names = self.predictor.get_output_names() - self.input_tensor = self.predictor.get_input_tensor(self.input_names[0]) - self.output_tensor = self.predictor.get_output_tensor(self.output_names[0]) + self.input_handle = self.predictor.get_input_handle(self.input_names[0]) + self.output_handle = 
self.predictor.get_output_handle(self.output_names[0]) # 模型加载函数 - def load_model(self, modelpath, use_gpu): + def load_model(self, modelpath, use_gpu, use_mkldnn, combined): # 对运行位置进行配置 if use_gpu: try: - places = os.environ["CUDA_VISIBLE_DEVICES"] - places = int(places[0]) - except Exception as e: - print('Error: %s. Please set the environment variables "CUDA_VISIBLE_DEVICES".' % e) + int(os.environ.get('CUDA_VISIBLE_DEVICES')) + except Exception: + print('Error! Unable to use GPU. Please set the environment variables "CUDA_VISIBLE_DEVICES=GPU_id" to use GPU.') use_gpu = False - + # 加载模型参数 - config = AnalysisConfig(modelpath) + if combined: + model = os.path.join(modelpath, "__model__") + params = os.path.join(modelpath, "__params__") + config = Config(model, params) + else: + config = Config(modelpath) # 设置参数 - if use_gpu: - config.enable_use_gpu(100, places) + if use_gpu: + config.enable_use_gpu(100, 0) else: config.disable_gpu() + if use_mkldnn: + config.enable_mkldnn() config.disable_glog_info() config.switch_ir_optim(True) config.enable_memory_optim() config.switch_use_feed_fetch_ops(False) config.switch_specify_input_names(True) - + # 通过参数加载模型预测器 - predictor = create_paddle_predictor(config) - + predictor = create_predictor(config) + # 返回预测器 return predictor @@ -56,13 +61,13 @@ class Model(): # 遍历输入数据进行预测 for input_data in input_datas: inputs = input_data.copy() - self.input_tensor.copy_from_cpu(inputs) - self.predictor.zero_copy_run() - output = self.output_tensor.copy_to_cpu() + self.input_handle.copy_from_cpu(inputs) + self.predictor.run() + output = self.output_handle.copy_to_cpu() outputs.append(output) - + # 预测结果合并 outputs = np.concatenate(outputs, 0) # 返回预测结果 - return outputs + return outputs \ No newline at end of file diff --git a/modules/image/Image_gan/style_transfer/UGATIT_92w/module.py b/modules/image/Image_gan/style_transfer/UGATIT_92w/module.py index 8271307c8b65529762f1770ab7614a88dab6aba9..d11cbee23b315f2cb51dc8bf76850851f221c62d 100644 --- a/modules/image/Image_gan/style_transfer/UGATIT_92w/module.py +++ b/modules/image/Image_gan/style_transfer/UGATIT_92w/module.py @@ -13,16 +13,16 @@ from UGATIT_92w.processor import base64_to_cv2, cv2_to_base64, Processor author="jm12138", # 作者名称 author_email="jm12138@qq.com", # 作者邮箱 summary="UGATIT_92w", # 模型介绍 - version="1.0.0" # 版本号 + version="1.0.1" # 版本号 ) class UGATIT_92w(Module): # 初始化函数 - def _initialize(self, use_gpu=False): + def __init__(self, name=None, use_gpu=False): # 设置模型路径 self.model_path = os.path.join(self.directory, "UGATIT_92w") # 加载模型 - self.model = Model(self.model_path, use_gpu) + self.model = Model(modelpath=self.model_path, use_gpu=use_gpu, use_mkldnn=False, combined=False) # 关键点检测函数 def style_transfer(self, images=None, paths=None, batch_size=1, output_dir='output', visualization=False): diff --git a/modules/image/Image_gan/style_transfer/animegan_v1_hayao_60/model.py b/modules/image/Image_gan/style_transfer/animegan_v1_hayao_60/model.py index 29d4f83b6244a1ba98524748b0506e3902329b7e..3adadc9232afaaafe31b3520ad54db406bc42229 100644 --- a/modules/image/Image_gan/style_transfer/animegan_v1_hayao_60/model.py +++ b/modules/image/Image_gan/style_transfer/animegan_v1_hayao_60/model.py @@ -1,52 +1,56 @@ import os import numpy as np -from paddle.fluid.core import AnalysisConfig, create_paddle_predictor +from paddle.inference import create_predictor, Config __all__ = ['Model'] - class Model(): # 初始化函数 - def __init__(self, modelpath, use_gpu): + def __init__(self, modelpath, use_gpu=False, use_mkldnn=True, 
combined=True): # 加载模型预测器 - self.predictor = self.load_model(modelpath, use_gpu) + self.predictor = self.load_model(modelpath, use_gpu, use_mkldnn, combined) # 获取模型的输入输出 self.input_names = self.predictor.get_input_names() self.output_names = self.predictor.get_output_names() - self.input_tensor = self.predictor.get_input_tensor(self.input_names[0]) - self.output_tensor = self.predictor.get_output_tensor(self.output_names[0]) + self.input_handle = self.predictor.get_input_handle(self.input_names[0]) + self.output_handle = self.predictor.get_output_handle(self.output_names[0]) # 模型加载函数 - def load_model(self, modelpath, use_gpu): + def load_model(self, modelpath, use_gpu, use_mkldnn, combined): # 对运行位置进行配置 if use_gpu: try: - places = os.environ["CUDA_VISIBLE_DEVICES"] - places = int(places[0]) - except Exception as e: - print('Error: %s. Please set the environment variables "CUDA_VISIBLE_DEVICES".' % e) + int(os.environ.get('CUDA_VISIBLE_DEVICES')) + except Exception: + print('Error! Unable to use GPU. Please set the environment variables "CUDA_VISIBLE_DEVICES=GPU_id" to use GPU.') use_gpu = False - + # 加载模型参数 - config = AnalysisConfig(modelpath) + if combined: + model = os.path.join(modelpath, "__model__") + params = os.path.join(modelpath, "__params__") + config = Config(model, params) + else: + config = Config(modelpath) # 设置参数 - if use_gpu: - config.enable_use_gpu(100, places) + if use_gpu: + config.enable_use_gpu(100, 0) else: config.disable_gpu() - config.enable_mkldnn() + if use_mkldnn: + config.enable_mkldnn() config.disable_glog_info() config.switch_ir_optim(True) config.enable_memory_optim() config.switch_use_feed_fetch_ops(False) config.switch_specify_input_names(True) - + # 通过参数加载模型预测器 - predictor = create_paddle_predictor(config) - + predictor = create_predictor(config) + # 返回预测器 return predictor @@ -56,13 +60,14 @@ class Model(): # 遍历输入数据进行预测 for input_data in input_datas: - self.input_tensor.copy_from_cpu(input_data) - self.predictor.zero_copy_run() - output = self.output_tensor.copy_to_cpu() + inputs = input_data.copy() + self.input_handle.copy_from_cpu(inputs) + self.predictor.run() + output = self.output_handle.copy_to_cpu() outputs.append(output) - + # 预测结果合并 outputs = np.concatenate(outputs, 0) # 返回预测结果 - return outputs + return outputs \ No newline at end of file diff --git a/modules/image/Image_gan/style_transfer/animegan_v1_hayao_60/module.py b/modules/image/Image_gan/style_transfer/animegan_v1_hayao_60/module.py index 39ac4d5d7c6f7b63ed0d42370ebd61fc94c8c9f7..fb1141586971658b73cea9610e1c6d3a2722a944 100644 --- a/modules/image/Image_gan/style_transfer/animegan_v1_hayao_60/module.py +++ b/modules/image/Image_gan/style_transfer/animegan_v1_hayao_60/module.py @@ -13,16 +13,16 @@ from animegan_v1_hayao_60.processor import base64_to_cv2, cv2_to_base64, Process author="jm12138", # 作者名称 author_email="jm12138@qq.com", # 作者邮箱 summary="animegan_v1_hayao_60", # 模型介绍 - version="1.0.0" # 版本号 + version="1.0.2" # 版本号 ) class Animegan_V1_Hayao_60(Module): # 初始化函数 - def _initialize(self, use_gpu=False): + def __init__(self, name=None, use_gpu=False): # 设置模型路径 self.model_path = os.path.join(self.directory, "animegan_v1_hayao_60") # 加载模型 - self.model = Model(self.model_path, use_gpu) + self.model = Model(modelpath=self.model_path, use_gpu=use_gpu, use_mkldnn=False, combined=False) # 关键点检测函数 def style_transfer(self, diff --git a/modules/image/Image_gan/style_transfer/animegan_v2_hayao_64/model.py b/modules/image/Image_gan/style_transfer/animegan_v2_hayao_64/model.py index 
29d4f83b6244a1ba98524748b0506e3902329b7e..3adadc9232afaaafe31b3520ad54db406bc42229 100644 --- a/modules/image/Image_gan/style_transfer/animegan_v2_hayao_64/model.py +++ b/modules/image/Image_gan/style_transfer/animegan_v2_hayao_64/model.py @@ -1,52 +1,56 @@ import os import numpy as np -from paddle.fluid.core import AnalysisConfig, create_paddle_predictor +from paddle.inference import create_predictor, Config __all__ = ['Model'] - class Model(): # 初始化函数 - def __init__(self, modelpath, use_gpu): + def __init__(self, modelpath, use_gpu=False, use_mkldnn=True, combined=True): # 加载模型预测器 - self.predictor = self.load_model(modelpath, use_gpu) + self.predictor = self.load_model(modelpath, use_gpu, use_mkldnn, combined) # 获取模型的输入输出 self.input_names = self.predictor.get_input_names() self.output_names = self.predictor.get_output_names() - self.input_tensor = self.predictor.get_input_tensor(self.input_names[0]) - self.output_tensor = self.predictor.get_output_tensor(self.output_names[0]) + self.input_handle = self.predictor.get_input_handle(self.input_names[0]) + self.output_handle = self.predictor.get_output_handle(self.output_names[0]) # 模型加载函数 - def load_model(self, modelpath, use_gpu): + def load_model(self, modelpath, use_gpu, use_mkldnn, combined): # 对运行位置进行配置 if use_gpu: try: - places = os.environ["CUDA_VISIBLE_DEVICES"] - places = int(places[0]) - except Exception as e: - print('Error: %s. Please set the environment variables "CUDA_VISIBLE_DEVICES".' % e) + int(os.environ.get('CUDA_VISIBLE_DEVICES')) + except Exception: + print('Error! Unable to use GPU. Please set the environment variables "CUDA_VISIBLE_DEVICES=GPU_id" to use GPU.') use_gpu = False - + # 加载模型参数 - config = AnalysisConfig(modelpath) + if combined: + model = os.path.join(modelpath, "__model__") + params = os.path.join(modelpath, "__params__") + config = Config(model, params) + else: + config = Config(modelpath) # 设置参数 - if use_gpu: - config.enable_use_gpu(100, places) + if use_gpu: + config.enable_use_gpu(100, 0) else: config.disable_gpu() - config.enable_mkldnn() + if use_mkldnn: + config.enable_mkldnn() config.disable_glog_info() config.switch_ir_optim(True) config.enable_memory_optim() config.switch_use_feed_fetch_ops(False) config.switch_specify_input_names(True) - + # 通过参数加载模型预测器 - predictor = create_paddle_predictor(config) - + predictor = create_predictor(config) + # 返回预测器 return predictor @@ -56,13 +60,14 @@ class Model(): # 遍历输入数据进行预测 for input_data in input_datas: - self.input_tensor.copy_from_cpu(input_data) - self.predictor.zero_copy_run() - output = self.output_tensor.copy_to_cpu() + inputs = input_data.copy() + self.input_handle.copy_from_cpu(inputs) + self.predictor.run() + output = self.output_handle.copy_to_cpu() outputs.append(output) - + # 预测结果合并 outputs = np.concatenate(outputs, 0) # 返回预测结果 - return outputs + return outputs \ No newline at end of file diff --git a/modules/image/Image_gan/style_transfer/animegan_v2_hayao_64/module.py b/modules/image/Image_gan/style_transfer/animegan_v2_hayao_64/module.py index 142a95995fbae2f0c67e0df620a27da278b2bbb7..0c6eacb9d0b93fab32820c31fbe0680b27e25be0 100644 --- a/modules/image/Image_gan/style_transfer/animegan_v2_hayao_64/module.py +++ b/modules/image/Image_gan/style_transfer/animegan_v2_hayao_64/module.py @@ -13,16 +13,16 @@ from animegan_v2_hayao_64.processor import base64_to_cv2, cv2_to_base64, Process author="jm12138", # 作者名称 author_email="jm12138@qq.com", # 作者邮箱 summary="animegan_v2_hayao_64", # 模型介绍 - version="1.0.0" # 版本号 + version="1.0.2" # 版本号 ) class 
Animegan_V2_Hayao_64(Module): # 初始化函数 - def _initialize(self, use_gpu=False): + def __init__(self, name=None, use_gpu=False): # 设置模型路径 self.model_path = os.path.join(self.directory, "animegan_v2_hayao_64") # 加载模型 - self.model = Model(self.model_path, use_gpu) + self.model = Model(modelpath=self.model_path, use_gpu=use_gpu, use_mkldnn=False, combined=False) # 关键点检测函数 def style_transfer(self, diff --git a/modules/image/Image_gan/style_transfer/animegan_v2_hayao_99/model.py b/modules/image/Image_gan/style_transfer/animegan_v2_hayao_99/model.py index 29d4f83b6244a1ba98524748b0506e3902329b7e..3adadc9232afaaafe31b3520ad54db406bc42229 100644 --- a/modules/image/Image_gan/style_transfer/animegan_v2_hayao_99/model.py +++ b/modules/image/Image_gan/style_transfer/animegan_v2_hayao_99/model.py @@ -1,52 +1,56 @@ import os import numpy as np -from paddle.fluid.core import AnalysisConfig, create_paddle_predictor +from paddle.inference import create_predictor, Config __all__ = ['Model'] - class Model(): # 初始化函数 - def __init__(self, modelpath, use_gpu): + def __init__(self, modelpath, use_gpu=False, use_mkldnn=True, combined=True): # 加载模型预测器 - self.predictor = self.load_model(modelpath, use_gpu) + self.predictor = self.load_model(modelpath, use_gpu, use_mkldnn, combined) # 获取模型的输入输出 self.input_names = self.predictor.get_input_names() self.output_names = self.predictor.get_output_names() - self.input_tensor = self.predictor.get_input_tensor(self.input_names[0]) - self.output_tensor = self.predictor.get_output_tensor(self.output_names[0]) + self.input_handle = self.predictor.get_input_handle(self.input_names[0]) + self.output_handle = self.predictor.get_output_handle(self.output_names[0]) # 模型加载函数 - def load_model(self, modelpath, use_gpu): + def load_model(self, modelpath, use_gpu, use_mkldnn, combined): # 对运行位置进行配置 if use_gpu: try: - places = os.environ["CUDA_VISIBLE_DEVICES"] - places = int(places[0]) - except Exception as e: - print('Error: %s. Please set the environment variables "CUDA_VISIBLE_DEVICES".' % e) + int(os.environ.get('CUDA_VISIBLE_DEVICES')) + except Exception: + print('Error! Unable to use GPU. 
Please set the environment variables "CUDA_VISIBLE_DEVICES=GPU_id" to use GPU.') use_gpu = False - + # 加载模型参数 - config = AnalysisConfig(modelpath) + if combined: + model = os.path.join(modelpath, "__model__") + params = os.path.join(modelpath, "__params__") + config = Config(model, params) + else: + config = Config(modelpath) # 设置参数 - if use_gpu: - config.enable_use_gpu(100, places) + if use_gpu: + config.enable_use_gpu(100, 0) else: config.disable_gpu() - config.enable_mkldnn() + if use_mkldnn: + config.enable_mkldnn() config.disable_glog_info() config.switch_ir_optim(True) config.enable_memory_optim() config.switch_use_feed_fetch_ops(False) config.switch_specify_input_names(True) - + # 通过参数加载模型预测器 - predictor = create_paddle_predictor(config) - + predictor = create_predictor(config) + # 返回预测器 return predictor @@ -56,13 +60,14 @@ class Model(): # 遍历输入数据进行预测 for input_data in input_datas: - self.input_tensor.copy_from_cpu(input_data) - self.predictor.zero_copy_run() - output = self.output_tensor.copy_to_cpu() + inputs = input_data.copy() + self.input_handle.copy_from_cpu(inputs) + self.predictor.run() + output = self.output_handle.copy_to_cpu() outputs.append(output) - + # 预测结果合并 outputs = np.concatenate(outputs, 0) # 返回预测结果 - return outputs + return outputs \ No newline at end of file diff --git a/modules/image/Image_gan/style_transfer/animegan_v2_hayao_99/module.py b/modules/image/Image_gan/style_transfer/animegan_v2_hayao_99/module.py index 7e724833e3cc71409c619c572ff93fe42fda7c7b..a5228e95c3fabec9ee3cb417a191e2d2280a2d99 100644 --- a/modules/image/Image_gan/style_transfer/animegan_v2_hayao_99/module.py +++ b/modules/image/Image_gan/style_transfer/animegan_v2_hayao_99/module.py @@ -13,16 +13,16 @@ from animegan_v2_hayao_99.processor import base64_to_cv2, cv2_to_base64, Process author="jm12138", # 作者名称 author_email="jm12138@qq.com", # 作者邮箱 summary="animegan_v2_hayao_99", # 模型介绍 - version="1.0.0" # 版本号 + version="1.0.2" # 版本号 ) class Animegan_V2_Hayao_99(Module): # 初始化函数 - def _initialize(self, use_gpu=False): + def __init__(self, name=None, use_gpu=False): # 设置模型路径 self.model_path = os.path.join(self.directory, "animegan_v2_hayao_99") # 加载模型 - self.model = Model(self.model_path, use_gpu) + self.model = Model(modelpath=self.model_path, use_gpu=use_gpu, use_mkldnn=False, combined=False) # 关键点检测函数 def style_transfer(self, diff --git a/modules/image/Image_gan/style_transfer/animegan_v2_paprika_54/model.py b/modules/image/Image_gan/style_transfer/animegan_v2_paprika_54/model.py index 29d4f83b6244a1ba98524748b0506e3902329b7e..3adadc9232afaaafe31b3520ad54db406bc42229 100644 --- a/modules/image/Image_gan/style_transfer/animegan_v2_paprika_54/model.py +++ b/modules/image/Image_gan/style_transfer/animegan_v2_paprika_54/model.py @@ -1,52 +1,56 @@ import os import numpy as np -from paddle.fluid.core import AnalysisConfig, create_paddle_predictor +from paddle.inference import create_predictor, Config __all__ = ['Model'] - class Model(): # 初始化函数 - def __init__(self, modelpath, use_gpu): + def __init__(self, modelpath, use_gpu=False, use_mkldnn=True, combined=True): # 加载模型预测器 - self.predictor = self.load_model(modelpath, use_gpu) + self.predictor = self.load_model(modelpath, use_gpu, use_mkldnn, combined) # 获取模型的输入输出 self.input_names = self.predictor.get_input_names() self.output_names = self.predictor.get_output_names() - self.input_tensor = self.predictor.get_input_tensor(self.input_names[0]) - self.output_tensor = self.predictor.get_output_tensor(self.output_names[0]) + self.input_handle = 
self.predictor.get_input_handle(self.input_names[0]) + self.output_handle = self.predictor.get_output_handle(self.output_names[0]) # 模型加载函数 - def load_model(self, modelpath, use_gpu): + def load_model(self, modelpath, use_gpu, use_mkldnn, combined): # 对运行位置进行配置 if use_gpu: try: - places = os.environ["CUDA_VISIBLE_DEVICES"] - places = int(places[0]) - except Exception as e: - print('Error: %s. Please set the environment variables "CUDA_VISIBLE_DEVICES".' % e) + int(os.environ.get('CUDA_VISIBLE_DEVICES')) + except Exception: + print('Error! Unable to use GPU. Please set the environment variables "CUDA_VISIBLE_DEVICES=GPU_id" to use GPU.') use_gpu = False - + # 加载模型参数 - config = AnalysisConfig(modelpath) + if combined: + model = os.path.join(modelpath, "__model__") + params = os.path.join(modelpath, "__params__") + config = Config(model, params) + else: + config = Config(modelpath) # 设置参数 - if use_gpu: - config.enable_use_gpu(100, places) + if use_gpu: + config.enable_use_gpu(100, 0) else: config.disable_gpu() - config.enable_mkldnn() + if use_mkldnn: + config.enable_mkldnn() config.disable_glog_info() config.switch_ir_optim(True) config.enable_memory_optim() config.switch_use_feed_fetch_ops(False) config.switch_specify_input_names(True) - + # 通过参数加载模型预测器 - predictor = create_paddle_predictor(config) - + predictor = create_predictor(config) + # 返回预测器 return predictor @@ -56,13 +60,14 @@ class Model(): # 遍历输入数据进行预测 for input_data in input_datas: - self.input_tensor.copy_from_cpu(input_data) - self.predictor.zero_copy_run() - output = self.output_tensor.copy_to_cpu() + inputs = input_data.copy() + self.input_handle.copy_from_cpu(inputs) + self.predictor.run() + output = self.output_handle.copy_to_cpu() outputs.append(output) - + # 预测结果合并 outputs = np.concatenate(outputs, 0) # 返回预测结果 - return outputs + return outputs \ No newline at end of file diff --git a/modules/image/Image_gan/style_transfer/animegan_v2_paprika_54/module.py b/modules/image/Image_gan/style_transfer/animegan_v2_paprika_54/module.py index f3e02e0d98a859985dc2250f93d42ebce2995967..50f07186982bce0b7854db0e2697a1230b2925e7 100644 --- a/modules/image/Image_gan/style_transfer/animegan_v2_paprika_54/module.py +++ b/modules/image/Image_gan/style_transfer/animegan_v2_paprika_54/module.py @@ -13,16 +13,16 @@ from animegan_v2_paprika_54.processor import base64_to_cv2, cv2_to_base64, Proce author="jm12138", # 作者名称 author_email="jm12138@qq.com", # 作者邮箱 summary="animegan_v2_paprika_54", # 模型介绍 - version="1.0.0" # 版本号 + version="1.0.2" # 版本号 ) class Animegan_V2_Paprika_54(Module): # 初始化函数 - def _initialize(self, use_gpu=False): + def __init__(self, name=None, use_gpu=False): # 设置模型路径 self.model_path = os.path.join(self.directory, "animegan_v2_paprika_54") # 加载模型 - self.model = Model(self.model_path, use_gpu) + self.model = Model(modelpath=self.model_path, use_gpu=use_gpu, use_mkldnn=False, combined=False) # 关键点检测函数 def style_transfer(self, diff --git a/modules/image/Image_gan/style_transfer/animegan_v2_paprika_74/model.py b/modules/image/Image_gan/style_transfer/animegan_v2_paprika_74/model.py index 29d4f83b6244a1ba98524748b0506e3902329b7e..3adadc9232afaaafe31b3520ad54db406bc42229 100644 --- a/modules/image/Image_gan/style_transfer/animegan_v2_paprika_74/model.py +++ b/modules/image/Image_gan/style_transfer/animegan_v2_paprika_74/model.py @@ -1,52 +1,56 @@ import os import numpy as np -from paddle.fluid.core import AnalysisConfig, create_paddle_predictor +from paddle.inference import create_predictor, Config __all__ = ['Model'] - class 
Model(): # 初始化函数 - def __init__(self, modelpath, use_gpu): + def __init__(self, modelpath, use_gpu=False, use_mkldnn=True, combined=True): # 加载模型预测器 - self.predictor = self.load_model(modelpath, use_gpu) + self.predictor = self.load_model(modelpath, use_gpu, use_mkldnn, combined) # 获取模型的输入输出 self.input_names = self.predictor.get_input_names() self.output_names = self.predictor.get_output_names() - self.input_tensor = self.predictor.get_input_tensor(self.input_names[0]) - self.output_tensor = self.predictor.get_output_tensor(self.output_names[0]) + self.input_handle = self.predictor.get_input_handle(self.input_names[0]) + self.output_handle = self.predictor.get_output_handle(self.output_names[0]) # 模型加载函数 - def load_model(self, modelpath, use_gpu): + def load_model(self, modelpath, use_gpu, use_mkldnn, combined): # 对运行位置进行配置 if use_gpu: try: - places = os.environ["CUDA_VISIBLE_DEVICES"] - places = int(places[0]) - except Exception as e: - print('Error: %s. Please set the environment variables "CUDA_VISIBLE_DEVICES".' % e) + int(os.environ.get('CUDA_VISIBLE_DEVICES')) + except Exception: + print('Error! Unable to use GPU. Please set the environment variables "CUDA_VISIBLE_DEVICES=GPU_id" to use GPU.') use_gpu = False - + # 加载模型参数 - config = AnalysisConfig(modelpath) + if combined: + model = os.path.join(modelpath, "__model__") + params = os.path.join(modelpath, "__params__") + config = Config(model, params) + else: + config = Config(modelpath) # 设置参数 - if use_gpu: - config.enable_use_gpu(100, places) + if use_gpu: + config.enable_use_gpu(100, 0) else: config.disable_gpu() - config.enable_mkldnn() + if use_mkldnn: + config.enable_mkldnn() config.disable_glog_info() config.switch_ir_optim(True) config.enable_memory_optim() config.switch_use_feed_fetch_ops(False) config.switch_specify_input_names(True) - + # 通过参数加载模型预测器 - predictor = create_paddle_predictor(config) - + predictor = create_predictor(config) + # 返回预测器 return predictor @@ -56,13 +60,14 @@ class Model(): # 遍历输入数据进行预测 for input_data in input_datas: - self.input_tensor.copy_from_cpu(input_data) - self.predictor.zero_copy_run() - output = self.output_tensor.copy_to_cpu() + inputs = input_data.copy() + self.input_handle.copy_from_cpu(inputs) + self.predictor.run() + output = self.output_handle.copy_to_cpu() outputs.append(output) - + # 预测结果合并 outputs = np.concatenate(outputs, 0) # 返回预测结果 - return outputs + return outputs \ No newline at end of file diff --git a/modules/image/Image_gan/style_transfer/animegan_v2_paprika_74/module.py b/modules/image/Image_gan/style_transfer/animegan_v2_paprika_74/module.py index 1081be3aee72157beee9d00017e5bf1597f891a7..9b986062fbb7a3c110a9d4f2e064def08adb6b3d 100644 --- a/modules/image/Image_gan/style_transfer/animegan_v2_paprika_74/module.py +++ b/modules/image/Image_gan/style_transfer/animegan_v2_paprika_74/module.py @@ -13,16 +13,16 @@ from animegan_v2_paprika_74.processor import base64_to_cv2, cv2_to_base64, Proce author="jm12138", # 作者名称 author_email="jm12138@qq.com", # 作者邮箱 summary="animegan_v2_paprika_74", # 模型介绍 - version="1.0.0" # 版本号 + version="1.0.2" # 版本号 ) class Animegan_V2_Paprika_74(Module): # 初始化函数 - def _initialize(self, use_gpu=False): + def __init__(self, name=None, use_gpu=False): # 设置模型路径 self.model_path = os.path.join(self.directory, "animegan_v2_paprika_74") # 加载模型 - self.model = Model(self.model_path, use_gpu) + self.model = Model(modelpath=self.model_path, use_gpu=use_gpu, use_mkldnn=False, combined=False) # 关键点检测函数 def style_transfer(self, diff --git 
a/modules/image/Image_gan/style_transfer/animegan_v2_paprika_97/model.py b/modules/image/Image_gan/style_transfer/animegan_v2_paprika_97/model.py index 29d4f83b6244a1ba98524748b0506e3902329b7e..3adadc9232afaaafe31b3520ad54db406bc42229 100644 --- a/modules/image/Image_gan/style_transfer/animegan_v2_paprika_97/model.py +++ b/modules/image/Image_gan/style_transfer/animegan_v2_paprika_97/model.py @@ -1,52 +1,56 @@ import os import numpy as np -from paddle.fluid.core import AnalysisConfig, create_paddle_predictor +from paddle.inference import create_predictor, Config __all__ = ['Model'] - class Model(): # 初始化函数 - def __init__(self, modelpath, use_gpu): + def __init__(self, modelpath, use_gpu=False, use_mkldnn=True, combined=True): # 加载模型预测器 - self.predictor = self.load_model(modelpath, use_gpu) + self.predictor = self.load_model(modelpath, use_gpu, use_mkldnn, combined) # 获取模型的输入输出 self.input_names = self.predictor.get_input_names() self.output_names = self.predictor.get_output_names() - self.input_tensor = self.predictor.get_input_tensor(self.input_names[0]) - self.output_tensor = self.predictor.get_output_tensor(self.output_names[0]) + self.input_handle = self.predictor.get_input_handle(self.input_names[0]) + self.output_handle = self.predictor.get_output_handle(self.output_names[0]) # 模型加载函数 - def load_model(self, modelpath, use_gpu): + def load_model(self, modelpath, use_gpu, use_mkldnn, combined): # 对运行位置进行配置 if use_gpu: try: - places = os.environ["CUDA_VISIBLE_DEVICES"] - places = int(places[0]) - except Exception as e: - print('Error: %s. Please set the environment variables "CUDA_VISIBLE_DEVICES".' % e) + int(os.environ.get('CUDA_VISIBLE_DEVICES')) + except Exception: + print('Error! Unable to use GPU. Please set the environment variables "CUDA_VISIBLE_DEVICES=GPU_id" to use GPU.') use_gpu = False - + # 加载模型参数 - config = AnalysisConfig(modelpath) + if combined: + model = os.path.join(modelpath, "__model__") + params = os.path.join(modelpath, "__params__") + config = Config(model, params) + else: + config = Config(modelpath) # 设置参数 - if use_gpu: - config.enable_use_gpu(100, places) + if use_gpu: + config.enable_use_gpu(100, 0) else: config.disable_gpu() - config.enable_mkldnn() + if use_mkldnn: + config.enable_mkldnn() config.disable_glog_info() config.switch_ir_optim(True) config.enable_memory_optim() config.switch_use_feed_fetch_ops(False) config.switch_specify_input_names(True) - + # 通过参数加载模型预测器 - predictor = create_paddle_predictor(config) - + predictor = create_predictor(config) + # 返回预测器 return predictor @@ -56,13 +60,14 @@ class Model(): # 遍历输入数据进行预测 for input_data in input_datas: - self.input_tensor.copy_from_cpu(input_data) - self.predictor.zero_copy_run() - output = self.output_tensor.copy_to_cpu() + inputs = input_data.copy() + self.input_handle.copy_from_cpu(inputs) + self.predictor.run() + output = self.output_handle.copy_to_cpu() outputs.append(output) - + # 预测结果合并 outputs = np.concatenate(outputs, 0) # 返回预测结果 - return outputs + return outputs \ No newline at end of file diff --git a/modules/image/Image_gan/style_transfer/animegan_v2_paprika_97/module.py b/modules/image/Image_gan/style_transfer/animegan_v2_paprika_97/module.py index c059338d39bff08385213962e92acd5aa11f8065..73a79baf839897864a850d8c029dc4aff6223488 100644 --- a/modules/image/Image_gan/style_transfer/animegan_v2_paprika_97/module.py +++ b/modules/image/Image_gan/style_transfer/animegan_v2_paprika_97/module.py @@ -13,16 +13,16 @@ from animegan_v2_paprika_97.processor import base64_to_cv2, cv2_to_base64, Proce 
author="jm12138", # 作者名称 author_email="jm12138@qq.com", # 作者邮箱 summary="animegan_v2_paprika_97", # 模型介绍 - version="1.0.0" # 版本号 + version="1.0.2" # 版本号 ) class Animegan_V2_Paprika_97(Module): # 初始化函数 - def _initialize(self, use_gpu=False): + def __init__(self, name=None, use_gpu=False): # 设置模型路径 self.model_path = os.path.join(self.directory, "animegan_v2_paprika_97") # 加载模型 - self.model = Model(self.model_path, use_gpu) + self.model = Model(modelpath=self.model_path, use_gpu=use_gpu, use_mkldnn=False, combined=False) # 关键点检测函数 def style_transfer(self, diff --git a/modules/image/Image_gan/style_transfer/animegan_v2_paprika_98/model.py b/modules/image/Image_gan/style_transfer/animegan_v2_paprika_98/model.py index 29d4f83b6244a1ba98524748b0506e3902329b7e..3adadc9232afaaafe31b3520ad54db406bc42229 100644 --- a/modules/image/Image_gan/style_transfer/animegan_v2_paprika_98/model.py +++ b/modules/image/Image_gan/style_transfer/animegan_v2_paprika_98/model.py @@ -1,52 +1,56 @@ import os import numpy as np -from paddle.fluid.core import AnalysisConfig, create_paddle_predictor +from paddle.inference import create_predictor, Config __all__ = ['Model'] - class Model(): # 初始化函数 - def __init__(self, modelpath, use_gpu): + def __init__(self, modelpath, use_gpu=False, use_mkldnn=True, combined=True): # 加载模型预测器 - self.predictor = self.load_model(modelpath, use_gpu) + self.predictor = self.load_model(modelpath, use_gpu, use_mkldnn, combined) # 获取模型的输入输出 self.input_names = self.predictor.get_input_names() self.output_names = self.predictor.get_output_names() - self.input_tensor = self.predictor.get_input_tensor(self.input_names[0]) - self.output_tensor = self.predictor.get_output_tensor(self.output_names[0]) + self.input_handle = self.predictor.get_input_handle(self.input_names[0]) + self.output_handle = self.predictor.get_output_handle(self.output_names[0]) # 模型加载函数 - def load_model(self, modelpath, use_gpu): + def load_model(self, modelpath, use_gpu, use_mkldnn, combined): # 对运行位置进行配置 if use_gpu: try: - places = os.environ["CUDA_VISIBLE_DEVICES"] - places = int(places[0]) - except Exception as e: - print('Error: %s. Please set the environment variables "CUDA_VISIBLE_DEVICES".' % e) + int(os.environ.get('CUDA_VISIBLE_DEVICES')) + except Exception: + print('Error! Unable to use GPU. 
Please set the environment variables "CUDA_VISIBLE_DEVICES=GPU_id" to use GPU.') use_gpu = False - + # 加载模型参数 - config = AnalysisConfig(modelpath) + if combined: + model = os.path.join(modelpath, "__model__") + params = os.path.join(modelpath, "__params__") + config = Config(model, params) + else: + config = Config(modelpath) # 设置参数 - if use_gpu: - config.enable_use_gpu(100, places) + if use_gpu: + config.enable_use_gpu(100, 0) else: config.disable_gpu() - config.enable_mkldnn() + if use_mkldnn: + config.enable_mkldnn() config.disable_glog_info() config.switch_ir_optim(True) config.enable_memory_optim() config.switch_use_feed_fetch_ops(False) config.switch_specify_input_names(True) - + # 通过参数加载模型预测器 - predictor = create_paddle_predictor(config) - + predictor = create_predictor(config) + # 返回预测器 return predictor @@ -56,13 +60,14 @@ class Model(): # 遍历输入数据进行预测 for input_data in input_datas: - self.input_tensor.copy_from_cpu(input_data) - self.predictor.zero_copy_run() - output = self.output_tensor.copy_to_cpu() + inputs = input_data.copy() + self.input_handle.copy_from_cpu(inputs) + self.predictor.run() + output = self.output_handle.copy_to_cpu() outputs.append(output) - + # 预测结果合并 outputs = np.concatenate(outputs, 0) # 返回预测结果 - return outputs + return outputs \ No newline at end of file diff --git a/modules/image/Image_gan/style_transfer/animegan_v2_paprika_98/module.py b/modules/image/Image_gan/style_transfer/animegan_v2_paprika_98/module.py index 2c549e62e5f43f7d2f1b85ee83a6c7a3bccf51f9..10f56e468fc101d547b6fec89b06fdad89780a4b 100644 --- a/modules/image/Image_gan/style_transfer/animegan_v2_paprika_98/module.py +++ b/modules/image/Image_gan/style_transfer/animegan_v2_paprika_98/module.py @@ -13,16 +13,16 @@ from animegan_v2_paprika_98.processor import base64_to_cv2, cv2_to_base64, Proce author="jm12138", # 作者名称 author_email="jm12138@qq.com", # 作者邮箱 summary="animegan_v2_paprika_98", # 模型介绍 - version="1.0.0" # 版本号 + version="1.0.2" # 版本号 ) class Animegan_V2_Paprika_98(Module): # 初始化函数 - def _initialize(self, use_gpu=False): + def __init__(self, name=None, use_gpu=False): # 设置模型路径 self.model_path = os.path.join(self.directory, "animegan_v2_paprika_98") # 加载模型 - self.model = Model(self.model_path, use_gpu) + self.model = Model(modelpath=self.model_path, use_gpu=use_gpu, use_mkldnn=False, combined=False) # 关键点检测函数 def style_transfer(self, diff --git a/modules/image/Image_gan/style_transfer/animegan_v2_shinkai_33/model.py b/modules/image/Image_gan/style_transfer/animegan_v2_shinkai_33/model.py index 29d4f83b6244a1ba98524748b0506e3902329b7e..3adadc9232afaaafe31b3520ad54db406bc42229 100644 --- a/modules/image/Image_gan/style_transfer/animegan_v2_shinkai_33/model.py +++ b/modules/image/Image_gan/style_transfer/animegan_v2_shinkai_33/model.py @@ -1,52 +1,56 @@ import os import numpy as np -from paddle.fluid.core import AnalysisConfig, create_paddle_predictor +from paddle.inference import create_predictor, Config __all__ = ['Model'] - class Model(): # 初始化函数 - def __init__(self, modelpath, use_gpu): + def __init__(self, modelpath, use_gpu=False, use_mkldnn=True, combined=True): # 加载模型预测器 - self.predictor = self.load_model(modelpath, use_gpu) + self.predictor = self.load_model(modelpath, use_gpu, use_mkldnn, combined) # 获取模型的输入输出 self.input_names = self.predictor.get_input_names() self.output_names = self.predictor.get_output_names() - self.input_tensor = self.predictor.get_input_tensor(self.input_names[0]) - self.output_tensor = self.predictor.get_output_tensor(self.output_names[0]) + 
self.input_handle = self.predictor.get_input_handle(self.input_names[0]) + self.output_handle = self.predictor.get_output_handle(self.output_names[0]) # 模型加载函数 - def load_model(self, modelpath, use_gpu): + def load_model(self, modelpath, use_gpu, use_mkldnn, combined): # 对运行位置进行配置 if use_gpu: try: - places = os.environ["CUDA_VISIBLE_DEVICES"] - places = int(places[0]) - except Exception as e: - print('Error: %s. Please set the environment variables "CUDA_VISIBLE_DEVICES".' % e) + int(os.environ.get('CUDA_VISIBLE_DEVICES')) + except Exception: + print('Error! Unable to use GPU. Please set the environment variables "CUDA_VISIBLE_DEVICES=GPU_id" to use GPU.') use_gpu = False - + # 加载模型参数 - config = AnalysisConfig(modelpath) + if combined: + model = os.path.join(modelpath, "__model__") + params = os.path.join(modelpath, "__params__") + config = Config(model, params) + else: + config = Config(modelpath) # 设置参数 - if use_gpu: - config.enable_use_gpu(100, places) + if use_gpu: + config.enable_use_gpu(100, 0) else: config.disable_gpu() - config.enable_mkldnn() + if use_mkldnn: + config.enable_mkldnn() config.disable_glog_info() config.switch_ir_optim(True) config.enable_memory_optim() config.switch_use_feed_fetch_ops(False) config.switch_specify_input_names(True) - + # 通过参数加载模型预测器 - predictor = create_paddle_predictor(config) - + predictor = create_predictor(config) + # 返回预测器 return predictor @@ -56,13 +60,14 @@ class Model(): # 遍历输入数据进行预测 for input_data in input_datas: - self.input_tensor.copy_from_cpu(input_data) - self.predictor.zero_copy_run() - output = self.output_tensor.copy_to_cpu() + inputs = input_data.copy() + self.input_handle.copy_from_cpu(inputs) + self.predictor.run() + output = self.output_handle.copy_to_cpu() outputs.append(output) - + # 预测结果合并 outputs = np.concatenate(outputs, 0) # 返回预测结果 - return outputs + return outputs \ No newline at end of file diff --git a/modules/image/Image_gan/style_transfer/animegan_v2_shinkai_33/module.py b/modules/image/Image_gan/style_transfer/animegan_v2_shinkai_33/module.py index 674e576db08f80b5f1526e0096927354cb9ff3da..2ff2c9a17601cca061749fe1d16d83cbf1125e0b 100644 --- a/modules/image/Image_gan/style_transfer/animegan_v2_shinkai_33/module.py +++ b/modules/image/Image_gan/style_transfer/animegan_v2_shinkai_33/module.py @@ -13,16 +13,16 @@ from animegan_v2_shinkai_33.processor import base64_to_cv2, cv2_to_base64, Proce author="jm12138", # 作者名称 author_email="jm12138@qq.com", # 作者邮箱 summary="animegan_v2_shinkai_33", # 模型介绍 - version="1.0.0" # 版本号 + version="1.0.2" # 版本号 ) class Animegan_V2_Shinkai_33(Module): # 初始化函数 - def _initialize(self, use_gpu=False): + def __init__(self, name=None, use_gpu=False): # 设置模型路径 self.model_path = os.path.join(self.directory, "animegan_v2_shinkai_33") # 加载模型 - self.model = Model(self.model_path, use_gpu) + self.model = Model(modelpath=self.model_path, use_gpu=use_gpu, use_mkldnn=False, combined=False) # 关键点检测函数 def style_transfer(self, diff --git a/modules/image/Image_gan/style_transfer/animegan_v2_shinkai_53/model.py b/modules/image/Image_gan/style_transfer/animegan_v2_shinkai_53/model.py index 29d4f83b6244a1ba98524748b0506e3902329b7e..3adadc9232afaaafe31b3520ad54db406bc42229 100644 --- a/modules/image/Image_gan/style_transfer/animegan_v2_shinkai_53/model.py +++ b/modules/image/Image_gan/style_transfer/animegan_v2_shinkai_53/model.py @@ -1,52 +1,56 @@ import os import numpy as np -from paddle.fluid.core import AnalysisConfig, create_paddle_predictor +from paddle.inference import create_predictor, Config __all__ = 
['Model'] - class Model(): # 初始化函数 - def __init__(self, modelpath, use_gpu): + def __init__(self, modelpath, use_gpu=False, use_mkldnn=True, combined=True): # 加载模型预测器 - self.predictor = self.load_model(modelpath, use_gpu) + self.predictor = self.load_model(modelpath, use_gpu, use_mkldnn, combined) # 获取模型的输入输出 self.input_names = self.predictor.get_input_names() self.output_names = self.predictor.get_output_names() - self.input_tensor = self.predictor.get_input_tensor(self.input_names[0]) - self.output_tensor = self.predictor.get_output_tensor(self.output_names[0]) + self.input_handle = self.predictor.get_input_handle(self.input_names[0]) + self.output_handle = self.predictor.get_output_handle(self.output_names[0]) # 模型加载函数 - def load_model(self, modelpath, use_gpu): + def load_model(self, modelpath, use_gpu, use_mkldnn, combined): # 对运行位置进行配置 if use_gpu: try: - places = os.environ["CUDA_VISIBLE_DEVICES"] - places = int(places[0]) - except Exception as e: - print('Error: %s. Please set the environment variables "CUDA_VISIBLE_DEVICES".' % e) + int(os.environ.get('CUDA_VISIBLE_DEVICES')) + except Exception: + print('Error! Unable to use GPU. Please set the environment variables "CUDA_VISIBLE_DEVICES=GPU_id" to use GPU.') use_gpu = False - + # 加载模型参数 - config = AnalysisConfig(modelpath) + if combined: + model = os.path.join(modelpath, "__model__") + params = os.path.join(modelpath, "__params__") + config = Config(model, params) + else: + config = Config(modelpath) # 设置参数 - if use_gpu: - config.enable_use_gpu(100, places) + if use_gpu: + config.enable_use_gpu(100, 0) else: config.disable_gpu() - config.enable_mkldnn() + if use_mkldnn: + config.enable_mkldnn() config.disable_glog_info() config.switch_ir_optim(True) config.enable_memory_optim() config.switch_use_feed_fetch_ops(False) config.switch_specify_input_names(True) - + # 通过参数加载模型预测器 - predictor = create_paddle_predictor(config) - + predictor = create_predictor(config) + # 返回预测器 return predictor @@ -56,13 +60,14 @@ class Model(): # 遍历输入数据进行预测 for input_data in input_datas: - self.input_tensor.copy_from_cpu(input_data) - self.predictor.zero_copy_run() - output = self.output_tensor.copy_to_cpu() + inputs = input_data.copy() + self.input_handle.copy_from_cpu(inputs) + self.predictor.run() + output = self.output_handle.copy_to_cpu() outputs.append(output) - + # 预测结果合并 outputs = np.concatenate(outputs, 0) # 返回预测结果 - return outputs + return outputs \ No newline at end of file diff --git a/modules/image/Image_gan/style_transfer/animegan_v2_shinkai_53/module.py b/modules/image/Image_gan/style_transfer/animegan_v2_shinkai_53/module.py index e192d282597f62de423cb1b20a7171fd3fcf197e..c0158b5c69bdc30856de39d1d03449e59a13f2d0 100644 --- a/modules/image/Image_gan/style_transfer/animegan_v2_shinkai_53/module.py +++ b/modules/image/Image_gan/style_transfer/animegan_v2_shinkai_53/module.py @@ -13,16 +13,16 @@ from animegan_v2_shinkai_53.processor import base64_to_cv2, cv2_to_base64, Proce author="jm12138", # 作者名称 author_email="jm12138@qq.com", # 作者邮箱 summary="animegan_v2_shinkai_53", # 模型介绍 - version="1.0.0" # 版本号 + version="1.0.2" # 版本号 ) class Animegan_V2_Shinkai_53(Module): # 初始化函数 - def _initialize(self, use_gpu=False): + def __init__(self, name=None, use_gpu=False): # 设置模型路径 self.model_path = os.path.join(self.directory, "animegan_v2_shinkai_53") # 加载模型 - self.model = Model(self.model_path, use_gpu) + self.model = Model(modelpath=self.model_path, use_gpu=use_gpu, use_mkldnn=False, combined=False) # 关键点检测函数 def style_transfer(self, diff --git 
a/modules/image/keypoint_detection/hand_pose_localization/model.py b/modules/image/keypoint_detection/hand_pose_localization/model.py index 05c40ac75a69da72263c8f04bac2bba9ee1a36f1..81d177ea8dc02f6775fc9789ec7eb68eda485b4c 100644 --- a/modules/image/keypoint_detection/hand_pose_localization/model.py +++ b/modules/image/keypoint_detection/hand_pose_localization/model.py @@ -1,55 +1,56 @@ import os import numpy as np -from paddle.fluid.core import AnalysisConfig, create_paddle_predictor +from paddle.inference import create_predictor, Config __all__ = ['Model'] - class Model(): # 初始化函数 - def __init__(self, modelpath, use_gpu): + def __init__(self, modelpath, use_gpu=False, use_mkldnn=True, combined=True): # 加载模型预测器 - self.predictor = self.load_model(modelpath, use_gpu) + self.predictor = self.load_model(modelpath, use_gpu, use_mkldnn, combined) # 获取模型的输入输出 self.input_names = self.predictor.get_input_names() self.output_names = self.predictor.get_output_names() - self.input_tensor = self.predictor.get_input_tensor(self.input_names[0]) - self.output_tensor = self.predictor.get_output_tensor(self.output_names[0]) + self.input_handle = self.predictor.get_input_handle(self.input_names[0]) + self.output_handle = self.predictor.get_output_handle(self.output_names[0]) # 模型加载函数 - def load_model(self, modelpath, use_gpu): + def load_model(self, modelpath, use_gpu, use_mkldnn, combined): # 对运行位置进行配置 if use_gpu: try: - places = os.environ["CUDA_VISIBLE_DEVICES"] - places = int(places[0]) - except Exception as e: - print('Error: %s. Please set the environment variables "CUDA_VISIBLE_DEVICES".' % e) + int(os.environ.get('CUDA_VISIBLE_DEVICES')) + except Exception: + print('Error! Unable to use GPU. Please set the environment variables "CUDA_VISIBLE_DEVICES=GPU_id" to use GPU.') use_gpu = False - - # 预训练模型路径 - model = os.path.join(modelpath, "__model__") - params = os.path.join(modelpath, "__params__") - + # 加载模型参数 - config = AnalysisConfig(model, params) + if combined: + model = os.path.join(modelpath, "__model__") + params = os.path.join(modelpath, "__params__") + config = Config(model, params) + else: + config = Config(modelpath) # 设置参数 - if use_gpu: - config.enable_use_gpu(100, places) + if use_gpu: + config.enable_use_gpu(100, 0) else: config.disable_gpu() - config.enable_mkldnn() + if use_mkldnn: + config.enable_mkldnn() config.disable_glog_info() config.switch_ir_optim(True) + config.enable_memory_optim() config.switch_use_feed_fetch_ops(False) config.switch_specify_input_names(True) - + # 通过参数加载模型预测器 - predictor = create_paddle_predictor(config) - + predictor = create_predictor(config) + # 返回预测器 return predictor @@ -59,13 +60,14 @@ class Model(): # 遍历输入数据进行预测 for input_data in input_datas: - self.input_tensor.copy_from_cpu(input_data) - self.predictor.zero_copy_run() - output = self.output_tensor.copy_to_cpu() + inputs = input_data.copy() + self.input_handle.copy_from_cpu(inputs) + self.predictor.run() + output = self.output_handle.copy_to_cpu() outputs.append(output) - + # 预测结果合并 outputs = np.concatenate(outputs, 0) # 返回预测结果 - return outputs + return outputs \ No newline at end of file diff --git a/modules/image/keypoint_detection/hand_pose_localization/module.py b/modules/image/keypoint_detection/hand_pose_localization/module.py index 26ff7f292ef17f77c0060d9e9c6cb48f46b55a4d..c855319f154e96b0ca5170075012b4f672161f1a 100644 --- a/modules/image/keypoint_detection/hand_pose_localization/module.py +++ b/modules/image/keypoint_detection/hand_pose_localization/module.py @@ -14,16 +14,16 @@ from 
hand_pose_localization.processor import base64_to_cv2, Processor author="jm12138", # 作者名称 author_email="jm12138@qq.com", # 作者邮箱 summary="hand_pose_localization", # 模型介绍 - version="1.0.0" # 版本号 + version="1.0.2" # 版本号 ) class Hand_Pose_Localization(Module): # 初始化函数 - def _initialize(self, use_gpu=False): + def __init__(self, name=None, use_gpu=False): # 设置模型路径 self.model_path = os.path.join(self.directory, "hand_pose_localization") # 加载模型 - self.model = Model(self.model_path, use_gpu) + self.model = Model(modelpath=self.model_path, use_gpu=use_gpu, use_mkldnn=False, combined=True) # 关键点检测函数 def keypoint_detection(self, images=None, paths=None, batch_size=1, output_dir='output', visualization=False):
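Taken together, these diffs migrate every module from the deprecated `paddle.fluid.core` API (`AnalysisConfig`, `create_paddle_predictor`, `get_input_tensor`, `zero_copy_run`) to the `paddle.inference` API (`Config`, `create_predictor`, `get_input_handle`, `predictor.run`). The sketch below condenses the new loading and prediction flow from the updated `model.py` files; the model directory and the input array are placeholders, not values from this patch.

```python
import os

import numpy as np
from paddle.inference import Config, create_predictor

# Placeholders for illustration only.
MODEL_DIR = "UGATIT_100w"   # uncombined model: a directory of model files
USE_GPU = False
USE_MKLDNN = False

# Build the inference config (a combined model would instead pass the
# "__model__" and "__params__" file paths to Config).
config = Config(MODEL_DIR)
if USE_GPU and os.environ.get("CUDA_VISIBLE_DEVICES"):
    config.enable_use_gpu(100, 0)   # 100 MB initial GPU memory pool on device 0
else:
    config.disable_gpu()
    if USE_MKLDNN:
        config.enable_mkldnn()
config.disable_glog_info()
config.switch_ir_optim(True)
config.enable_memory_optim()
config.switch_use_feed_fetch_ops(False)
config.switch_specify_input_names(True)

# Create the predictor and bind the first input/output handles.
predictor = create_predictor(config)
input_handle = predictor.get_input_handle(predictor.get_input_names()[0])
output_handle = predictor.get_output_handle(predictor.get_output_names()[0])

# Run one batch: copy data in, execute, copy the result back to the host.
batch = np.random.rand(1, 3, 256, 256).astype("float32")  # placeholder input
input_handle.copy_from_cpu(batch)
predictor.run()
result = output_handle.copy_to_cpu()
```

The key renames are one-to-one: `AnalysisConfig` becomes `Config`, `create_paddle_predictor` becomes `create_predictor`, `get_input_tensor`/`get_output_tensor` become `get_input_handle`/`get_output_handle`, and `zero_copy_run()` becomes `run()`; MKL-DNN is now opt-in via the `use_mkldnn` flag instead of being enabled unconditionally on CPU.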
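The module classes also move from PaddleHub's `_initialize` hook to a plain `__init__(self, name=None, use_gpu=False)`, so end-user code should be unaffected. A minimal usage sketch for one of the updated modules, assuming it has been installed with `hub install UGATIT_100w` and that `face.jpg` is a hypothetical local image (the `style_transfer` signature is taken from `module.py` in this diff; the return value and output files are assumptions):

```python
import cv2
import paddlehub as hub

# Load the updated module; with the defaults it runs on CPU.
model = hub.Module(name="UGATIT_100w")

# Run style transfer on an in-memory image; with visualization=True the
# results are presumably also written to output_dir.
results = model.style_transfer(
    images=[cv2.imread("face.jpg")],  # hypothetical input image
    batch_size=1,
    output_dir="output",
    visualization=True,
)
```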