# model.py — PaddlePaddle inference wrapper (Model class).
import os
import numpy as np

from paddle.fluid.core import AnalysisConfig, create_paddle_predictor

__all__ = ['Model']

class Model():
    """Thin wrapper around a Paddle zero-copy inference predictor.

    Loads an exported inference model (``__model__`` / ``__params__`` files)
    from a directory and exposes a batched ``predict`` method over its single
    input and single output tensor.
    """

    def __init__(self, modelpath, use_gpu):
        # Build the predictor from the exported model files.
        self.predictor = self.load_model(modelpath, use_gpu)

        # Cache the tensor handles once; this wrapper assumes the model has
        # exactly one input and one output (only index 0 is used).
        self.input_names = self.predictor.get_input_names()
        self.output_names = self.predictor.get_output_names()
        self.input_tensor = self.predictor.get_input_tensor(self.input_names[0])
        self.output_tensor = self.predictor.get_output_tensor(self.output_names[0])

    def load_model(self, modelpath, use_gpu):
        """Create and return a Paddle predictor.

        Args:
            modelpath: directory containing ``__model__`` and ``__params__``.
            use_gpu: run on GPU if True; requires ``CUDA_VISIBLE_DEVICES`` to
                be set, otherwise falls back to CPU with a warning.

        Returns:
            A paddle zero-copy predictor configured for this model.
        """
        # Default device id; only used on the GPU path but bound
        # unconditionally so the variable always exists.
        places = 0
        if use_gpu:
            try:
                # Use the first visible device. Splitting on ',' handles both
                # single ids ("3") and lists ("2,3"); the previous
                # int(places[0]) truncated ids >= 10 (e.g. "12" -> 1).
                places = int(os.environ["CUDA_VISIBLE_DEVICES"].split(",")[0])
            except Exception as e:
                # Missing or malformed env var: warn and fall back to CPU.
                print('Error: %s. Please set the environment variables "CUDA_VISIBLE_DEVICES".' % e)
                use_gpu = False

        # Paths of the exported inference model files.
        model = os.path.join(modelpath, "__model__")
        params = os.path.join(modelpath, "__params__")

        # Build the inference config from the model files.
        config = AnalysisConfig(model, params)

        if use_gpu:
            # 100 MB initial GPU memory pool on the selected device.
            config.enable_use_gpu(100, places)
        else:
            config.disable_gpu()
            config.enable_mkldnn()
        config.disable_glog_info()
        config.switch_ir_optim(True)
        # Zero-copy mode: feed/fetch ops disabled, tensors accessed by name.
        config.switch_use_feed_fetch_ops(False)
        config.switch_specify_input_names(True)

        return create_paddle_predictor(config)

    def predict(self, input_datas):
        """Run inference over a sequence of input batches.

        Args:
            input_datas: iterable of numpy arrays, each a batch fed to the
                model's single input tensor.

        Returns:
            numpy array: the per-batch outputs concatenated along axis 0.

        Raises:
            ValueError: if ``input_datas`` is empty (nothing to concatenate).
        """
        outputs = []

        for input_data in input_datas:
            self.input_tensor.copy_from_cpu(input_data)
            self.predictor.zero_copy_run()
            outputs.append(self.output_tensor.copy_to_cpu())

        # Guard with a clear message instead of np.concatenate's opaque error.
        if not outputs:
            raise ValueError("input_datas must contain at least one batch.")

        return np.concatenate(outputs, 0)