部署在线服务报错,使用load_inference_model可以成功
已关闭
部署在线服务报错,使用load_inference_model可以成功
Created by: hubu-wangpei
paddle1.7版本 使用load_inference_model,加载自己保存的模型,成功。使用load_inference_model加载模型的代码如下,模型输入数据为256×256大小的pgm灰度图像,输入Tensor Shape为[1,1,256,256]
from paddle.fluid.dygraph import TracedLayer
import cv2
import random
import numpy as np
from scipy import misc
import os
import paddle
import paddle.fluid as fluid
from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear
from paddle.fluid.dygraph.base import to_variable
from paddle.fluid.initializer import NumpyArrayInitializer
def data_loader(filename, dirpath="/home/aistudio/suni/test"):
    """Load a grayscale image and shape it as a [1, 1, H, W] float32 batch.

    :param filename: image file name, e.g. "6_0.pgm"
    :param dirpath: directory containing the image (default kept from the
        original hard-coded path, so existing callers are unaffected)
    :return: numpy.ndarray of dtype float32 with leading batch and channel axes
    """
    full_filename = os.path.join(dirpath, filename)
    # NOTE(review): scipy.misc.imread was deprecated and removed in SciPy 1.2;
    # consider cv2.imread(full_filename, cv2.IMREAD_GRAYSCALE) instead
    # (cv2 is already imported at the top of this script).
    img = misc.imread(full_filename, 0)
    # Add batch and channel axes in one step: (H, W) -> (1, 1, H, W).
    img = img[np.newaxis, np.newaxis, :]
    return img.astype('float32')
# --- Offline inference via load_inference_model (succeeds on paddle 1.7) ---
place = fluid.CPUPlace()
exe = fluid.Executor(place)
# An unused `main_prog = fluid.Program()` was removed: load_inference_model
# returns its own program, so only the (empty) startup program is run here.
startup_prog = fluid.Program()
exe.run(startup_prog)

path = "./saved_infer_model"
filename = "6_0.pgm"
img = data_loader(filename)

# Recover the saved inference program plus its feed/fetch metadata.
[inference_program, feed_target_names, fetch_targets] = (
    fluid.io.load_inference_model(dirname=path, executor=exe))
# Feed the [1, 1, 256, 256] float32 image under the model's first feed name.
results = exe.run(inference_program,
                  feed={feed_target_names[0]: img},
                  fetch_list=fetch_targets)
print(results)
print(feed_target_names)
但是使用在线部署的时候报错
Traceback (most recent call last):
File "/home/work/serving/webserver/home/views.py", line 48, in index
out = serving.infer(json.loads(req))
File "/home/work/serving/webserver/home/serving.py", line 24, in infer
return_numpy=False
File "/usr/local/lib/python3.5/site-packages/paddle/fluid/executor.py", line 657, in run
use_program_cache=use_program_cache)
File "/usr/local/lib/python3.5/site-packages/paddle/fluid/executor.py", line 751, in _run
fetch_var_name=fetch_var_name)
File "/usr/local/lib/python3.5/site-packages/paddle/fluid/executor.py", line 419, in _add_feed_fetch_ops
if not has_feed_operators(global_block, feed, feed_var_name):
File "/usr/local/lib/python3.5/site-packages/paddle/fluid/executor.py", line 158, in has_feed_operators
format(feed_target_name))
Exception: 'feed_targets' does not have feed_0 variable
预测时候的数据转换器如下
# -*- coding: utf-8 -*-
"""
Hook
"""
import os
import sys
sys.path.append("..")
from PIL import Image
import numpy as np
import paddle.fluid as fluid
from home.utility import base64_to_image
def reader_infer(data_args):
    """Serving-side data hook: build a reader for one inference request.

    :param data_args: request payload dict; expects a base64-encoded image
        under the key "img"
    :return: a zero-argument callable returning (img_array, DataFeeder)
    """
    def reader():
        # NOTE(review): fluid.layers.data normally takes the shape WITHOUT the
        # batch dimension, so [1, 1, 256, 256] may declare one extra axis once
        # batched — confirm against the model's expected input.
        # NOTE(review): the feed name 'image' must match the feed variable
        # name stored in the saved inference model; the server error
        # "'feed_targets' does not have feed_0 variable" suggests the saved
        # model expects 'feed_0' — verify against save_inference_model's
        # feeded_var_names.
        image_shape = [1, 1, 256,256]
        image = fluid.layers.data(name='image', shape=image_shape, dtype='float32')
        feeder = fluid.DataFeeder(place=fluid.CPUPlace(), feed_list=[image])
        # Decode the base64 payload, then add batch and channel axes:
        # (H, W) -> (1, H, W) -> (1, 1, H, W).
        img = base64_to_image(data_args["img"])
        img=np.array(img)
        img = img[np.newaxis, :]
        img = img[np.newaxis, :]
        img = img.astype('float32')
        return img,feeder
    return reader
"""
Hook
"""
def output(results, data_args):
    """
    Output hook: convert model predictions into a JSON-encodable list.

    Example shown in the original is for object detection, where predicted
    results are converted into line coordinates.
    :param results: iterable of model prediction results
    :param data_args: request data passed to the API (k-v form); unused here
    :return: list of the string form of each prediction (json_encode-able)
    """
    return [str(item) for item in results]
请注册或登录再回复