Skip to content

  • 体验新版
    • 正在加载...
  • 登录
  • PaddlePaddle
  • Paddle
  • Issue
  • #23341

P
Paddle
  • 项目概览

PaddlePaddle / Paddle
大约 2 年 前同步成功

通知 2325
Star 20933
Fork 5424
  • 代码
    • 文件
    • 提交
    • 分支
    • Tags
    • 贡献者
    • 分支图
    • Diff
  • Issue 1423
    • 列表
    • 看板
    • 标记
    • 里程碑
  • 合并请求 543
  • Wiki 0
    • Wiki
  • 分析
    • 仓库
    • DevOps
  • 项目成员
  • Pages
P
Paddle
  • 项目概览
    • 项目概览
    • 详情
    • 发布
  • 仓库
    • 仓库
    • 文件
    • 提交
    • 分支
    • 标签
    • 贡献者
    • 分支图
    • 比较
  • Issue 1,423
    • Issue 1,423
    • 列表
    • 看板
    • 标记
    • 里程碑
  • 合并请求 543
    • 合并请求 543
  • Pages
  • 分析
    • 分析
    • 仓库分析
    • DevOps
  • Wiki 0
    • Wiki
  • 成员
    • 成员
  • 收起侧边栏
  • 动态
  • 分支图
  • 创建新Issue
  • 提交
  • Issue看板
已关闭
开放中
Opened 3月 31, 2020 by saxon_zh@saxon_zhGuest

python 预测api出错但load_inference_model没问题

Created by: Liuhan703

  • 版本、环境信息:    1)python 3.6 PaddlePaddle==1.7.1 CPU预测
  • 问题描述:save_inference_model后调用load_inference_model预测没问题,但用python预测api出错 组网代码:
import numpy as np
import paddle.fluid as fluid

class Model(object):
    """Toy text-classification network used to reproduce the save/load issue.

    Builds embedding -> sequence_pool -> lod_reset -> fc inside a dedicated
    scope, trains one step in run(), then exports the inference program with
    save_inference_model.
    """

    def __init__(self):
        self.save_path = './tmp_model'
        self.place = fluid.CPUPlace()
        self.exe = fluid.Executor(self.place)
        self.start_program = fluid.default_startup_program()
        self.main_program = fluid.default_main_program()
        self.infer_scope = fluid.core.Scope()
        with fluid.scope_guard(self.infer_scope):
            with fluid.program_guard(self.main_program, self.start_program):
                # create model: three LoD-level-1 int64 inputs
                self.word = fluid.layers.data(name="words", shape=[1], dtype='int64', lod_level=1)
                self.lod_sent = fluid.layers.data(name="lod_sent", shape=[1], dtype="int64", lod_level=1)
                self.label = fluid.layers.data(name="label", shape=[1], dtype="int64", lod_level=1)
                self.out = fluid.layers.embedding(
                                input=self.word,
                                size=[3, 5],
                                dtype='float32',
                                param_attr=fluid.ParamAttr(
                                    name="word_emb",
                                    learning_rate=5))
                self.out = fluid.layers.sequence_pool(self.out, pool_type="average")
                # lod_reset re-segments the pooled vectors; its Y input
                # ("lod_sent") supplies the target LoD at feed time.
                self.out = fluid.layers.lod_reset(x=self.out, y=self.lod_sent)
                self.out = fluid.layers.fc(input=self.out, size=3)
                # metrics
                self.cost = fluid.layers.softmax_with_cross_entropy(logits=self.out, label=self.label)
                self.avg_cost = fluid.layers.mean(self.cost)
                self.acc = fluid.layers.accuracy(input=self.out, label=self.label)
                # clone the main program for inference BEFORE the optimizer
                # ops are appended, so the exported graph has no backward ops
                self.infer_program = self.main_program.clone(for_test=True)
                self.optimizer = fluid.optimizer.Adam(learning_rate=1.5e-3)
                self.optimizer.minimize(self.avg_cost)
                self.exe.run(self.start_program)
                self.fetch_list = [self.out, self.avg_cost, self.acc]

    def run(self, batch_words=None, labels=None):
        """Run one training step on a toy batch and export the inference model.

        Args:
            batch_words: nested list [sentence][word][1] of int64 word ids;
                defaults to a fixed two-sentence batch.
            labels: one int64 label per sentence; defaults match batch_words.
        """
        # Avoid mutable default arguments (shared across calls): build the
        # default batch per call instead.
        if batch_words is None:
            batch_words = [[[1], [0], [2]], [[0], [1]]]
        if labels is None:
            labels = [[1], [2]]
        words_lod = [len(words) for words in batch_words]
        words_idx = sum(batch_words, [])
        sent_lod = [len(batch_words)]
        # create LoD tensors for feeding
        word_tensor = fluid.create_lod_tensor(
                np.array(words_idx).astype("int64"), [words_lod], self.place)
        # NOTE(review): np.ones defaults to float64 while the "lod_sent"
        # layer is declared int64; lod_reset appears to consume only the LoD,
        # but the dtype mismatch looks accidental — confirm.
        sent_tensor = fluid.create_lod_tensor(np.ones(sum(sent_lod)), [sent_lod], self.place)
        label_tensor = fluid.create_lod_tensor(
                np.array(labels).astype("int64"), [sent_lod], self.place)
        # run one step, then save the inference program
        with fluid.scope_guard(self.infer_scope):
            results, avg_cost, acc = self.exe.run(self.main_program,
                        feed={'words': word_tensor, "lod_sent": sent_tensor, "label": label_tensor},
                        fetch_list=self.fetch_list,
                        return_numpy=False)
            print(results)
            fluid.io.save_inference_model(self.save_path, ["words", "lod_sent"],
                                            self.out, self.exe, main_program=self.infer_program,
                                                model_filename="__model__", params_filename="__params__")

if __name__ == "__main__":
    # Build the network, run one training step, and export the model.
    Model().run()

预测代码:


#!/usr/bin/env python
# coding=utf-8
import os
import numpy as np
import paddle.fluid as fluid

class Model(object):
    """ label model: loads the exported inference model via the native
    AnalysisPredictor API instead of load_inference_model. """

    def __init__(self):
        """Create an AnalysisConfig for the saved model/params and build a
        CPU PaddlePredictor from it."""
        self.load_path = './tmp_model'
        config = fluid.core.AnalysisConfig(os.path.join(self.load_path, "__model__"), os.path.join(self.load_path, "__params__"))
        config.disable_gpu()
        # create the PaddlePredictor
        self.predictor = fluid.core.create_paddle_predictor(config)

    def infer(self, batch_words=None, labels=None):
        """Run one prediction on a toy batch.

        Args:
            batch_words: nested list [sentence][word][1] of int64 word ids;
                defaults to a fixed two-sentence batch.
            labels: accepted for signature compatibility with the training
                script's run(); not fed to the predictor.
        """
        # Avoid mutable default arguments (shared across calls).
        if batch_words is None:
            batch_words = [[[1]], [[0]]]
        if labels is None:
            labels = [[1], [2]]
        words_lod = [len(words) for words in batch_words]
        words_idx = sum(batch_words, [])
        sent_lod = [len(batch_words)]
        # create the input tensors
        word_tensor = fluid.core.PaddleTensor(data=np.array(words_idx).astype("int64"), lod=[words_lod], name="words")
        # NOTE(review): np.ones yields float64 while "lod_sent" was declared
        # int64 in the training graph — a possible contributor to the
        # lod_reset failure reported in this issue; confirm.
        sent_tensor = fluid.core.PaddleTensor(data=np.ones(sum(sent_lod)), lod=[sent_lod], name="lod_sent")

        self.predictor.run([word_tensor, sent_tensor])


if __name__ == "__main__":
    # Load the exported model and run a single prediction.
    Model().infer()

直接用load_inference_model加载模型run没有问题,使用create_paddle_predictor出现错误:


I0331 15:31:03.994108 2480636800 analysis_predictor.cc:462] ======= optimize end =======
W0331 15:31:03.996776 2480636800 naive_executor.cc:45] The NaiveExecutor can not work properly if the cmake flag ON_INFER is not set.
W0331 15:31:03.996803 2480636800 naive_executor.cc:47] Unlike the training phase, all the scopes and variables will be reused to save the allocation overhead.
W0331 15:31:03.996809 2480636800 naive_executor.cc:50] Please re-compile the inference library by setting the cmake flag ON_INFER=ON if you are running Paddle Inference
Traceback (most recent call last):
  File "infer_api.py", line 33, in <module>
    m.infer()
  File "infer_api.py", line 28, in infer
    self.predictor.run([word_tensor, sent_tensor])
paddle.fluid.core_avx.EnforceNotMet: 

--------------------------------------------
C++ Call Stacks (More useful to developers):
--------------------------------------------
0   std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > paddle::platform::GetTraceBackString<std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&>(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&&&, char const*, int)
1   paddle::operators::LoDResetKernel<paddle::platform::CPUPlace, float>::Compute(paddle::framework::ExecutionContext const&) const
2   std::__1::__function::__func<paddle::framework::OpKernelRegistrarFunctor<paddle::platform::CPUPlace, false, 0ul, paddle::operators::LoDResetKernel<paddle::platform::CPUPlace, float>, paddle::operators::LoDResetKernel<paddle::platform::CPUPlace, double>, paddle::operators::LoDResetKernel<paddle::platform::CPUPlace, int>, paddle::operators::LoDResetKernel<paddle::platform::CPUPlace, long long> >::operator()(char const*, char const*, int) const::'lambda'(paddle::framework::ExecutionContext const&), std::__1::allocator<paddle::framework::OpKernelRegistrarFunctor<paddle::platform::CPUPlace, false, 0ul, paddle::operators::LoDResetKernel<paddle::platform::CPUPlace, float>, paddle::operators::LoDResetKernel<paddle::platform::CPUPlace, double>, paddle::operators::LoDResetKernel<paddle::platform::CPUPlace, int>, paddle::operators::LoDResetKernel<paddle::platform::CPUPlace, long long> >::operator()(char const*, char const*, int) const::'lambda'(paddle::framework::ExecutionContext const&)>, void (paddle::framework::ExecutionContext const&)>::operator()(paddle::framework::ExecutionContext const&)
3   paddle::framework::OperatorWithKernel::RunImpl(paddle::framework::Scope const&, paddle::platform::Place const&, paddle::framework::RuntimeContext*) const
4   paddle::framework::OperatorWithKernel::RunImpl(paddle::framework::Scope const&, paddle::platform::Place const&) const
5   paddle::framework::OperatorBase::Run(paddle::framework::Scope const&, paddle::platform::Place const&)
6   paddle::framework::NaiveExecutor::Run()
7   paddle::AnalysisPredictor::Run(std::__1::vector<paddle::PaddleTensor, std::__1::allocator<paddle::PaddleTensor> > const&, std::__1::vector<paddle::PaddleTensor, std::__1::allocator<paddle::PaddleTensor> >*, int)
8   void pybind11::cpp_function::initialize<paddle::pybind::(anonymous namespace)::BindAnalysisPredictor(pybind11::module*)::$_12, std::__1::vector<paddle::PaddleTensor, std::__1::allocator<paddle::PaddleTensor> >, paddle::AnalysisPredictor&, std::__1::vector<paddle::PaddleTensor, std::__1::allocator<paddle::PaddleTensor> > const&, pybind11::name, pybind11::is_method, pybind11::sibling>(paddle::pybind::(anonymous namespace)::BindAnalysisPredictor(pybind11::module*)::$_12&&, std::__1::vector<paddle::PaddleTensor, std::__1::allocator<paddle::PaddleTensor> > (*)(paddle::AnalysisPredictor&, std::__1::vector<paddle::PaddleTensor, std::__1::allocator<paddle::PaddleTensor> > const&), pybind11::name const&, pybind11::is_method const&, pybind11::sibling const&)::'lambda'(pybind11::detail::function_call&)::__invoke(pybind11::detail::function_call&)
9   pybind11::cpp_function::dispatcher(_object*, _object*, _object*)

------------------------------------------
Python Call Stacks (More useful to users):
------------------------------------------
  File "/Users/liuhan09/anaconda3/lib/python3.7/site-packages/paddle/fluid/framework.py", line 1771, in append_op
    attrs=kwargs.get("attrs", None))
  File "/Users/liuhan09/anaconda3/lib/python3.7/site-packages/paddle/fluid/layer_helper.py", line 43, in append_op
    return self.main_program.current_block().append_op(*args, **kwargs)
  File "/Users/liuhan09/anaconda3/lib/python3.7/site-packages/paddle/fluid/layers/nn.py", line 7030, in lod_reset
    'Y': y}, outputs={'Out': out})
  File "model.py", line 30, in __init__
    self.out = fluid.layers.lod_reset(x=self.out, y=self.lod_sent)
  File "model.py", line 68, in <module>
    m = Model()

----------------------
Error Message Summary:
----------------------
Error: Last value of `Y`'s last level LoD should be equal to the first dimension of `X`
  [Hint: Expected (int64_t)(last_level.back()) == in->dims()[0], but received (int64_t)(last_level.back()):2 != in->dims()[0]:1.] at (/home/teamcity/work/ef54dc8a5b211854/paddle/fluid/operators/lod_reset_op.h:43)
  [operator < lod_reset > error]
指派人
分配到
无
里程碑
无
分配里程碑
工时统计
无
截止日期
无
标识: paddlepaddle/Paddle#23341
渝ICP备2023009037号

京公网安备11010502055752号

网络110报警服务 Powered by GitLab CE v13.7
开源知识
Git 入门 Pro Git 电子书 在线学 Git
Markdown 基础入门 IT 技术知识开源图谱
帮助
使用手册 反馈建议 博客
《GitCode 隐私声明》 《GitCode 服务条款》 关于GitCode
Powered by GitLab CE v13.7