Commit 11f68e99 authored by zhangyang0701

fix bug for multiple predictions for FPGA track

Parent 67d1a8a4
@@ -469,7 +469,7 @@ void Executor<Device, T>::FeedData(const std::vector<void *> &v) {
   PADDLE_MOBILE_ENFORCE(input_size == vars.size(),
                         "input data number not correct");
   for (int i = 0; i < input_size; i++) {
-    auto var = program_.scope->Var("feed", i);
+    auto var = vars[i];
     auto feed_tensor = var->template GetMutable<LoDTensor>();
     feed_tensor->external_data = v[i];
   }
@@ -482,7 +482,7 @@ void Executor<Device, T>::FeedTensorData(const vector<framework::Tensor> &v) {
   PADDLE_MOBILE_ENFORCE(input_size == vars.size(),
                         "input data number not correct");
   for (int i = 0; i < input_size; i++) {
-    auto var = program_.scope->Var("feed", i);
+    auto var = vars[i];
     auto feed_tensor = var->template GetMutable<LoDTensor>();
     feed_tensor->ShareDataWith(v[i]);
   }
@@ -495,8 +495,9 @@ void Executor<Device, T>::GetResults(std::vector<void *> *v) {
   auto vars = program_.scope->VarContain("fetch");
   PADDLE_MOBILE_ENFORCE(output_size == vars.size(),
                         "output data number not correct");
   for (int i = 0; i < output_size; i++) {
-    auto var = program_.scope->Var("fetch", i);
+    auto var = vars[i];
     auto fetch_tensor = var->template GetMutable<LoDTensor>();
     (*v)[i] = fetch_tensor->template data<float>();
   }
@@ -509,7 +510,7 @@ void Executor<Device, T>::GetTensorResults(
   auto output_size = vars.size();
   for (int i = 0; i < output_size; i++) {
-    auto var = program_.scope->Var("fetch", i);
+    auto var = vars[i];
     auto fetch_tensor = var->template GetMutable<LoDTensor>();
     v->push_back(fetch_tensor);
   }
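All four hunks apply the same pattern: the loop now uses the i-th variable returned by `program_.scope->VarContain("feed")` / `VarContain("fetch")` directly, instead of re-resolving it with `program_.scope->Var("feed", i)` or `Var("fetch", i)`. Presumably the index-based lookup stopped matching the set of feed/fetch variables once the executor ran more than one prediction on the FPGA track, while indexing the container returned by `VarContain` keeps buffers and variables aligned. The toy program below is only a minimal sketch of that pattern, not the paddle-mobile API: `Scope`, `LoDTensor`, `VarContain`, and `FeedData` here are simplified stand-ins.

```cpp
// Minimal, self-contained sketch (assumed simplifications, not the real
// paddle-mobile types): bind each external input buffer to the i-th "feed"
// variable that VarContain() returned, as the new code does with vars[i].
#include <map>
#include <string>
#include <vector>

struct LoDTensor {
  const void *external_data = nullptr;  // stand-in for the FPGA feed buffer
};

struct Scope {
  std::map<std::string, LoDTensor> vars;  // hypothetical storage

  // Collect every variable whose name starts with `substr`.
  std::vector<LoDTensor *> VarContain(const std::string &substr) {
    std::vector<LoDTensor *> out;
    for (auto &kv : vars) {
      if (kv.first.compare(0, substr.size(), substr) == 0) {
        out.push_back(&kv.second);
      }
    }
    return out;
  }
};

// New-style feed loop: iterate the container returned by VarContain()
// instead of looking each variable up again by ("feed", i).
void FeedData(Scope *scope, const std::vector<const void *> &inputs) {
  auto vars = scope->VarContain("feed");
  if (inputs.size() != vars.size()) {
    return;  // mirrors PADDLE_MOBILE_ENFORCE(input_size == vars.size(), ...)
  }
  for (size_t i = 0; i < inputs.size(); i++) {
    vars[i]->external_data = inputs[i];  // same role as feed_tensor->external_data = v[i]
  }
}

int main() {
  Scope scope;
  scope.vars["feed0"] = LoDTensor{};
  scope.vars["feed1"] = LoDTensor{};
  int a = 0, b = 0;
  // Calling FeedData repeatedly reuses the same variables, which is the
  // multiple-prediction case the commit title refers to.
  FeedData(&scope, {&a, &b});
  FeedData(&scope, {&a, &b});
  return 0;
}
```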