Unverified commit 09ec8398 authored by zhangyang0701, committed by GitHub

Merge pull request #1480 from zhangyang0701/develop

add new interface function for FPGA track close #1479
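In short, this change adds a by-name fetch path for the FPGA track: `Executor::GetTensorByName` and `PaddleMobile::GetTensorByName` at the framework level, plus `PaddlePredictor::GetPaddleTensor` in the public API, while `GetTensorResults` / `FetchPaddleTensors` are reworked to size their output from the `fetch` variables instead of a pre-sized caller vector. The sketch below is not part of the diff; it shows how the new public entry points might be called, assuming an FPGA build, a `PaddleMobileConfig` filled in as in the existing tests, and using `"fetch2"` purely as an example output name.

```cpp
#include <iostream>
#include <string>
#include <vector>
#include "io/paddle_inference_api.h"  // adjust the include path to your build, as the tests do

int main() {
  PaddleMobileConfig config;  // assume model/params paths are set as in test_rfcn_api.cpp
  auto predictor =
      CreatePaddlePredictor<PaddleMobileConfig, PaddleEngineKind::kPaddleMobile>(config);

  // ... FeedPaddleTensors(...) and Predict_From_To(0, -1) as in the existing test ...

  // Fetch all outputs: the reworked FetchPaddleTensors clears and resizes the
  // vector itself, so it can be passed in empty.
  std::vector<PaddleTensor> outputs;
  predictor->FetchPaddleTensors(&outputs);
  std::cout << "Output number is " << outputs.size() << std::endl;

  // New in this PR: fetch a single output by variable name.
  PaddleTensor tensor;
  predictor->GetPaddleTensor("fetch2", &tensor);  // example name only
  return 0;
}
```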
......@@ -505,18 +505,23 @@ void Executor<Device, T>::GetResults(std::vector<void *> *v) {
template <typename Device, typename T>
void Executor<Device, T>::GetTensorResults(
std::vector<framework::Tensor *> *v) {
- auto output_size = v->size();
- PADDLE_MOBILE_ENFORCE(output_size > 0, "Empty output");
auto vars = program_.scope->VarContain("fetch");
- PADDLE_MOBILE_ENFORCE(output_size == vars.size(),
- "output data number not correct");
+ auto output_size = vars.size();
for (int i = 0; i < output_size; i++) {
auto var = program_.scope->Var("fetch", i);
auto fetch_tensor = var->template GetMutable<LoDTensor>();
- (*v)[i] = fetch_tensor;
+ v->push_back(fetch_tensor);
}
}
+ template <typename Device, typename T>
+ framework::Tensor *Executor<Device, T>::GetTensorByName(
+ const std::string &name) {
+ auto var = program_.scope->Var(name);
+ return var->template GetMutable<LoDTensor>();
+ };
template <typename Device, typename T>
std::shared_ptr<Tensor> Executor<Device, T>::FetchResult(int id) {
auto &ops = ops_of_block_[0];
......
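A detail worth calling out in the hunk above: `GetTensorResults` now derives the output count from the `fetch` variables in the scope and appends to the caller's vector with `push_back`, instead of writing into pre-allocated slots, so callers should pass in an empty vector (the predictor-level `FetchPaddleTensors` further down does exactly that via `outputs->clear()`). A minimal sketch of direct executor-level use follows; the helper is illustrative, not part of the diff, and assumes it sits in the same namespace as the code above with an executor that has already run a program containing `fetch` variables.

```cpp
// Illustrative helper only: collect fetch outputs through the new interface.
template <typename Device, typename T>
std::vector<framework::Tensor *> CollectFetchOutputs(Executor<Device, T> *executor) {
  std::vector<framework::Tensor *> fetched;  // must start empty; results are appended
  executor->GetTensorResults(&fetched);
  return fetched;  // tensors stay owned by the executor's scope; do not free them
}
```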
......@@ -57,6 +57,7 @@ class Executor {
void GetResults(std::vector<void *> *v);
void GetTensorResults(std::vector<framework::Tensor *> *v);
+ framework::Tensor *GetTensorByName(const std::string &name);
std::shared_ptr<Tensor> FetchResult(int id = -1);
void Predict_From_To(int start = 0, int end = -1);
......
......@@ -151,15 +151,26 @@ void PaddleMobilePredictor<Device, T>::FeedPaddleTensors(
template <typename Device, typename T>
void PaddleMobilePredictor<Device, T>::FetchPaddleTensors(
std::vector<PaddleTensor> *outputs) {
- auto num = outputs->size();
- PADDLE_MOBILE_ENFORCE(num > 0, "0 output pointers is not permitted");
- std::vector<framework::Tensor *> tensors(num, nullptr);
+ // auto num = outputs->size();
+ // PADDLE_MOBILE_ENFORCE(num > 0, "0 output pointers is not permitted");
+ // std::vector<framework::Tensor *> tensors(num, nullptr);
+ outputs->clear();
+ std::vector<framework::Tensor *> tensors;
paddle_mobile_->GetTensorResults(&tensors);
+ auto num = tensors.size();
+ outputs->resize(num, PaddleTensor());
for (int i = 0; i < num; i++) {
ConvertTensors(*tensors[i], &(*outputs)[i]);
}
}
+ template <typename Device, typename T>
+ void PaddleMobilePredictor<Device, T>::GetPaddleTensor(const std::string &name,
+ PaddleTensor *output) {
+ framework::Tensor *t = paddle_mobile_->GetTensorByName(name);
+ ConvertTensors(*t, output);
+ };
template <typename Device, typename T>
void PaddleMobilePredictor<Device, T>::FeedData(
const std::vector<void *> &inputs) {
......
......@@ -37,6 +37,8 @@ class PaddleMobilePredictor : public PaddlePredictor {
void Predict_From_To(int start, int end) override;
void FeedPaddleTensors(const std::vector<PaddleTensor>& inputs) override;
void FetchPaddleTensors(std::vector<PaddleTensor>* outputs) override;
+ void GetPaddleTensor(const std::string& name, PaddleTensor* output) override;
#endif
~PaddleMobilePredictor() override;
......
......@@ -27,8 +27,6 @@ limitations under the License. */
#include <typeindex>
#include <vector>
- // #define PADDLE_MOBILE_FPGA
namespace paddle_mobile {
#ifdef PADDLE_MOBILE_FPGA
......@@ -133,6 +131,8 @@ class PaddlePredictor {
virtual void Predict_From_To(int start, int end) = 0;
virtual void FeedPaddleTensors(const std::vector<PaddleTensor>& inputs) = 0;
virtual void FetchPaddleTensors(std::vector<PaddleTensor>* outputs) = 0;
+ virtual void GetPaddleTensor(const std::string& name,
+ PaddleTensor* output) = 0;
#endif
protected:
......
......@@ -249,6 +249,12 @@ void PaddleMobile<Device, T>::GetTensorResults(
executor_->GetTensorResults(v);
}
+ template <typename Device, typename T>
+ framework::Tensor *PaddleMobile<Device, T>::GetTensorByName(
+ const std::string &name) {
+ return executor_->GetTensorByName(name);
+ };
template <typename Device, typename T>
std::shared_ptr<framework::Tensor> PaddleMobile<Device, T>::FetchResult(
int id) {
......
......@@ -95,6 +95,7 @@ class PaddleMobile {
void GetResults(std::vector<void *> *v);
void GetTensorResults(std::vector<framework::Tensor *> *v);
+ framework::Tensor *GetTensorByName(const std::string &name);
std::shared_ptr<framework::Tensor> FetchResult(int id = -1);
void Predict_From_To(int start = 0, int end = -1);
......
......@@ -133,39 +133,16 @@ int main() {
readStream(g_image_src_float, reinterpret_cast<char *>(img));
std::vector<void *> v(3, nullptr);
- paddle_mobile.FeedData({img_info, img});
+ paddle_mobile.FeedData(std::vector<void *>({img_info, img}));
paddle_mobile.Predict_To(-1);
- for (int i = 55; i < 69; i++) {
+ for (int i = 65; i < 69; i++) {
auto tensor_ptr = paddle_mobile.FetchResult(i);
std::string saveName = "rfcn_" + std::to_string(i);
- // if(i != 58)
paddle_mobile::fpga::fpga_invalidate((*tensor_ptr).get_data(),
tensor_ptr->numel() * sizeof(float));
- // tensor_ptr->numel() * sizeof(float));
- if ((i == 48) || (i == 47)) {
- dump_stride(saveName, (*tensor_ptr), 20,
- false); // 20);//tensor_ptr->numel());
- } else if (i == 55) {
- dump_stride(saveName, (*tensor_ptr), tensor_ptr->numel(),
- true); // 20);//tensor_ptr->numel());
- } else {
- dump_stride(saveName, (*tensor_ptr), tensor_ptr->numel(),
- true); // 20);//tensor_ptr->numel());
- }
- /* float result = 0;
- std::string str = "softmax_input_data";
- float* data =
- static_cast<float*>(fpga::fpga_malloc(tensor_ptr->numel() *
- sizeof(float))); str = "softmax_output_data"; auto output_ptr =
- static_cast<half*>((*tensor_ptr).get_data()); for (int idx = 0; idx <
- tensor_ptr->numel(); ++idx)
- {
- data[idx] = fpga::fp16_2_fp32(output_ptr[idx]);
- dump_stride(saveName, (*tensor_ptr), tensor_ptr->numel(), true);
- }
- fpga::savefile<float>(str,data, tensor_ptr->numel(), result ); */
}
// paddle_mobile.GetResults(&v);
DLOG << "Computation done";
fpga::fpga_free(img);
......
......@@ -12,6 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
+ #ifndef PADDLE_MOBILE_FPGA
+ #define PADDLE_MOBILE_FPGA
+ #endif
#include <fstream>
#include <iostream>
#include "../../src/io/paddle_inference_api.h"
......@@ -59,14 +62,14 @@ int main() {
CreatePaddlePredictor<PaddleMobileConfig,
PaddleEngineKind::kPaddleMobile>(config);
std::cout << "after loading model" << std::endl;
std::cout << "Finishing loading model" << std::endl;
float img_info[3] = {768, 1536, 768.0f / 960.0f};
int img_length = 768 * 1536 * 3;
auto img = reinterpret_cast<float *>(fpga_malloc(img_length * sizeof(float)));
readStream(g_image, reinterpret_cast<char *>(img));
std::cout << "after initializing data" << std::endl;
std::cout << "Finishing initializing data" << std::endl;
/*
predictor->FeedData({img_info, img});
predictor->Predict_From_To(0, -1);
......@@ -110,8 +113,10 @@ int main() {
predictor->Predict_From_To(0, -1);
std::cout << "Finishing predicting " << std::endl;
- std::vector<PaddleTensor> v(3, PaddleTensor());
- predictor->FetchPaddleTensors(&v);
+ std::vector<PaddleTensor> v; // No need to initialize v
+ predictor->FetchPaddleTensors(&v); // Old data in v will be cleared
+ std::cout << "Output number is " << v.size() << std::endl;
auto post_nms = v[0].data.length() / sizeof(float) / 8;
for (int num = 0; num < post_nms; num++) {
for (int i = 0; i < 8; i++) {
......@@ -131,5 +136,14 @@ int main() {
std::cout << p[num * 4 + i] << std::endl;
}
}
std::cout << "Finish getting vector values" << std::endl;
PaddleTensor tensor;
predictor->GetPaddleTensor("fetch2", &tensor);
for (int i = 0; i < post_nms; i++) {
auto p = reinterpret_cast<float *>(tensor.data.data());
std::cout << p[+i] << std::endl;
}
return 0;
}
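On top of the calls exercised in this test, the by-name fetch could be wrapped into a small convenience helper. The sketch below is illustrative only, not part of the diff; it assumes, as the test does, that the named output (e.g. "fetch2") holds float data read through `tensor.data.data()` / `tensor.data.length()`.

```cpp
// Illustrative helper, not from the PR: copy a named output into a std::vector<float>.
std::vector<float> FetchNamedOutput(PaddlePredictor *predictor, const std::string &name) {
  PaddleTensor tensor;
  predictor->GetPaddleTensor(name, &tensor);  // new by-name fetch added in this PR
  const float *p = reinterpret_cast<const float *>(tensor.data.data());
  const size_t n = tensor.data.length() / sizeof(float);
  return std::vector<float>(p, p + n);  // copies out; the predictor keeps ownership of its buffers
}
```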