Commit f18f4bbd authored by zhangyang0701

Change function names to avoid ambiguity for the FPGA track

Parent 17c42bdf
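
Background: before this change, FeedData and GetResults each had overloads distinguished only by their container type (std::vector<void *> versus std::vector<framework::Tensor>). The sketch below is hypothetical stand-in code, not code from this repository; it only illustrates the kind of call-site ambiguity such an overload set can produce and why giving the Tensor-based variants their own names (FeedTensorData, GetTensorResults) removes it.

```cpp
#include <vector>

struct Tensor {};  // stand-in for framework::Tensor

// Stand-in for the previous overload set on Executor/PaddleMobile.
struct OldApi {
  void FeedData(const std::vector<void *> &v) {}
  void FeedData(const std::vector<Tensor> &v) {}
};

int main() {
  OldApi api;
  // api.FeedData({});  // error: ambiguous -- {} list-initializes either vector type
  api.FeedData(std::vector<Tensor>{});  // callers had to spell out the type to disambiguate
  return 0;
}
```

With distinct names, the Tensor-based call reads unambiguously at the call site, which is what the rename in the diff below provides.
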
@@ -476,7 +476,7 @@ void Executor<Device, T>::FeedData(const std::vector<void *> &v) {
 }
 template <typename Device, typename T>
-void Executor<Device, T>::FeedData(const vector<framework::Tensor> &v) {
+void Executor<Device, T>::FeedTensorData(const vector<framework::Tensor> &v) {
   auto input_size = v.size();
   auto vars = program_.scope->VarContain("feed");
   PADDLE_MOBILE_ENFORCE(input_size == vars.size(),
@@ -503,7 +503,8 @@ void Executor<Device, T>::GetResults(std::vector<void *> *v) {
 }
 template <typename Device, typename T>
-void Executor<Device, T>::GetResults(std::vector<framework::Tensor *> *v) {
+void Executor<Device, T>::GetTensorResults(
+    std::vector<framework::Tensor *> *v) {
   auto output_size = v->size();
   PADDLE_MOBILE_ENFORCE(output_size > 0, "Empty output");
   auto vars = program_.scope->VarContain("fetch");
......
@@ -53,10 +53,10 @@ class Executor {
   void InjectVariable(const Tensor &t, std::string var_name);
   void FeedData(const Tensor &t);
   void FeedData(const std::vector<void *> &v);
-  void FeedData(const std::vector<framework::Tensor> &v);
+  void FeedTensorData(const std::vector<framework::Tensor> &v);
   void GetResults(std::vector<void *> *v);
-  void GetResults(std::vector<framework::Tensor *> *v);
+  void GetTensorResults(std::vector<framework::Tensor *> *v);
   std::shared_ptr<Tensor> FetchResult(int id = -1);
   void Predict_From_To(int start = 0, int end = -1);
......
@@ -145,7 +145,7 @@ void PaddleMobilePredictor<Device, T>::FeedPaddleTensors(
     tensors[i].init(typeid(float));
     ConvertPaddleTensors(inputs[i], &tensors[i]);
   }
-  paddle_mobile_->FeedData(tensors);
+  paddle_mobile_->FeedTensorData(tensors);
 }
 template <typename Device, typename T>
@@ -154,7 +154,7 @@ void PaddleMobilePredictor<Device, T>::FetchPaddleTensors(
   auto num = outputs->size();
   PADDLE_MOBILE_ENFORCE(num > 0, "0 output pointers is not permitted");
   std::vector<framework::Tensor *> tensors(num, nullptr);
-  paddle_mobile_->GetResults(&tensors);
+  paddle_mobile_->GetTensorResults(&tensors);
   for (int i = 0; i < num; i++) {
     ConvertTensors(*tensors[i], &(*outputs)[i]);
   }
......
@@ -233,9 +233,9 @@ void PaddleMobile<Device, T>::FeedData(const std::vector<void *> &v) {
 };
 template <typename Device, typename T>
-void PaddleMobile<Device, T>::FeedData(
+void PaddleMobile<Device, T>::FeedTensorData(
     const std::vector<framework::Tensor> &v) {
-  executor_->FeedData(v);
+  executor_->FeedTensorData(v);
 };
 template <typename Device, typename T>
@@ -244,8 +244,9 @@ void PaddleMobile<Device, T>::GetResults(std::vector<void *> *v) {
 }
 template <typename Device, typename T>
-void PaddleMobile<Device, T>::GetResults(std::vector<framework::Tensor *> *v) {
-  executor_->GetResults(v);
+void PaddleMobile<Device, T>::GetTensorResults(
+    std::vector<framework::Tensor *> *v) {
+  executor_->GetTensorResults(v);
 }
 template <typename Device, typename T>
......
@@ -91,10 +91,10 @@ class PaddleMobile {
   void InjectVariable(const framework::Tensor &t, std::string var_name);
   void FeedData(const framework::Tensor &t);
   void FeedData(const std::vector<void *> &v);
-  void FeedData(const std::vector<framework::Tensor> &v);
+  void FeedTensorData(const std::vector<framework::Tensor> &v);
   void GetResults(std::vector<void *> *v);
-  void GetResults(std::vector<framework::Tensor *> *v);
+  void GetTensorResults(std::vector<framework::Tensor *> *v);
   std::shared_ptr<framework::Tensor> FetchResult(int id = -1);
   void Predict_From_To(int start = 0, int end = -1);
......
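
For reference, a minimal caller-side sketch of the renamed entry points. This is not code from the repository: the include path, device tag, Load() call, model directory, and tensor setup are assumptions for illustration; only the FeedTensorData, GetTensorResults, and Predict_From_To signatures come from the headers in the diff above.

```cpp
#include <vector>

#include "io/paddle_mobile.h"  // assumed include path for the PaddleMobile class

int main() {
  // Device tag is an assumption; FPGA matches the "FPGA track" named in the
  // commit message.
  paddle_mobile::PaddleMobile<paddle_mobile::FPGA, float> pm;

  // Hypothetical model directory; the exact Load() signature may differ.
  pm.Load("./model_dir", /*optimize=*/true);

  // Feed input tensors through the renamed Tensor-based entry point
  // (previously an overload of FeedData).
  std::vector<paddle_mobile::framework::Tensor> inputs(1);
  pm.FeedTensorData(inputs);

  // Run the loaded program over the given operator range, then collect
  // pointers to the fetch tensors via the renamed GetTensorResults
  // (previously an overload of GetResults).
  pm.Predict_From_To(0, -1);
  std::vector<paddle_mobile::framework::Tensor *> outputs(1, nullptr);
  pm.GetTensorResults(&outputs);
  return 0;
}
```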