Commit 1caff3fd authored by lubin10

update some code and README.md

Parent a37659eb
@@ -9,8 +9,9 @@ endif
 ${info ARM_ABI: ${ARM_ABI}}
 ${info ARM_PLAT: ${ARM_PLAT}; option[arm7/arm8]}
-LITE_ROOT=libs/inference_lite_lib.android.armv8
-include ${LITE_ROOT}/demo/cxx/Makefile.def
+include ../Makefile.def
+
+LITE_ROOT=../../../
 ${info LITE_ROOT: $(abspath ${LITE_ROOT})}
 THIRD_PARTY_DIR=third_party
......
@@ -92,9 +92,9 @@ PaddleClas provides converted and optimized inference models, which can be downloaded directly as shown below
 ```shell
 # enter the lite_ppshitu directory
 cd $PaddleClas/deploy/lite_shitu
-wget https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/lite/ppshitu_lite_models_v1.0.tar.gz
-tar -xf ppshitu_lite_models_v1.0.tar.gz
-rm -f ppshitu_lite_models_v1.0.tar.gz
+wget https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/lite/ppshitu_lite_models_v1.0.tar
+tar -xf ppshitu_lite_models_v1.0.tar
+rm -f ppshitu_lite_models_v1.0.tar
 ```
 #### 2.1.2 Using other models
@@ -312,7 +312,9 @@ chmod 777 pp_shitu
 The output looks like the following:
 ```
+images/demo.jpg:
+result0: bbox[253, 275, 1146, 872], score: 0.974196, label: 伊藤园_果蔬汁
 ```
 ## FAQ
 Q1: What if I want to switch to a different model? Do I need to go through the whole process again?
......
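For readers tracing where the sample line above comes from, here is a minimal C++ sketch that formats a detection-plus-retrieval result in the same shape as the README output. The function name and parameters are illustrative assumptions, not the exact PPShiTu structures.

```cpp
#include <cstdio>
#include <string>
#include <vector>

// Format one result in the same shape as the README sample
// ("result0: bbox[...], score: ..., label: ...").
// Names here are illustrative, not the exact PPShiTu members.
void PrintResult(int idx, const std::vector<int> &bbox, float score,
                 const std::string &label) {
  std::printf("result%d: bbox[%d, %d, %d, %d], score: %f, label: %s\n", idx,
              bbox[0], bbox[1], bbox[2], bbox[3], score, label.c_str());
}
```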
@@ -45,6 +45,7 @@ public:
     LoadIndexFile();
     this->I.resize(this->return_k * this->max_query_number);
     this->D.resize(this->return_k * this->max_query_number);
+    printf("faiss index load success!\n");
   };
   void LoadIdMap();
......
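For context on why I and D are resized to return_k * max_query_number in the hunk above: a faiss search writes return_k distances and ids per query into caller-owned buffers. A minimal sketch, assuming only faiss::Index::search and the faiss::Index::idx_t label type this header already uses; the function and parameter names are illustrative:

```cpp
#include <faiss/Index.h>
#include <vector>

// How the preallocated D/I buffers are typically filled by a faiss index:
// return_k distances and ids per query, written contiguously.
void QuerySketch(faiss::Index *index, const std::vector<float> &queries,
                 int query_number, int return_k, std::vector<float> &D,
                 std::vector<faiss::Index::idx_t> &I) {
  // D and I must hold at least query_number * return_k entries, which is
  // exactly what the resize(return_k * max_query_number) calls guarantee.
  index->search(query_number, queries.data(), return_k, D.data(), I.data());
}
```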
@@ -51,7 +51,6 @@ void FeatureExtract::RunRecModel(const cv::Mat &img,
   for (auto dim : output_tensor->shape()) {
     output_size *= dim;
   }
-  std::cout << "output len is: " << output_size << std::endl;
   feature.resize(output_size);
   output_tensor->CopyToCpu(feature.data());
......
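Once the feature vector has been copied to the CPU as in the hunk above, PP-ShiTu-style retrieval features are usually L2-normalized before being queried against the index. A standalone sketch of that step (illustrative, not the repository's own helper):

```cpp
#include <cmath>
#include <vector>

// L2-normalize a feature vector in place so that cosine similarity can be
// computed as an inner product by the retrieval index.
void L2Normalize(std::vector<float> &feature) {
  float norm = 0.f;
  for (float v : feature) norm += v * v;
  norm = std::sqrt(norm);
  if (norm > 0.f) {
    for (float &v : feature) v /= norm;
  }
}
```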
@@ -208,9 +208,9 @@ int main(int argc, char **argv) {
         RT_Config["Global"]["max_det_results"].as<int>(), false, &det);
     // add the whole image for recognition to improve recall
-    PPShiTu::ObjectResult result_whole_img = {
-        {0, 0, srcimg.cols, srcimg.rows}, 0, 1.0};
-    det_result.push_back(result_whole_img);
+    // PPShiTu::ObjectResult result_whole_img = {
+    //     {0, 0, srcimg.cols, srcimg.rows}, 0, 1.0};
+    // det_result.push_back(result_whole_img);
     // get rec result
     PPShiTu::SearchResult search_result;
......
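The hunk above disables the whole-image fallback by commenting it out. If that recall boost is ever wanted back, one hedged alternative, in the context of main() shown above, is to gate it behind a config switch; `add_whole_image` is an assumed key that does not exist in the shipped configs:

```cpp
    // Hypothetical: keep the whole-image candidate behind a config switch
    // instead of deleting it. "add_whole_image" is an assumed YAML key.
    auto add_whole = RT_Config["Global"]["add_whole_image"];
    if (add_whole && add_whole.as<bool>()) {
      PPShiTu::ObjectResult result_whole_img = {
          {0, 0, srcimg.cols, srcimg.rows}, 0, 1.0};
      det_result.push_back(result_whole_img);
    }
```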