diff --git a/README.md b/README.md
index 69362734116fd8af78442a07dd31600aa46b7935..91771f1af2fb8dcf3d595aa8d0dc362834290544 100644
--- a/README.md
+++ b/README.md
@@ -27,7 +27,7 @@ Paddle-Mobile是PaddlePaddle组织下的项目,是一个致力于嵌入式平
 
 - **ARM CPU**
 
-![](http://7xop3k.com1.z0.glb.clouddn.com/15312108766575.jpg)
+![](http://mms-graph.bj.bcebos.com/paddle-mobile%2F2018_07_18.png)
 
 arm cpu是paddle-mobile的主要支持方向,cpu的通用性一直是其优势。嵌入式深度学习,需要大量的cpu汇编实现。我们正在紧锣密鼓的编码,为的是能充分硬件的每一点加速能力。
 arm cpu的优化工作还在进行中,现在使用了常规的cpu优化。在arm a73上paddle-mobile arm-v7现在单核运行一次mobilenet1.0是120+ms,显然这不是我们的最终目标,我们正在用大量的汇编改写,后续性能仍会有巨大提升空间, 目前只支持armv7, 未来我们也会支持armv8。
diff --git a/test/net/test_mobilenet+ssd.cpp b/test/net/test_mobilenet+ssd.cpp
index a3aac63f5759923df5bc60df556241c6e15c3eb4..a3d780a4854d018f948af2890bfe9f1e7a8fefef 100644
--- a/test/net/test_mobilenet+ssd.cpp
+++ b/test/net/test_mobilenet+ssd.cpp
@@ -18,11 +18,11 @@ limitations under the License. */
 
 int main() {
   paddle_mobile::PaddleMobile<paddle_mobile::CPU> paddle_mobile;
+  paddle_mobile.SetThreadNum(4);
   auto time1 = time();
-  //  auto isok = paddle_mobile.Load(g_mobilenet_ssd_gesture + "/model",
-  //                                 g_mobilenet_ssd_gesture + "/params",
-  //                                 true);
-  auto isok = paddle_mobile.Load(g_mobilenet_ssd, false);
+  auto isok = paddle_mobile.Load(g_mobilenet_ssd_gesture + "/model",
+                                 g_mobilenet_ssd_gesture + "/params", true);
+  //  auto isok = paddle_mobile.Load(g_mobilenet_ssd, false);
   if (isok) {
     auto time2 = time();
     std::cout << "load cost :" << time_diff(time1, time2) << "ms" << std::endl;
diff --git a/test/net/test_mobilenet.cpp b/test/net/test_mobilenet.cpp
index 2e285695fb79f3ed5471a653c71a10b36ef4e7f2..95ffc59c394782b69d17f16c549b0e6923fd31e8 100644
--- a/test/net/test_mobilenet.cpp
+++ b/test/net/test_mobilenet.cpp
@@ -12,7 +12,7 @@
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 */
-#include <fstream>
+#include <iostream>
 #include "../test_helper.h"
 #include "../test_include.h"
@@ -22,7 +22,7 @@ int main() {
   auto time1 = time();
   if (paddle_mobile.Load(g_mobilenet, true)) {
     auto time2 = time();
-    DLOG << "load cost :" << time_diff(time1, time1) << "ms";
+    std::cout << "load cost :" << time_diff(time1, time2) << "ms" << std::endl;
 
     std::vector<int64_t> dims{1, 3, 224, 224};
     Tensor input_tensor;
@@ -35,7 +35,8 @@
 
     auto vec_result = paddle_mobile.Predict(input, dims);
     auto time4 = time();
-    DLOG << "predict cost :" << time_diff(time3, time4) << "ms";
+    std::cout << "predict cost :" << time_diff(time3, time4) << "ms"
+              << std::endl;
   }
 
   return 0;