Commit 86b60a0f authored by Wilber, committed by GitHub

[Demo][Cuda] Modify cxx&python demo of Cuda backend for release/v2.6 (#3473)

* fix cuda demo. test=develop

* update type error. test=develop
Parent bfd9ad27
@@ -48,7 +48,7 @@ The CUDA build output is located in `build_cuda/inference_lite_lib`
4. `demo` folder: the C++ demo.
-If the Python option was enabled during compilation, `lite_core.so` is generated under the `build_cuda/inference_lite_lib/python/lib/` directory.
+If the Python option was enabled during compilation, `lite.so` is generated under the `build_cuda/inference_lite_lib/python/lib/` directory.
## Run
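The renamed Python module can be sanity-checked with a minimal import. This is only a sketch; it assumes the script is run from the source-tree root that contains `build_cuda`:

```python
# Minimal check that the Python binding built above can be imported.
# Assumes the current working directory contains build_cuda (adjust the path otherwise).
import sys

sys.path.append('build_cuda/inference_lite_lib/python/lib')
import lite  # provided by lite.so (formerly lite_core.so)

print('Paddle-Lite Python binding loaded from:', lite.__file__)
```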
@@ -66,7 +66,7 @@ wget https://paddle-inference-dist.cdn.bcebos.com/PaddleLite/kite.jpg
Step 2: Run
-**NOTE:**This example uses the Python API.
+**NOTE:** This example uses the Python API.
``` python
#-*- coding: utf-8 -*-
@@ -75,7 +75,7 @@ import sys
import numpy as np
import cv2
sys.path.append('build_cuda/inference_lite_lib/python/lib')
-from lite_core import *
+from lite import *
def read_img(im_path, resize_h, resize_w):
    im = cv2.imread(im_path).astype('float32')
......
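The diff collapses the rest of `read_img`. For orientation, a preprocessing helper of this shape typically continues roughly as in the sketch below; the resize call, the 1/255 scaling, and the HWC-to-CHW transpose are assumptions about a common pattern, not code taken from this commit:

```python
import cv2
import numpy as np

def read_img(im_path, resize_h, resize_w):
    """Read an image and convert it to a CHW float32 array (illustrative sketch)."""
    im = cv2.imread(im_path).astype('float32')
    # cv2.resize expects the target size as (width, height).
    im = cv2.resize(im, (resize_w, resize_h))
    # Scale pixel values to [0, 1]; a real demo may also subtract a mean and divide by a std.
    im = im / 255.0
    # HWC -> CHW, the channel-first layout most Paddle vision models expect.
    im = im.transpose((2, 0, 1))
    return np.ascontiguousarray(im, dtype='float32')
```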
@@ -181,7 +181,7 @@ class LITE_API CxxConfig : public ConfigBase {
#endif
#ifdef LITE_WITH_CUDA
  void set_multi_stream(bool multi_stream) { multi_stream_ = multi_stream; }
-  int multi_stream() const { return multi_stream_; }
+  bool multi_stream() const { return multi_stream_; }
#endif
#ifdef LITE_WITH_MLU
......
-project(demo CXX C)
cmake_minimum_required(VERSION 2.8)
+project(demo CXX C)
+add_definitions(-DLITE_WITH_CUDA)
set(TARGET demo)
set(CMAKE_CXX_FLAGS "-std=c++11 -O3")
-set(LITE_LIB "${PROJECT_SOURCE_DIR}/../../cxx")
+set(LITE_ROOT "${PROJECT_SOURCE_DIR}/../../cxx")
-set(PROTOBUF_LIB "${PROJECT_SOURCE_DIR}/../../third_party/protobuf")
+set(PROTOBUF_ROOT "${PROJECT_SOURCE_DIR}/../../third_party/protobuf")
-include_directories("${LITE_LIB}/include")
+include_directories("${LITE_ROOT}/include")
-link_directories("${LITE_LIB}/lib")
+link_directories("${LITE_ROOT}/lib")
-link_directories("${PROTOBUF_LIB}/lib")
+link_directories("${PROTOBUF_ROOT}/lib")
+# cuda lib
+link_directories("/usr/local/cuda/lib64/")
add_executable(${TARGET} ${TARGET}.cc)
-set(DEPS ${LITE_LIB}/lib/libpaddle_full_api_shared.so)
+set(DEPS ${LITE_ROOT}/lib/libpaddle_full_api_shared.so)
set(DEPS ${DEPS} protobuf-lite)
-set(DEPS ${DEPS} "-lrt -lpthread -ldl")
+set(DEPS ${DEPS} "-lrt -lpthread -ldl -lcudart")
target_link_libraries(${TARGET} ${DEPS})
@@ -350,6 +350,7 @@ function make_cuda {
        -DLITE_WITH_LIGHT_WEIGHT_FRAMEWORK=OFF \
        -DWITH_TESTING=OFF \
        -DLITE_WITH_ARM=OFF \
+       -DLITE_WITH_STATIC_CUDA=OFF \
        -DLITE_WITH_PYTHON=${BUILD_PYTHON} \
        -DLITE_BUILD_EXTRA=ON \
        -DLITE_WITH_XPU=$BUILD_XPU \
......