Commit 24c266cc authored by BohaoWu

Add Zelda!

Provide preprocess operators for images.
Parent 50f1bbfd
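The new hwvideoframe module exposes GPU preprocessing operators (Resize, Sub, SwapChannel, Gpubuffer2Image, ...) through pybind11. Below is a rough, non-authoritative sketch of how they might be chained from Python; the module name follows the "# import libgpupreprocess as pp" hint in the diff, while the upload operator name Image2Gpubuffer, the constructor signatures, and all parameter values are assumptions for illustration only.

import numpy as np
import libgpupreprocess as pp  # module name taken from the comment in this commit

# Hypothetical pipeline; names other than Resize/Sub/SwapChannel/Gpubuffer2Image
# and all constructor arguments are illustrative assumptions, not the real API.
ops = [
    pp.Image2Gpubuffer(),            # assumed: copy the HWC float image to the GPU
    pp.Resize(512, 1000),            # resize on the GPU (target size, max size)
    pp.Sub([104.0, 117.0, 123.0]),   # per-channel mean subtraction
    pp.SwapChannel(),                # reorder channels, e.g. RGB -> BGR
    pp.Gpubuffer2Image(),            # copy the result back into a numpy array
]

frame = np.random.rand(224, 224, 3).astype("float32")  # stand-in input image
for op in ops:
    frame = op(frame)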
@@ -25,6 +25,7 @@ endif()
 if (APP)
     add_subdirectory(configure)
+    add_subdirectory(preprocess)
 endif()
......
 cmake_minimum_required(VERSION 3.2)
-project(hw-frame-extract)
-# SET(CUDA_VERSION 10.1)
-#gcc version
-#GCC('gcc482')
-#CUDA("10.1")
-set(global_cflags_str "-g -pipe -W -Wall -fPIC")
-set(CMAKE_C_FLAGS ${global_cflags_str})
+project(gpupreprocess)
+include(cuda)
+include(configure)
+#C flags
+set(CMAKE_C_FLAGS " -g -pipe -W -Wall -fPIC")
 #C++ flags.
-set(global_cxxflags_str "-g -pipe -W -Wall -fPIC -std=c++11")
-set(CMAKE_CXX_FLAGS ${global_cxxflags_str})
+set(CMAKE_CXX_FLAGS " -g -pipe -W -Wall -fPIC -std=c++11")
 add_subdirectory(cuda)
-add_subdirectory(pybind11)
-set (EXTRA_LIBS ${EXTRA_LIBS} gpu)
-message(${CMAKE_CURRENT_SOURCE_DIR})
-include_directories("${CMAKE_CURRENT_SOURCE_DIR}/include")
-include_directories("${CMAKE_CURRENT_SOURCE_DIR}/pybind11/include")
-include_directories("/opt/compiler/cuda-10.1/include")
-include_directories("/home/work/wubohao/baidu/third-party/python/include/python2.7")
-file(GLOB SOURCE_FILES src/*.cpp pybind11/*.cpp )
-link_directories("-L/opt/compiler/cuda-10.1/lib64 -lcudart -lnppidei_static -lnppial_static -lnpps_static -lnppc_static -lculibos")
-# link_directories("/home/work/wubohao/baidu/third-party/python/lib")
+set(PYTHON_SO "${CMAKE_SOURCE_DIR}/core/preprocess/hwvideoframe/lib/libpython2.7.so")
+set(EXTRA_LIBS ${EXTRA_LIBS} gpu)
+file(GLOB SOURCE_FILES pybind/*.cpp src/*.cpp)
+include_directories("./include")
+include_directories(${CUDA_INCLUDE_DIRS})
+include_directories(${PYTHON_INCLUDE_DIR})
+link_directories("-L${CUDA_TOOLKIT_ROOT_DIR}/lib64 -lcudart -lnppidei_static -lnppial_static -lnpps_static -lnppc_static -lculibos")
+link_directories(${PYTHON_SO})
 #.so
 add_library(gpupreprocess SHARED ${SOURCE_FILES})
-target_link_libraries (gpupreprocess ${EXTRA_LIBS})
+target_link_libraries(gpupreprocess ${EXTRA_LIBS})
+target_link_libraries(gpupreprocess ${CUDA_LIBRARIES})
@@ -5,7 +5,6 @@ SET(CUDA_TARGET_INCLUDE ${CUDA_TOOLKIT_ROOT_DIR}-${CUDA_VERSION}/targets/${CMAKE
 file(GLOB_RECURSE CURRENT_HEADERS *.h *.hpp *.cuh)
 file(GLOB CURRENT_SOURCES *.cpp *.cu)
-file(GLOB CUDA_LIBS /opt/compiler/cuda-10.1/lib64/*.so)
 source_group("Include" FILES ${CURRENT_HEADERS})
 source_group("Source" FILES ${CURRENT_SOURCES})
@@ -13,10 +12,7 @@ source_group("Source" FILES ${CURRENT_SOURCES})
 set(CMAKE_CUDA_FLAGS "-ccbin /opt/compiler/gcc-4.8.2/bin -Xcompiler -fPIC --std=c++11")
 set(CUDA_NVCC_FLAGS "-L/opt/compiler/gcc-4.8.2/bin -Xcompiler -fPIC --std=c++11")
-include_directories("/opt/compiler/cuda-10.1/include")
-#cuda_add_library(gpu SHARED ${CURRENT_HEADERS} ${CURRENT_SOURCES})
+include_directories(${CUDA_INCLUDE_DIRS})
 cuda_add_library(gpu SHARED ${CURRENT_HEADERS} ${CURRENT_SOURCES})
 target_link_libraries(gpu ${CUDA_LIBS})
-# import libgpupreprocess as pp
@@ -34,8 +34,8 @@ __global__ void resizeCudaKernel(const float* input,
   if (inputChannels == 1) {  // grayscale image
     // TODO(Zelda): support grayscale
   } else if (inputChannels == 3) {  // RGB image
-    double scale_x = static_cast<double> inputWidth / outputWidth;
-    double scale_y = static_cast<double> inputHeight / outputHeight;
+    double scale_x = static_cast<double>(inputWidth / outputWidth);
+    double scale_y = static_cast<double>(inputHeight / outputHeight);
     int xmax = outputWidth;
@@ -144,8 +144,8 @@ __global__ void resizeCudaKernel_fixpt(const float* input,
   if (inputChannels == 1) {  // grayscale image
     // TODO(Zelda): support grayscale
   } else if (inputChannels == 3) {  // RGB image
-    double scale_x = static_cast<double> inputWidth / outputWidth;
-    double scale_y = static_cast<double> inputHeight / outputHeight;
+    double scale_x = static_cast<double>(inputWidth / outputWidth);
+    double scale_y = static_cast<double>(inputHeight / outputHeight);
     int xmax = outputWidth;
......
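For reference, the scale factors in the kernel above map each output pixel back to a source coordinate. A minimal NumPy sketch of the standard bilinear mapping follows; it uses floating-point division for the scale, which is an assumption about the intended behaviour rather than a copy of the CUDA kernel.

import numpy as np

def bilinear_sample(img, out_h, out_w):
    """Standard bilinear resize mapping; illustrative only, not the CUDA kernel."""
    in_h, in_w, channels = img.shape
    scale_y = in_h / out_h          # float division assumed
    scale_x = in_w / out_w
    out = np.empty((out_h, out_w, channels), dtype=img.dtype)
    for oy in range(out_h):
        for ox in range(out_w):
            # map the output pixel centre back into the source image
            sy = min(max((oy + 0.5) * scale_y - 0.5, 0), in_h - 1)
            sx = min(max((ox + 0.5) * scale_x - 0.5, 0), in_w - 1)
            y0, x0 = int(sy), int(sx)
            y1, x1 = min(y0 + 1, in_h - 1), min(x0 + 1, in_w - 1)
            wy, wx = sy - y0, sx - x0
            out[oy, ox] = ((1 - wy) * (1 - wx) * img[y0, x0] +
                           (1 - wy) * wx * img[y0, x1] +
                           wy * (1 - wx) * img[y1, x0] +
                           wy * wx * img[y1, x1])
    return out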
@@ -23,8 +23,8 @@
 // subtract by some float numbers
 class Sub {
  public:
-  explicit Sub(float subtractor) {}
-  explicit Sub(const std::vector<float> &subtractors) {}
+  explicit Sub(float subtractor);
+  explicit Sub(const std::vector<float> &subtractors);
   std::shared_ptr<OpContext> operator()(std::shared_ptr<OpContext> input);
  private:
......
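Sub subtracts either a single scalar or one value per channel. A short NumPy sketch of the equivalent host-side computation (values are placeholders):

import numpy as np

img = np.random.rand(4, 4, 3).astype("float32")  # HWC float image

# Sub(float): one subtractor applied to every element
out_scalar = img - 117.0

# Sub(vector<float>): one subtractor per channel, broadcast over H and W
out_per_channel = img - np.array([104.0, 117.0, 123.0], dtype="float32")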
@@ -45,7 +45,7 @@ pybind11::array_t<float> Gpubuffer2Image::operator()(
     std::shared_ptr<OpContext> input) {
   auto result = pybind11::array_t<float>({input->nppi_size().height,
                                           input->nppi_size().width,
-                                          static_cast<int> CHANNEL_SIZE});
+                                          static_cast<int>(CHANNEL_SIZE)});
   pybind11::buffer_info buf = result.request();
   auto ret = cudaMemcpy(static_cast<float*>(buf.ptr),
                         input->p_frame(),
......
@@ -34,9 +34,8 @@ std::shared_ptr<OpContext> Resize::operator()(
     if (round(percent * im_max_size) > _max_size) {
       percent = static_cast<float>(_max_size) / static_cast<float>(im_max_size);
     }
-    resized_width = tatic_cast<int>(round(input->nppi_size().width * percent));
-    resized_height =
-        tatic_cast<int>(round(input->nppi_size().height * percent));
+    resized_width = static_cast<int>(round(input->nppi_size().width * percent));
+    resized_height = static_cast<int>(round(input->nppi_size().height * percent));
   }
   auto output = std::make_shared<OpContext>(resized_height, resized_width);
   auto ret = resize_linear(input->p_frame(),
......
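The Resize operator scales both sides by a single factor and caps the longer side at _max_size. A sketch of that sizing rule, assuming the factor starts as target_size over the shorter side (the usual detection-style convention, not shown in this hunk):

def compute_resized_shape(width, height, target_size, max_size):
    """Illustrative sizing rule; assumes percent = target_size / shorter side."""
    im_min_size = min(width, height)
    im_max_size = max(width, height)
    percent = float(target_size) / float(im_min_size)  # assumption, not in the hunk
    if round(percent * im_max_size) > max_size:        # clamp shown in the diff
        percent = float(max_size) / float(im_max_size)
    return int(round(width * percent)), int(round(height * percent))

print(compute_resized_shape(1280, 720, 608, 1024))  # -> (1024, 576)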
@@ -49,8 +49,8 @@ std::shared_ptr<OpContext> ResizeByFactor::operator()(
   } else {
     resized_width = (floor(resized_width / 32) - 1) * _factor;
   }
-  if (tatic_cast<int>(resized_width) <= 0 ||
-      tatic_cast<int>(resized_height) <= 0) {
+  if (static_cast<int>(resized_width) <= 0 ||
+      static_cast<int>(resized_height) <= 0) {
     return NULL;
   }
   auto output = std::make_shared<OpContext>(resized_height, resized_width);
......
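ResizeByFactor snaps the resized dimensions down to multiples of the factor and rejects degenerate results. A sketch of the rounding in the else-branch above, assuming the factor is the same 32 that appears in the floor expression:

import math

def snap_to_factor(size, factor=32):
    """Round a dimension down to a positive multiple of `factor` (illustrative)."""
    snapped = (math.floor(size / factor) - 1) * factor  # mirrors the else-branch above
    if snapped <= 0:
        return None  # the operator returns NULL in this case
    return snapped

print(snap_to_factor(750))  # -> 704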
@@ -15,7 +15,7 @@
 #include <npp.h>
 #include "core/preprocess/hwvideoframe/include/utils.h"
-#include "core/preprocess/hwvideoframe/src/rgb_swap.h"
+#include "core/preprocess/hwvideoframe/include/rgb_swap.h"
 const int SwapChannel::_ORDER[CHANNEL_SIZE] = {2, 1, 0};
......
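SwapChannel reorders the colour channels with the fixed order {2, 1, 0}, i.e. it converts between RGB and BGR. An equivalent NumPy sketch:

import numpy as np

ORDER = [2, 1, 0]                        # same order as SwapChannel::_ORDER
rgb = np.random.rand(2, 2, 3).astype("float32")
bgr = rgb[:, :, ORDER]                   # HWC image with channels reversed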
@@ -16,7 +16,7 @@
 #include <sstream>
 #include <stdexcept>
-#include "core/preprocess/hwvideoframe/src/utils.h"
+#include "core/preprocess/hwvideoframe/include/utils.h"
 void verify_npp_ret(const std::string& function_name, NppStatus ret) {
   if (ret != NPP_SUCCESS) {
......
@@ -11,7 +11,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from .audio_reader import AudioFeatureOp
 from .chinese_bert_reader import ChineseBertReader
 from .image_reader import ImageReader, File2Image, URL2Image, Sequential, Normalize, Base64ToImage
 from .image_reader import CenterCrop, Resize, Transpose, Div, RGB2BGR, BGR2RGB, ResizeByFactor
......
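The reader package above exports composable preprocessing ops. A minimal sketch of chaining a few of them with Sequential; the file name and target size are placeholders:

from paddle_serving_app.reader import Sequential, File2Image, Resize, RGB2BGR

preprocess = Sequential([
    File2Image(),        # read the image file into an ndarray
    Resize((224, 224)),  # resize to the network input size
    RGB2BGR(),           # match the channel order the model expects
])
img = preprocess("daisy.jpg")  # placeholder file name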
@@ -40,11 +40,8 @@ max_version, mid_version, min_version = python_version()
 if '${PACK}' == 'ON':
     copy_lib()
-os.system('cp ../core/preprocess/nvdec-extractframe/libhwextract.so ./')
-os.system('mv ./libhwextract.so ./paddle_serving_app/reader/hwextract.so')
 os.system('cp ../core/preprocess/hwvideoframe/libgpupreprocess.so ./paddle_serving_app/reader')
 os.system('mkdir ./paddle_serving_app/reader/lib')
-os.system('cp ../core/preprocess/nvdec-extractframe/cuda/libhwgpu.so ./paddle_serving_app/reader/lib')
 os.system('cp ../core/preprocess/hwvideoframe/cuda/libgpu.so ./paddle_serving_app/reader/lib')
 os.system('export LD_LIBRARY_PATH="./paddle_serving_app/reader/lib"')
@@ -61,8 +58,7 @@ packages=['paddle_serving_app',
           'paddle_serving_app.reader.pddet',
           'paddle_serving_app.reader.lib']
-package_data={'paddle_serving_app': ['reader/*.so']}
-#              'paddle_serving_app.reader': ['lib/*.so']}
+package_data={}
 package_dir={'paddle_serving_app':
              '${PADDLE_SERVING_BINARY_DIR}/python/paddle_serving_app',
              'paddle_serving_app.proto':
@@ -110,4 +106,4 @@ setup(
         'Topic :: Software Development :: Libraries :: Python Modules',
     ],
     license='Apache 2.0',
-    keywords=('paddle-serving serving-client deployment industrial easy-to-use'))
\ No newline at end of file
+    keywords=('paddle-serving serving-client deployment industrial easy-to-use'))