Unverified commit 591b957b, authored by Mars懵, committed by GitHub

[runtime] fix linux && android cmake bug (#3112)

* support vad ios compile
* support onnx model recognize
* add build ios sh
Parent 9e5a39ca
@@ -60,7 +60,10 @@ set(FETCHCONTENT_BASE_DIR ${fc_patch})
 ###############################################################################
 # https://github.com/google/brotli/pull/655
 option(BUILD_SHARED_LIBS "Build shared libraries" ON)
-option(NDEBUG "debug option" OFF)
+option(WITH_PPS_DEBUG "debug option" OFF)
+if (WITH_PPS_DEBUG)
+    add_definitions("-DPPS_DEBUG")
+endif()
 option(WITH_ASR "build asr" ON)
 option(WITH_CLS "build cls" ON)
@@ -71,7 +74,7 @@ option(WITH_GPU "NNet using GPU." OFF)
 option(WITH_PROFILING "enable c++ profling" OFF)
 option(WITH_TESTING "unit test" ON)
-option(WITH_ONNX "u2 support onnx runtime" ON)
+option(WITH_ONNX "u2 support onnx runtime" OFF)
 ###############################################################################
 # Include Third Party
......
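Note: when WITH_PPS_DEBUG is ON, add_definitions("-DPPS_DEBUG") puts the PPS_DEBUG macro on every compile line, so debug-only code throughout the runtime is now gated on a project-specific macro instead of the standard NDEBUG. A minimal, self-contained C++ sketch of what that gating looks like from the source side (the program itself is illustrative, not from the repo):

#include <cstdio>

// Built with -DWITH_PPS_DEBUG=ON (which defines PPS_DEBUG), the first branch
// is compiled in; otherwise the diagnostics disappear at compile time,
// independently of CMAKE_BUILD_TYPE.
int main() {
#ifdef PPS_DEBUG
    std::puts("PPS_DEBUG build: extra diagnostics enabled");
#else
    std::puts("regular build: diagnostics compiled out");
#endif
    return 0;
}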
@@ -9,9 +9,11 @@ mkdir -p ${BUILD_DIR}
 BUILD_TYPE=Release
 #BUILD_TYPE=Debug
 BUILD_SO=OFF
+BUILD_ONNX=ON
 BUILD_ASR=ON
 BUILD_CLS=ON
 BUILD_VAD=ON
+PPS_DEBUG=OFF
 FASTDEPLOY_INSTALL_DIR=""
 # the build script had verified in the paddlepaddle docker image.
@@ -21,9 +23,11 @@ FASTDEPLOY_INSTALL_DIR=""
 cmake -B ${BUILD_DIR} \
 -DCMAKE_BUILD_TYPE=${BUILD_TYPE} \
 -DBUILD_SHARED_LIBS=${BUILD_SO} \
+-DWITH_ONNX=${BUILD_ONNX} \
 -DWITH_ASR=${BUILD_ASR} \
 -DWITH_CLS=${BUILD_CLS} \
 -DWITH_VAD=${BUILD_VAD} \
--DFASTDEPLOY_INSTALL_DIR=${FASTDEPLOY_INSTALL_DIR}
+-DFASTDEPLOY_INSTALL_DIR=${FASTDEPLOY_INSTALL_DIR} \
+-DWITH_PPS_DEBUG=${PPS_DEBUG}
 cmake --build ${BUILD_DIR} -j
@@ -2,7 +2,7 @@
 set -ex
-ANDROID_NDK=/workspace/zhanghui/android-sdk/android-ndk-r25c
+ANDROID_NDK=/mnt/masimeng/workspace/software/android-ndk-r25b/
 # Setting up Android toolchanin
 ANDROID_ABI=arm64-v8a # 'arm64-v8a', 'armeabi-v7a'
@@ -14,7 +14,7 @@ TOOLCHAIN_FILE=${ANDROID_NDK}/build/cmake/android.toolchain.cmake
 # Create build directory
 BUILD_ROOT=build/Android
 BUILD_DIR=${BUILD_ROOT}/${ANDROID_ABI}-api-21
-FASTDEPLOY_INSTALL_DIR="/workspace/zhanghui/paddle/FastDeploy/build/Android/arm64-v8a-api-21/install"
+FASTDEPLOY_INSTALL_DIR="/mnt/masimeng/workspace/FastDeploy/build/Android/arm64-v8a-api-21/install"
 mkdir -p ${BUILD_DIR}
 cd ${BUILD_DIR}
......
@@ -28,7 +28,7 @@ endif()
 if(ANDROID)
 add_library(extern_glog INTERFACE)
-add_dependencies(glog gflags)
+add_dependencies(extern_glog gflags)
 else() # UNIX
 add_library(extern_glog ALIAS glog)
 add_dependencies(glog gflags)
......
@@ -223,7 +223,7 @@ void U2Nnet::ForwardEncoderChunkImpl(
 VLOG(3) << "feats shape: " << feats.shape()[0] << ", " << feats.shape()[1]
 << ", " << feats.shape()[2];
-#ifndef NDEBUG
+#ifdef PPS_DEBUG
 {
 std::stringstream path("feat", std::ios_base::app | std::ios_base::out);
 path << offset_;
@@ -269,7 +269,7 @@ void U2Nnet::ForwardEncoderChunkImpl(
 cnn_cache_ = outputs[2];
 #endif
-#ifndef NDEBUG
+#ifdef PPS_DEBUG
 {
 std::stringstream path("encoder_logits",
 std::ios_base::app | std::ios_base::out);
@@ -299,7 +299,7 @@ void U2Nnet::ForwardEncoderChunkImpl(
 encoder_outs_.push_back(chunk_out);
 VLOG(2) << "encoder_outs_ size: " << encoder_outs_.size();
-#ifndef NDEBUG
+#ifdef PPS_DEBUG
 {
 std::stringstream path("encoder_logits_list",
 std::ios_base::app | std::ios_base::out);
@@ -332,7 +332,7 @@ void U2Nnet::ForwardEncoderChunkImpl(
 CHECK_EQ(outputs.size(), 1);
 paddle::Tensor ctc_log_probs = outputs[0];
-#ifndef NDEBUG
+#ifdef PPS_DEBUG
 {
 std::stringstream path("encoder_logprob",
 std::ios_base::app | std::ios_base::out);
@@ -371,7 +371,7 @@ void U2Nnet::ForwardEncoderChunkImpl(
 std::memcpy(
 out_prob->data(), ctc_log_probs_ptr, T * D * sizeof(kaldi::BaseFloat));
-#ifndef NDEBUG
+#ifdef PPS_DEBUG
 {
 std::stringstream path("encoder_logits_list_ctc",
 std::ios_base::app | std::ios_base::out);
@@ -462,7 +462,7 @@ void U2Nnet::AttentionRescoring(const std::vector<std::vector<int>>& hyps,
 }
 }
-#ifndef NDEBUG
+#ifdef PPS_DEBUG
 {
 std::stringstream path("encoder_logits_concat",
 std::ios_base::app | std::ios_base::out);
@@ -486,7 +486,7 @@ void U2Nnet::AttentionRescoring(const std::vector<std::vector<int>>& hyps,
 paddle::Tensor encoder_out = paddle::concat(encoder_outs_, 1);
 VLOG(2) << "encoder_outs_ size: " << encoder_outs_.size();
-#ifndef NDEBUG
+#ifdef PPS_DEBUG
 {
 std::stringstream path("encoder_out0",
 std::ios_base::app | std::ios_base::out);
@@ -505,7 +505,7 @@ void U2Nnet::AttentionRescoring(const std::vector<std::vector<int>>& hyps,
 }
 #endif // end TEST_DEBUG
-#ifndef NDEBUG
+#ifdef PPS_DEBUG
 {
 std::stringstream path("encoder_out",
 std::ios_base::app | std::ios_base::out);
@@ -536,7 +536,7 @@ void U2Nnet::AttentionRescoring(const std::vector<std::vector<int>>& hyps,
 CHECK_EQ(probs_shape[0], num_hyps);
 CHECK_EQ(probs_shape[1], max_hyps_len);
-#ifndef NDEBUG
+#ifdef PPS_DEBUG
 {
 std::stringstream path("decoder_logprob",
 std::ios_base::app | std::ios_base::out);
@@ -554,7 +554,7 @@ void U2Nnet::AttentionRescoring(const std::vector<std::vector<int>>& hyps,
 }
 #endif // end TEST_DEBUG
-#ifndef NDEBUG
+#ifdef PPS_DEBUG
 {
 std::stringstream path("hyps_lens",
 std::ios_base::app | std::ios_base::out);
@@ -570,7 +570,7 @@ void U2Nnet::AttentionRescoring(const std::vector<std::vector<int>>& hyps,
 }
 #endif // end TEST_DEBUG
-#ifndef NDEBUG
+#ifdef PPS_DEBUG
 {
 std::stringstream path("hyps_tensor",
 std::ios_base::app | std::ios_base::out);
......
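The blocks changed above are the debug dumps in the U2 nnet code: each one builds a per-chunk file name with std::stringstream and writes an intermediate tensor to it. They used to be gated on the standard NDEBUG macro; now they are compiled only when the build is configured with -DWITH_PPS_DEBUG=ON. A self-contained sketch of that pattern, with illustrative function and file names:

#include <fstream>
#include <sstream>
#include <vector>

// Sketch of the dump pattern above: append the chunk offset to a base name
// (std::ios_base::app keeps the initial string), then write one value per line.
void DumpChunk(const std::vector<float>& data, int offset) {
#ifdef PPS_DEBUG
    std::stringstream path("encoder_logits", std::ios_base::app | std::ios_base::out);
    path << offset;  // e.g. "encoder_logits0", "encoder_logits16", ...
    std::ofstream fp(path.str(), std::ios::out);
    for (float v : data) {
        fp << v << "\n";
    }
#else
    (void)data;    // dumps compiled out unless WITH_PPS_DEBUG=ON
    (void)offset;
#endif
}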
@@ -3,7 +3,7 @@ set(srcs
 panns_interface.cc
 )
-add_library(cls ${srcs})
+add_library(cls SHARED ${srcs})
 target_link_libraries(cls PRIVATE ${FASTDEPLOY_LIBS} kaldi-matrix kaldi-base frontend utils )
 set(bin_name panns_nnet_main)
......
@@ -49,23 +49,22 @@ int ClsNnet::Init(const ClsNnetConf& conf) {
 // init model
 fastdeploy::RuntimeOption runtime_option;
-#ifdef USE_ORT_BACKEND
+#ifdef USE_PADDLE_INFERENCE_BACKEND
+runtime_option.SetModelPath(conf.model_file_path_,
+conf.param_file_path_,
+fastdeploy::ModelFormat::PADDLE);
+runtime_option.UsePaddleInferBackend();
+#elif defined(USE_ORT_BACKEND)
 runtime_option.SetModelPath(
 conf.model_file_path_, "", fastdeploy::ModelFormat::ONNX); // onnx
 runtime_option.UseOrtBackend(); // onnx
-#endif
-#ifdef USE_PADDLE_LITE_BACKEND
+#elif defined(USE_PADDLE_LITE_BACKEND)
 runtime_option.SetModelPath(conf.model_file_path_,
 conf.param_file_path_,
 fastdeploy::ModelFormat::PADDLE);
 runtime_option.UseLiteBackend();
 #endif
-#ifdef USE_PADDLE_INFERENCE_BACKEND
-runtime_option.SetModelPath(conf.model_file_path_,
-conf.param_file_path_,
-fastdeploy::ModelFormat::PADDLE);
-runtime_option.UsePaddleInferBackend();
-#endif
 runtime_option.SetCpuThreadNum(conf.num_cpu_thread_);
 // runtime_option.DeletePaddleBackendPass("simplify_with_basic_ops_pass");
 runtime_ = std::unique_ptr<fastdeploy::Runtime>(new fastdeploy::Runtime());
@@ -105,7 +104,7 @@ int ClsNnet::Forward(const char* wav_path,
 conf_.wav_normal_,
 conf_.wav_normal_type_,
 conf_.wav_norm_mul_factor_);
-#ifndef NDEBUG
+#ifdef PPS_DEBUG
 {
 std::ofstream fp("cls.wavform", std::ios::out);
 for (int i = 0; i < wavform.size(); ++i) {
@@ -138,7 +137,7 @@ int ClsNnet::Forward(const char* wav_path,
 feats[i * feat_dim + j] = PowerTodb(feats[i * feat_dim + j]);
 }
 }
-#ifndef NDEBUG
+#ifdef PPS_DEBUG
 {
 std::ofstream fp("cls.feat", std::ios::out);
 for (int i = 0; i < num_frames; ++i) {
@@ -162,7 +161,7 @@ int ClsNnet::Forward(const char* wav_path,
 #ifdef WITH_PROFILING
 printf("fast deploy infer consume: %fs\n", timer.Elapsed());
 #endif
-#ifndef NDEBUG
+#ifdef PPS_DEBUG
 {
 std::ofstream fp("cls.logits", std::ios::out);
 for (int i = 0; i < model_out.size(); ++i) {
......
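The backend-selection change above collapses three independent #ifdef blocks into one #ifdef/#elif chain, so at most one branch configures runtime_option and USE_PADDLE_INFERENCE_BACKEND takes precedence over the ORT and Paddle Lite backends. With separate #ifdef blocks, defining more than one macro would configure runtime_option repeatedly, and the last block would silently win. A small self-contained sketch of the chain's behaviour (the defines are forced here purely for illustration):

#include <cstdio>

// Illustration only: with two backend macros defined, the #elif chain picks
// exactly the first match instead of running every matching block.
#define USE_PADDLE_INFERENCE_BACKEND
#define USE_ORT_BACKEND

int main() {
#ifdef USE_PADDLE_INFERENCE_BACKEND
    std::puts("configured: Paddle Inference backend");
#elif defined(USE_ORT_BACKEND)
    std::puts("configured: ONNX Runtime backend");
#elif defined(USE_PADDLE_LITE_BACKEND)
    std::puts("configured: Paddle Lite backend");
#else
    std::puts("no backend macro defined");
#endif
    return 0;
}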
@@ -15,8 +15,8 @@
 #include <fstream>
 #include <string>
-#include "base/flags.h"
-#include "base/log.h"
+#include "gflags/gflags.h"
+#include "glog/logging.h"
 #include "cls/nnet/panns_interface.h"
 DEFINE_string(conf_path, "", "config path");
......
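With the include change above, the panns_nnet_main tool uses gflags and glog directly instead of the project-local base/flags.h and base/log.h wrappers. A minimal sketch of a main that does the same; the flag name matches the one in the diff, the rest is illustrative:

#include "gflags/gflags.h"
#include "glog/logging.h"

DEFINE_string(conf_path, "", "config path");

int main(int argc, char* argv[]) {
    // Standard gflags/glog initialization, replacing the project wrappers
    // that the old base/flags.h and base/log.h includes pulled in.
    gflags::ParseCommandLineFlags(&argc, &argv, true);
    google::InitGoogleLogging(argv[0]);

    LOG(INFO) << "conf_path: " << FLAGS_conf_path;
    return 0;
}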
@@ -90,7 +90,7 @@ class LogMessage {
 } // namespace ppspeech
-#ifdef NDEBUG
+#ifndef PPS_DEBUG
 #define DLOG_INFO \
 ppspeech::log::LogMessage(__FILE__, __LINE__, ppspeech::log::INFO, false)
 #define DLOG_WARNING \
@@ -145,7 +145,7 @@ class LogMessage {
 #define CHECK_LT(x, y) CHECK((x) < (y))
 #define CHECK_GE(x, y) CHECK((x) >= (y))
 #define CHECK_GT(x, y) CHECK((x) > (y))
-#ifndef NDEBUG
+#ifdef PPS_DEBUG
 #define DCHECK(x) CHECK(x)
 #define DCHECK_EQ(x, y) CHECK_EQ(x, y)
 #define DCHECK_NE(x, y) CHECK_NE(x, y)
@@ -153,7 +153,7 @@ class LogMessage {
 #define DCHECK_LT(x, y) CHECK_LT(x, y)
 #define DCHECK_GE(x, y) CHECK_GE(x, y)
 #define DCHECK_GT(x, y) CHECK_GT(x, y)
-#else // NDEBUG
+#else
 #define DCHECK(condition) \
 while (false) CHECK(condition)
 #define DCHECK_EQ(val1, val2) \
......
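In this header the DLOG_* and DCHECK* macros are now keyed on PPS_DEBUG rather than NDEBUG: with PPS_DEBUG undefined, DCHECK(condition) expands to "while (false) CHECK(condition)", which still forces the expression to compile but never evaluates it at runtime. A self-contained illustration of that no-op pattern (MY_DCHECK and assert stand in for the project's DCHECK and CHECK):

#include <cassert>

// With PPS_DEBUG undefined, the check becomes a dead while (false) loop:
// the condition must still type-check, but it is never executed.
#ifdef PPS_DEBUG
#define MY_DCHECK(condition) assert(condition)
#else
#define MY_DCHECK(condition) while (false) assert(condition)
#endif

int main() {
    int num_frames = 10;
    MY_DCHECK(num_frames > 0);  // enforced only in PPS_DEBUG builds
    return 0;
}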
@@ -7,6 +7,7 @@ add_library(kaldi-native-fbank-core
 rfft.cc
 )
 target_link_libraries(kaldi-native-fbank-core PUBLIC utils base)
+target_compile_options(kaldi-native-fbank-core PUBLIC "-fPIC")
 add_library(frontend STATIC
 cmvn.cc
......
@@ -181,7 +181,7 @@ private:
 // Also see KALDI_COMPILE_TIME_ASSERT, defined in base/kaldi-utils.h, and
 // KALDI_ASSERT_IS_INTEGER_TYPE and KALDI_ASSERT_IS_FLOATING_TYPE, also defined
 // there.
-#ifndef NDEBUG
+#ifdef PPS_DEBUG
 #define KALDI_ASSERT(cond) \
 do { \
 if (cond) \
......
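KALDI_ASSERT above keeps its do { ... } while (0) shape and is now enabled by PPS_DEBUG instead of a non-NDEBUG build. A simplified, self-contained stand-in that shows why the wrapper matters; the real KALDI_ASSERT reports failures through Kaldi's own error handling, not fprintf/abort:

#include <cstdio>
#include <cstdlib>

// The do { ... } while (0) wrapper makes the macro expand to a single
// statement, so it is safe inside an unbraced if/else.
#define MY_ASSERT(cond)                                            \
    do {                                                           \
        if (cond)                                                  \
            (void)0;                                               \
        else {                                                     \
            std::fprintf(stderr, "Assertion failed: %s\n", #cond); \
            std::abort();                                          \
        }                                                          \
    } while (0)

int main(int argc, char** argv) {
    if (argc > 0)
        MY_ASSERT(argv != nullptr);  // expands to one statement, else-safe
    else
        std::puts("no arguments");
    return 0;
}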
@@ -2,7 +2,7 @@ set(srcs
 vad_interface.cc
 )
-add_library(pps_vad_interface ${srcs})
+add_library(pps_vad_interface SHARED ${srcs})
 target_link_libraries(pps_vad_interface PUBLIC pps_vad extern_glog)
......
@@ -12,4 +12,8 @@ target_link_libraries(${bin_name} pps_vad)
 file(RELATIVE_PATH DEST_DIR ${ENGINE_ROOT} ${CMAKE_CURRENT_SOURCE_DIR})
 install(TARGETS pps_vad DESTINATION lib)
-install(TARGETS glog DESTINATION lib)
\ No newline at end of file
+if(ANDROID)
+install(TARGETS extern_glog DESTINATION lib)
+else() # UNIX
+install(TARGETS glog DESTINATION lib)
+endif()