Commit ab2ba2fe authored by hjchen2

Fix compile warning, disable OpenMP for iOS CI build

Parent 0e9a2fa2
@@ -175,11 +175,8 @@ struct Print {
friend struct ToLog;
template <typename T>
Print &operator<<(T const &value) {
Print p = Print();
return p;
return *this;
}
private:
};
struct ToLog {
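The operator<< fix above matters because the logger is used in a chained, stream-like style: each << must return a reference to the same Print object. The removed body returned a reference to a function-local object, which is presumably the compile warning being fixed. A minimal self-contained sketch of the chaining pattern, using a hypothetical ChainedPrinter rather than the real Print class:

```cpp
#include <iostream>
#include <sstream>

// Minimal chainable stream-style printer (hypothetical, not the real Print
// class). operator<< must return *this so every << in a chain appends to the
// same object; returning a reference to a local copy would dangle.
struct ChainedPrinter {
  template <typename T>
  ChainedPrinter &operator<<(const T &value) {
    buffer_ << value;
    return *this;
  }
  ~ChainedPrinter() { std::cout << buffer_.str() << std::endl; }

 private:
  std::ostringstream buffer_;
};

int main() {
  // All three << calls write into the same temporary, printed on destruction.
  ChainedPrinter() << "conv output max = " << 3.14f << " (tensor " << 7 << ")";
  return 0;
}
```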
@@ -39,7 +39,7 @@ using framework::Tensor;
using paddle_mobile::CPU;
using std::string;
extern const char *ANDROID_LOG_TAG =
const char *ANDROID_LOG_TAG =
"paddle_mobile LOG built on " __DATE__ " " __TIME__;
paddle_mobile::PaddleMobile<paddle_mobile::CPU> paddle_mobile;
static std::mutex shared_mutex;
@@ -53,25 +53,25 @@ string jstring2cppstring(JNIEnv *env, jstring jstr) {
return cppstr;
}
JNIEXPORT jboolean JNICALL Java_com_baidu_paddle_PML_load(JNIEnv *env,
jclass thiz,
jstring modelPath) {
JNIEXPORT jboolean JNICALL Java_com_baidu_paddle_PML_load(
JNIEnv *env, jclass thiz, jstring modelPath, jboolean lod_mode = false) {
std::lock_guard<std::mutex> lock(shared_mutex);
ANDROIDLOGI("load invoked");
bool optimize = true;
bool isLoadOk = false;
#ifdef ENABLE_EXCEPTION
try {
isLoadOk = getPaddleMobileInstance()->Load(
jstring2cppstring(env, modelPath), optimize);
jstring2cppstring(env, modelPath), optimize, false, 1,
static_cast<bool>(lod_mode));
} catch (paddle_mobile::PaddleMobileException &e) {
ANDROIDLOGE("jni got an PaddleMobileException! ", e.what());
isLoadOk = false;
}
#else
isLoadOk = getPaddleMobileInstance()->Load(jstring2cppstring(env, modelPath),
optimize);
optimize, false, 1,
static_cast<bool>(lod_mode));
#endif
return static_cast<jboolean>(isLoadOk);
}
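For context on the extra arguments forwarded above: the call presumably matches a Load overload along the lines of Load(model_path, optimize, quantification, batch_size, lod_mode); the names of the middle parameters are assumptions, not taken from this diff. A sketch of a C++ caller exercising the LoD-enabled path under that assumed signature:

```cpp
#include <string>

#include "io/paddle_mobile.h"  // assumed include path for PaddleMobile

// Hypothetical helper mirroring the JNI wrapper: forwards an optional
// lod_mode flag to Load. "quantification" and "batch_size" as names for the
// third and fourth arguments are assumptions about the overload being called.
bool LoadModel(paddle_mobile::PaddleMobile<paddle_mobile::CPU> *engine,
               const std::string &model_path, bool lod_mode = false) {
  const bool optimize = true;
  const bool quantification = false;  // assumed third parameter
  const int batch_size = 1;           // assumed fourth parameter
  return engine->Load(model_path, optimize, quantification, batch_size,
                      lod_mode);
}
```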
@@ -26,7 +26,8 @@ namespace jni {
*/
JNIEXPORT jboolean JNICALL Java_com_baidu_paddle_PML_load(JNIEnv *env,
jclass thiz,
jstring modelPath);
jstring modelPath,
jboolean lod_mode);
/**
* load separated qualified model for android
@@ -64,6 +65,9 @@ JNIEXPORT jfloatArray JNICALL Java_com_baidu_paddle_PML_predictYuv(
JNIEXPORT jfloatArray JNICALL
Java_com_baidu_paddle_PML_predict(JNIEnv *env, jclass thiz, jfloatArray buf);
JNIEXPORT jlongArray JNICALL
Java_com_baidu_paddle_PML_predictLod(JNIEnv *env, jclass thiz, jlongArray buf);
/**
* setThreadCount for multithread
*/
@@ -167,7 +167,7 @@ float find_abs_max(const Tensor *input) {
max_abs = vmaxvq_f32(__max);
#endif
for (size_t i = 0; i < remain; ++i) {
max_abs = std::max(max_abs, std::abs(x[i]));
max_abs = std::max(max_abs, fabs(x[i]));
}
return max_abs;
}
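The loop changed above is the scalar tail of an absolute-maximum reduction: the NEON vmaxvq_f32 path handles the bulk of the tensor, and the few leftover elements are folded in one at a time. A scalar-only sketch of the same reduction (a hypothetical standalone helper, ignoring the NEON fast path):

```cpp
#include <algorithm>
#include <cmath>
#include <cstddef>

// Scalar absolute-maximum over a float buffer; in the kernel above only the
// elements left over after the vectorized loop take this path.
float FindAbsMaxScalar(const float *x, size_t n) {
  float max_abs = 0.f;
  for (size_t i = 0; i < n; ++i) {
    // std::fabs has an unambiguous float overload in <cmath>
    max_abs = std::max(max_abs, std::fabs(x[i]));
  }
  return max_abs;
}
```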
@@ -21,6 +21,7 @@ limitations under the License. */
#include "operators/math/gemm.h"
#include "operators/math/math_function.h"
#include "operators/op_param.h"
namespace paddle_mobile {
namespace operators {
@@ -40,8 +40,8 @@ template <>
inline int8_t Round<ROUND_NEAREST_TO_EVEN>(const float &x) {
float v = std::round(x);
int32_t q = static_cast<int32_t>(v);
if (std::abs(std::abs(q - v) - 0.5) <= 0) {
if (std::abs(q) % 2 != 0) {
if (fabs(fabs(q - v) - 0.5) <= 0) {
if (abs(q) % 2 != 0) {
q = q + ((q > 0) ? -1 : 1);
}
}
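The branch shown above implements round-half-to-even (banker's rounding): the value is first rounded with std::round, and if it was an exact halfway case that landed on an odd integer, it is nudged back toward the even neighbour. A standalone sketch of the same rule, using a small tolerance for the halfway test instead of the exact comparison in the diff:

```cpp
#include <cmath>
#include <cstdint>
#include <cstdio>

// Round to the nearest integer, breaking exact .5 ties toward the even
// neighbour (round-half-to-even).
int32_t RoundToEven(float x) {
  float v = std::round(x);              // halfway cases go away from zero here
  int32_t q = static_cast<int32_t>(v);
  if (std::fabs(std::fabs(q - v) - 0.5f) < 1e-6f) {  // was x a halfway case?
    if (q % 2 != 0) {                   // landed on odd: step back to even
      q += (q > 0) ? -1 : 1;
    }
  }
  return q;
}

int main() {
  // Expected output: 0 2 2 -2
  std::printf("%d %d %d %d\n", RoundToEven(0.5f), RoundToEven(1.5f),
              RoundToEven(2.5f), RoundToEven(-1.5f));
  return 0;
}
```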
@@ -146,6 +146,7 @@ function build_ios_armv8_cpu_only() {
-DIOS_PLATFORM=OS \
-DIOS_ARCH="${IOS_ARCH}" \
-DIS_IOS=true \
-DUSE_OPENMP=OFF \
-DGPU_MALI=OFF \
-DGPU_CL=OFF \
-DFPGA=OFF
@@ -163,6 +164,7 @@ function build_ios_armv8_gpu() {
-DIOS_PLATFORM=OS \
-DIOS_ARCH="${IOS_ARCH}" \
-DIS_IOS=true \
-DUSE_OPENMP=OFF \
-DGPU_MALI=OFF \
-DGPU_CL=ON \
-DFPGA=OFF