diff --git a/paddle/fluid/distributed/test/brpc_service_dense_sgd_test.cc b/paddle/fluid/distributed/test/brpc_service_dense_sgd_test.cc
index b87f308aa6beea401ce01d2ab22213377467a558..b7f7df46278d0de12a321d662341d2bba1701f76 100644
--- a/paddle/fluid/distributed/test/brpc_service_dense_sgd_test.cc
+++ b/paddle/fluid/distributed/test/brpc_service_dense_sgd_test.cc
@@ -51,14 +51,16 @@ void CreateVarsOnScope(framework::Scope* scope, platform::CPUPlace* place) {
   x_var->GetMutable<framework::LoDTensor>();
 }
 
-void InitTensorsOnClient(framework::Scope* scope, platform::CPUPlace* place,
+void InitTensorsOnClient(framework::Scope* scope,
+                         platform::CPUPlace* place,
                          int64_t rows_numel) {
   CreateVarsOnScope(scope, place);
 
   auto x_var = scope->Var("x")->GetMutable<framework::LoDTensor>();
   float* x_ptr =
       x_var->mutable_data<float>(framework::DDim({1, rows_numel}), *place);
-  for (int64_t i = 0; i < rows_numel; ++i) x_ptr[i] = 1.0 * (float)i;
+  for (int64_t i = 0; i < rows_numel; ++i)
+    x_ptr[i] = 1.0 * static_cast<float>(i);
 }
 
 void GetDownpourDenseTableProto(
@@ -142,7 +144,7 @@ void GetDownpourDenseTableProto(
 
 /*-------------------------------------------------------------------------*/
 
-std::string ip_ = "127.0.0.1";
+const char* ip_ = "127.0.0.1";
 uint32_t port_ = 4214;
 std::vector<std::string> host_sign_list_;
 
@@ -237,7 +239,7 @@ void RunBrpcPushDense() {
   pull_status.wait();
 
   for (int64_t idx = 0; idx < tensor->numel(); ++idx) {
-    EXPECT_FLOAT_EQ(w[idx], float(idx));
+    EXPECT_FLOAT_EQ(w[idx], static_cast<float>(idx));
   }
 
   /*-----------------------Test Push Grad----------------------------------*/
@@ -266,7 +268,7 @@ void RunBrpcPushDense() {
   pull_update_status.wait();
 
   for (int64_t idx = 0; idx < tensor->numel(); ++idx) {
-    EXPECT_FLOAT_EQ(w[idx], float(idx) - 1.0);
+    EXPECT_FLOAT_EQ(w[idx], static_cast<float>(idx) - 1.0);
   }
 
   LOG(INFO) << "Run stop_server";
diff --git a/paddle/fluid/inference/experimental/javaapi/native/com_baidu_paddle_inference_Config.h b/paddle/fluid/inference/experimental/javaapi/native/com_baidu_paddle_inference_Config.h
index 648174d01dc97c5b7d0786df3c12e6e17a42a91f..6ad2097c881155020d88f06863855f371e412ff0 100644
--- a/paddle/fluid/inference/experimental/javaapi/native/com_baidu_paddle_inference_Config.h
+++ b/paddle/fluid/inference/experimental/javaapi/native/com_baidu_paddle_inference_Config.h
@@ -15,9 +15,8 @@
 /* DO NOT EDIT THIS FILE - it is machine generated */
 #include <jni.h>
 /* Header for class com_baidu_paddle_inference_Config */
+#pragma once
 
-#ifndef PADDLE_FLUID_INFERENCE_JAVAAPI_NATIVE_COM_BAIDU_PADDLE_INFERENCE_CONFIG_H_
-#define PADDLE_FLUID_INFERENCE_JAVAAPI_NATIVE_COM_BAIDU_PADDLE_INFERENCE_CONFIG_H_
 #ifdef __cplusplus
 extern "C" {
 #endif
@@ -43,7 +42,8 @@ Java_com_baidu_paddle_inference_Config_createCppConfig(JNIEnv *, jobject);
  * Signature: (J)Z
  */
 JNIEXPORT jboolean JNICALL
-Java_com_baidu_paddle_inference_Config_isCppConfigValid(JNIEnv *, jobject,
+Java_com_baidu_paddle_inference_Config_isCppConfigValid(JNIEnv *,
+                                                        jobject,
                                                         jlong);
 
 /*
@@ -110,7 +110,8 @@ Java_com_baidu_paddle_inference_Config_paramsFile(JNIEnv *, jobject, jlong);
 JNIEXPORT void JNICALL
 Java_com_baidu_paddle_inference_Config_setCpuMathLibraryNumThreads(JNIEnv *,
                                                                    jobject,
-                                                                   jlong, jint);
+                                                                   jlong,
+                                                                   jint);
 
 /*
  * Class:     com_baidu_paddle_inference_Config
@@ -119,7 +120,8 @@ Java_com_baidu_paddle_inference_Config_setCpuMathLibraryNumThreads(JNIEnv *,
  */
 JNIEXPORT jint JNICALL
 Java_com_baidu_paddle_inference_Config_cpuMathLibraryNumThreads(JNIEnv *,
-                                                                jobject, jlong);
+                                                                jobject,
+                                                                jlong);
 
 /*
  * Class:     com_baidu_paddle_inference_Config
@@ -143,7 +145,8 @@ Java_com_baidu_paddle_inference_Config_mkldnnEnabled(JNIEnv *, jobject, jlong);
  * Signature: (J)V
  */
 JNIEXPORT void JNICALL
-Java_com_baidu_paddle_inference_Config_enableMkldnnBfloat16(JNIEnv *, jobject,
+Java_com_baidu_paddle_inference_Config_enableMkldnnBfloat16(JNIEnv *,
+                                                            jobject,
                                                             jlong);
 
 /*
@@ -152,7 +155,8 @@ Java_com_baidu_paddle_inference_Config_enableMkldnnBfloat16(JNIEnv *, jobject,
  * Signature: (J)Z
  */
 JNIEXPORT jboolean JNICALL
-Java_com_baidu_paddle_inference_Config_mkldnnBfloat16Enabled(JNIEnv *, jobject,
+Java_com_baidu_paddle_inference_Config_mkldnnBfloat16Enabled(JNIEnv *,
+                                                             jobject,
                                                              jlong);
 
 /*
@@ -193,7 +197,8 @@ Java_com_baidu_paddle_inference_Config_gpuDeviceId(JNIEnv *, jobject, jlong);
  * Signature: (J)I
  */
 JNIEXPORT jint JNICALL
-Java_com_baidu_paddle_inference_Config_memoryPoolInitSizeMb(JNIEnv *, jobject,
+Java_com_baidu_paddle_inference_Config_memoryPoolInitSizeMb(JNIEnv *,
+                                                            jobject,
                                                             jlong);
 
 /*
@@ -244,7 +249,8 @@ JNIEXPORT void JNICALL Java_com_baidu_paddle_inference_Config_enableMemoryOptim(
  * Signature: (J)Z
  */
 JNIEXPORT jboolean JNICALL
-Java_com_baidu_paddle_inference_Config_memoryOptimEnabled(JNIEnv *, jobject,
+Java_com_baidu_paddle_inference_Config_memoryOptimEnabled(JNIEnv *,
+                                                          jobject,
                                                           jlong);
 
 /*
@@ -282,4 +288,3 @@ Java_com_baidu_paddle_inference_Config_summary(JNIEnv *, jobject, jlong);
 #ifdef __cplusplus
 }
 #endif
-#endif  // PADDLE_FLUID_INFERENCE_JAVAAPI_NATIVE_COM_BAIDU_PADDLE_INFERENCE_CONFIG_H_
diff --git a/paddle/fluid/inference/experimental/javaapi/native/com_baidu_paddle_inference_Predictor.h b/paddle/fluid/inference/experimental/javaapi/native/com_baidu_paddle_inference_Predictor.h
index fa38bba0f11415317417817a7d4d5b0dfa008225..da44efc8b8dcb203663286320358e9e432888a18 100644
--- a/paddle/fluid/inference/experimental/javaapi/native/com_baidu_paddle_inference_Predictor.h
+++ b/paddle/fluid/inference/experimental/javaapi/native/com_baidu_paddle_inference_Predictor.h
@@ -16,8 +16,7 @@
 #include <jni.h>
 /* Header for class com_baidu_paddle_inference_Predictor */
 
-#ifndef PADDLE_FLUID_INFERENCE_JAVAAPI_NATIVE_COM_BAIDU_PADDLE_INFERENCE_PREDICTOR_H_
-#define PADDLE_FLUID_INFERENCE_JAVAAPI_NATIVE_COM_BAIDU_PADDLE_INFERENCE_PREDICTOR_H_
+#pragma once
 #ifdef __cplusplus
 extern "C" {
 #endif
@@ -27,7 +26,8 @@ extern "C" {
  * Signature: (J)V
  */
 JNIEXPORT void JNICALL
-Java_com_baidu_paddle_inference_Predictor_cppPredictorDestroy(JNIEnv *, jobject,
+Java_com_baidu_paddle_inference_Predictor_cppPredictorDestroy(JNIEnv *,
+                                                              jobject,
                                                               jlong);
 
 /*
@@ -55,7 +55,8 @@ Java_com_baidu_paddle_inference_Predictor_predictorClearIntermediateTensor(
  * Signature: (J)J
  */
 JNIEXPORT jlong JNICALL
-Java_com_baidu_paddle_inference_Predictor_createPredictor(JNIEnv *, jobject,
+Java_com_baidu_paddle_inference_Predictor_createPredictor(JNIEnv *,
+                                                          jobject,
                                                           jlong);
 
 /*
@@ -80,8 +81,10 @@ JNIEXPORT jlong JNICALL Java_com_baidu_paddle_inference_Predictor_getOutputNum(
  * Signature: (JJ)Ljava/lang/String;
  */
 JNIEXPORT jstring JNICALL
-Java_com_baidu_paddle_inference_Predictor_getInputNameByIndex(JNIEnv *, jobject,
-                                                              jlong, jlong);
+Java_com_baidu_paddle_inference_Predictor_getInputNameByIndex(JNIEnv *,
+                                                              jobject,
+                                                              jlong,
+                                                              jlong);
 
 /*
  * Class:     com_baidu_paddle_inference_Predictor
@@ -90,7 +93,8 @@ Java_com_baidu_paddle_inference_Predictor_getInputNameByIndex(JNIEnv *, jobject,
  */
 JNIEXPORT jstring JNICALL
 Java_com_baidu_paddle_inference_Predictor_getOutputNameByIndex(JNIEnv *,
-                                                               jobject, jlong,
+                                                               jobject,
+                                                               jlong,
                                                                jlong);
 
 /*
@@ -100,7 +104,8 @@ Java_com_baidu_paddle_inference_Predictor_getOutputNameByIndex(JNIEnv *,
  */
 JNIEXPORT jlong JNICALL
 Java_com_baidu_paddle_inference_Predictor_getInputHandleByName(JNIEnv *,
-                                                               jobject, jlong,
+                                                               jobject,
+                                                               jlong,
                                                                jstring);
 
 /*
@@ -110,7 +115,8 @@ Java_com_baidu_paddle_inference_Predictor_getInputHandleByName(JNIEnv *,
  */
 JNIEXPORT jlong JNICALL
 Java_com_baidu_paddle_inference_Predictor_getOutputHandleByName(JNIEnv *,
-                                                                jobject, jlong,
+                                                                jobject,
+                                                                jlong,
                                                                 jstring);
 
 /*
@@ -124,4 +130,3 @@ Java_com_baidu_paddle_inference_Predictor_runPD(JNIEnv *, jobject, jlong);
 #ifdef __cplusplus
 }
 #endif
-#endif  // PADDLE_FLUID_INFERENCE_JAVAAPI_NATIVE_COM_BAIDU_PADDLE_INFERENCE_PREDICTOR_H_
diff --git a/paddle/fluid/inference/experimental/javaapi/native/com_baidu_paddle_inference_Tensor.h b/paddle/fluid/inference/experimental/javaapi/native/com_baidu_paddle_inference_Tensor.h
index 632bff067fe377c52a1232ff33262b8239828000..6ddd7b7298197f4a7e225243a77d65980614076c 100644
--- a/paddle/fluid/inference/experimental/javaapi/native/com_baidu_paddle_inference_Tensor.h
+++ b/paddle/fluid/inference/experimental/javaapi/native/com_baidu_paddle_inference_Tensor.h
@@ -16,8 +16,7 @@
 #include <jni.h>
 /* Header for class com_baidu_paddle_inference_Tensor */
 
-#ifndef PADDLE_FLUID_INFERENCE_JAVAAPI_NATIVE_COM_BAIDU_PADDLE_INFERENCE_TENSOR_H_
-#define PADDLE_FLUID_INFERENCE_JAVAAPI_NATIVE_COM_BAIDU_PADDLE_INFERENCE_TENSOR_H_
+#pragma once
 #ifdef __cplusplus
 extern "C" {
 #endif
@@ -43,7 +42,8 @@ JNIEXPORT void JNICALL Java_com_baidu_paddle_inference_Tensor_cppTensorReshape(
  * Signature: (J)[I
  */
 JNIEXPORT jintArray JNICALL
-Java_com_baidu_paddle_inference_Tensor_cppTensorGetShape(JNIEnv *, jobject,
+Java_com_baidu_paddle_inference_Tensor_cppTensorGetShape(JNIEnv *,
+                                                         jobject,
                                                          jlong);
 
 /*
@@ -52,7 +52,8 @@ Java_com_baidu_paddle_inference_Tensor_cppTensorGetShape(JNIEnv *, jobject,
  * Signature: (J)Ljava/lang/String;
  */
 JNIEXPORT jstring JNICALL
-Java_com_baidu_paddle_inference_Tensor_cppTensorGetName(JNIEnv *, jobject,
+Java_com_baidu_paddle_inference_Tensor_cppTensorGetName(JNIEnv *,
+                                                        jobject,
                                                         jlong);
 
 /*
@@ -62,7 +63,8 @@ Java_com_baidu_paddle_inference_Tensor_cppTensorGetName(JNIEnv *, jobject,
  */
 JNIEXPORT void JNICALL
 Java_com_baidu_paddle_inference_Tensor_cppTensorCopyFromCpuFloat(JNIEnv *,
-                                                                 jobject, jlong,
+                                                                 jobject,
+                                                                 jlong,
                                                                  jfloatArray);
 
 /*
@@ -72,7 +74,8 @@ Java_com_baidu_paddle_inference_Tensor_cppTensorCopyFromCpuFloat(JNIEnv *,
  */
 JNIEXPORT void JNICALL
 Java_com_baidu_paddle_inference_Tensor_cppTensorCopyFromCpuInt(JNIEnv *,
-                                                               jobject, jlong,
+                                                               jobject,
+                                                               jlong,
                                                                jintArray);
 
 /*
@@ -82,7 +85,8 @@ Java_com_baidu_paddle_inference_Tensor_cppTensorCopyFromCpuInt(JNIEnv *,
  */
 JNIEXPORT void JNICALL
 Java_com_baidu_paddle_inference_Tensor_cppTensorCopyFromCpuLong(JNIEnv *,
-                                                                jobject, jlong,
+                                                                jobject,
+                                                                jlong,
                                                                 jlongArray);
 
 /*
@@ -92,7 +96,8 @@ Java_com_baidu_paddle_inference_Tensor_cppTensorCopyFromCpuLong(JNIEnv *,
  */
 JNIEXPORT void JNICALL
 Java_com_baidu_paddle_inference_Tensor_cppTensorCopyFromCpuByte(JNIEnv *,
-                                                                jobject, jlong,
+                                                                jobject,
+                                                                jlong,
                                                                 jbyteArray);
 
 /*
@@ -111,7 +116,8 @@ Java_com_baidu_paddle_inference_Tensor_cppTensorCopyFromCpuBoolean(
  */
 JNIEXPORT void JNICALL
 Java_com_baidu_paddle_inference_Tensor_cppTensorCopyToCpuFloat(JNIEnv *,
-                                                               jobject, jlong,
+                                                               jobject,
+                                                               jlong,
                                                                jfloatArray);
 
 /*
@@ -120,8 +126,10 @@ Java_com_baidu_paddle_inference_Tensor_cppTensorCopyToCpuFloat(JNIEnv *,
  * Signature: (J[I)V
  */
 JNIEXPORT void JNICALL
-Java_com_baidu_paddle_inference_Tensor_cppTensorCopyToCpuInt(JNIEnv *, jobject,
-                                                             jlong, jintArray);
+Java_com_baidu_paddle_inference_Tensor_cppTensorCopyToCpuInt(JNIEnv *,
+                                                             jobject,
+                                                             jlong,
+                                                             jintArray);
 
 /*
  * Class:     com_baidu_paddle_inference_Tensor
@@ -129,7 +137,8 @@ Java_com_baidu_paddle_inference_Tensor_cppTensorCopyToCpuInt(JNIEnv *, jobject,
  * Signature: (J[J)V
  */
 JNIEXPORT void JNICALL
-Java_com_baidu_paddle_inference_Tensor_cppTensorCopyToCpuLong(JNIEnv *, jobject,
+Java_com_baidu_paddle_inference_Tensor_cppTensorCopyToCpuLong(JNIEnv *,
+                                                              jobject,
                                                               jlong,
                                                               jlongArray);
 
@@ -139,7 +148,8 @@ Java_com_baidu_paddle_inference_Tensor_cppTensorCopyToCpuLong(JNIEnv *, jobject,
  * Signature: (J[B)V
  */
 JNIEXPORT void JNICALL
-Java_com_baidu_paddle_inference_Tensor_cppTensorCopyToCpuByte(JNIEnv *, jobject,
+Java_com_baidu_paddle_inference_Tensor_cppTensorCopyToCpuByte(JNIEnv *,
+                                                              jobject,
                                                               jlong,
                                                               jbyteArray);
 
@@ -150,10 +160,10 @@ Java_com_baidu_paddle_inference_Tensor_cppTensorCopyToCpuByte(JNIEnv *, jobject,
  */
 JNIEXPORT void JNICALL
 Java_com_baidu_paddle_inference_Tensor_cppTensorCopyToCpuBoolean(JNIEnv *,
-                                                                 jobject, jlong,
+                                                                 jobject,
+                                                                 jlong,
                                                                  jbooleanArray);
 
 #ifdef __cplusplus
 }
 #endif
-#endif  // PADDLE_FLUID_INFERENCE_JAVAAPI_NATIVE_COM_BAIDU_PADDLE_INFERENCE_TENSOR_H_
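
Note for reviewers: beyond clang-format's one-argument-per-line rewrapping, the patch makes two substantive style changes: C-style casts such as (float)i and float(idx) become static_cast<float>(...), and the machine-generated JNI headers replace their #ifndef/#define/#endif include guards with #pragma once. A minimal standalone C++ sketch of both idioms; the file and function names below are illustrative only and do not appear in the patch:

    // example.h -- illustrative only, not part of the patch.
    #pragma once  // one line replaces the #ifndef/#define/#endif guard triple
    
    #include <cstdint>
    
    inline float IndexAsFloat(int64_t i) {
      // static_cast makes the narrowing int64_t -> float conversion explicit
      // and greppable, unlike the C-style spellings (float)i or float(i).
      return static_cast<float>(i);
    }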