From 2e79f81c1ef708cba5d5fa280368910229ce802e Mon Sep 17 00:00:00 2001
From: wangzhen38 <41941775+wangzhen38@users.noreply.github.com>
Date: Wed, 22 Jun 2022 14:16:42 +0800
Subject: [PATCH] 【code format】fix cpplint style 6 (#43740)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* fix cpplint style 6

* fix cpplint style 6
---
 .../test/brpc_service_dense_sgd_test.cc      | 12 +++---
 .../com_baidu_paddle_inference_Config.h      | 25 +++++++-----
 .../com_baidu_paddle_inference_Predictor.h   | 25 +++++++-----
 .../com_baidu_paddle_inference_Tensor.h      | 40 ++++++++++++-------
 4 files changed, 62 insertions(+), 40 deletions(-)

diff --git a/paddle/fluid/distributed/test/brpc_service_dense_sgd_test.cc b/paddle/fluid/distributed/test/brpc_service_dense_sgd_test.cc
index b87f308aa6..b7f7df4627 100644
--- a/paddle/fluid/distributed/test/brpc_service_dense_sgd_test.cc
+++ b/paddle/fluid/distributed/test/brpc_service_dense_sgd_test.cc
@@ -51,14 +51,16 @@ void CreateVarsOnScope(framework::Scope* scope, platform::CPUPlace* place) {
   x_var->GetMutable<framework::LoDTensor>();
 }
 
-void InitTensorsOnClient(framework::Scope* scope, platform::CPUPlace* place,
+void InitTensorsOnClient(framework::Scope* scope,
+                         platform::CPUPlace* place,
                          int64_t rows_numel) {
   CreateVarsOnScope(scope, place);
 
   auto x_var = scope->Var("x")->GetMutable<framework::LoDTensor>();
   float* x_ptr =
       x_var->mutable_data<float>(framework::DDim({1, rows_numel}), *place);
-  for (int64_t i = 0; i < rows_numel; ++i) x_ptr[i] = 1.0 * (float)i;
+  for (int64_t i = 0; i < rows_numel; ++i)
+    x_ptr[i] = 1.0 * static_cast<float>(i);
 }
 
 void GetDownpourDenseTableProto(
@@ -142,7 +144,7 @@ void GetDownpourDenseTableProto(
 
 /*-------------------------------------------------------------------------*/
 
-std::string ip_ = "127.0.0.1";
+const char* ip_ = "127.0.0.1";
 uint32_t port_ = 4214;
 
 std::vector<std::string> host_sign_list_;
@@ -237,7 +239,7 @@ void RunBrpcPushDense() {
   pull_status.wait();
 
   for (int64_t idx = 0; idx < tensor->numel(); ++idx) {
-    EXPECT_FLOAT_EQ(w[idx], float(idx));
+    EXPECT_FLOAT_EQ(w[idx], static_cast<float>(idx));
   }
 
   /*-----------------------Test Push Grad----------------------------------*/
@@ -266,7 +268,7 @@ void RunBrpcPushDense() {
   pull_update_status.wait();
 
   for (int64_t idx = 0; idx < tensor->numel(); ++idx) {
-    EXPECT_FLOAT_EQ(w[idx], float(idx) - 1.0);
+    EXPECT_FLOAT_EQ(w[idx], static_cast<float>(idx) - 1.0);
   }
 
   LOG(INFO) << "Run stop_server";
diff --git a/paddle/fluid/inference/experimental/javaapi/native/com_baidu_paddle_inference_Config.h b/paddle/fluid/inference/experimental/javaapi/native/com_baidu_paddle_inference_Config.h
index 648174d01d..6ad2097c88 100644
--- a/paddle/fluid/inference/experimental/javaapi/native/com_baidu_paddle_inference_Config.h
+++ b/paddle/fluid/inference/experimental/javaapi/native/com_baidu_paddle_inference_Config.h
@@ -15,9 +15,8 @@
 /* DO NOT EDIT THIS FILE - it is machine generated */
 #include <jni.h>
 /* Header for class com_baidu_paddle_inference_Config */
+#pragma once
 
-#ifndef PADDLE_FLUID_INFERENCE_JAVAAPI_NATIVE_COM_BAIDU_PADDLE_INFERENCE_CONFIG_H_
-#define PADDLE_FLUID_INFERENCE_JAVAAPI_NATIVE_COM_BAIDU_PADDLE_INFERENCE_CONFIG_H_
 #ifdef __cplusplus
 extern "C" {
 #endif
@@ -43,7 +42,8 @@ Java_com_baidu_paddle_inference_Config_createCppConfig(JNIEnv *, jobject);
  * Signature: (J)Z
  */
 JNIEXPORT jboolean JNICALL
-Java_com_baidu_paddle_inference_Config_isCppConfigValid(JNIEnv *, jobject,
+Java_com_baidu_paddle_inference_Config_isCppConfigValid(JNIEnv *,
+                                                        jobject,
                                                         jlong);
 
 /*
@@ -110,7 +110,8 @@ Java_com_baidu_paddle_inference_Config_paramsFile(JNIEnv *, jobject, jlong);
 JNIEXPORT void JNICALL
 Java_com_baidu_paddle_inference_Config_setCpuMathLibraryNumThreads(JNIEnv *,
                                                                    jobject,
-                                                                   jlong, jint);
+                                                                   jlong,
+                                                                   jint);
 
 /*
  * Class:     com_baidu_paddle_inference_Config
@@ -119,7 +120,8 @@ Java_com_baidu_paddle_inference_Config_setCpuMathLibraryNumThreads(JNIEnv *,
  */
 JNIEXPORT jint JNICALL
 Java_com_baidu_paddle_inference_Config_cpuMathLibraryNumThreads(JNIEnv *,
-                                                                jobject, jlong);
+                                                                jobject,
+                                                                jlong);
 
 /*
  * Class:     com_baidu_paddle_inference_Config
@@ -143,7 +145,8 @@ Java_com_baidu_paddle_inference_Config_mkldnnEnabled(JNIEnv *, jobject, jlong);
  * Signature: (J)V
  */
 JNIEXPORT void JNICALL
-Java_com_baidu_paddle_inference_Config_enableMkldnnBfloat16(JNIEnv *, jobject,
+Java_com_baidu_paddle_inference_Config_enableMkldnnBfloat16(JNIEnv *,
+                                                            jobject,
                                                             jlong);
 
 /*
@@ -152,7 +155,8 @@ Java_com_baidu_paddle_inference_Config_enableMkldnnBfloat16(JNIEnv *, jobject,
  * Signature: (J)Z
  */
 JNIEXPORT jboolean JNICALL
-Java_com_baidu_paddle_inference_Config_mkldnnBfloat16Enabled(JNIEnv *, jobject,
+Java_com_baidu_paddle_inference_Config_mkldnnBfloat16Enabled(JNIEnv *,
+                                                             jobject,
                                                              jlong);
 
 /*
@@ -193,7 +197,8 @@ Java_com_baidu_paddle_inference_Config_gpuDeviceId(JNIEnv *, jobject, jlong);
  * Signature: (J)I
  */
 JNIEXPORT jint JNICALL
-Java_com_baidu_paddle_inference_Config_memoryPoolInitSizeMb(JNIEnv *, jobject,
+Java_com_baidu_paddle_inference_Config_memoryPoolInitSizeMb(JNIEnv *,
+                                                            jobject,
                                                             jlong);
 
 /*
@@ -244,7 +249,8 @@ JNIEXPORT void JNICALL Java_com_baidu_paddle_inference_Config_enableMemoryOptim(
  * Signature: (J)Z
  */
 JNIEXPORT jboolean JNICALL
-Java_com_baidu_paddle_inference_Config_memoryOptimEnabled(JNIEnv *, jobject,
+Java_com_baidu_paddle_inference_Config_memoryOptimEnabled(JNIEnv *,
+                                                          jobject,
                                                           jlong);
 
 /*
@@ -282,4 +288,3 @@ Java_com_baidu_paddle_inference_Config_summary(JNIEnv *, jobject, jlong);
 #ifdef __cplusplus
 }
 #endif
-#endif  // PADDLE_FLUID_INFERENCE_JAVAAPI_NATIVE_COM_BAIDU_PADDLE_INFERENCE_CONFIG_H_
diff --git a/paddle/fluid/inference/experimental/javaapi/native/com_baidu_paddle_inference_Predictor.h b/paddle/fluid/inference/experimental/javaapi/native/com_baidu_paddle_inference_Predictor.h
index fa38bba0f1..da44efc8b8 100644
--- a/paddle/fluid/inference/experimental/javaapi/native/com_baidu_paddle_inference_Predictor.h
+++ b/paddle/fluid/inference/experimental/javaapi/native/com_baidu_paddle_inference_Predictor.h
@@ -16,8 +16,7 @@
 #include <jni.h>
 /* Header for class com_baidu_paddle_inference_Predictor */
 
-#ifndef PADDLE_FLUID_INFERENCE_JAVAAPI_NATIVE_COM_BAIDU_PADDLE_INFERENCE_PREDICTOR_H_
-#define PADDLE_FLUID_INFERENCE_JAVAAPI_NATIVE_COM_BAIDU_PADDLE_INFERENCE_PREDICTOR_H_
+#pragma once
 #ifdef __cplusplus
 extern "C" {
 #endif
@@ -27,7 +26,8 @@ extern "C" {
  * Signature: (J)V
  */
 JNIEXPORT void JNICALL
-Java_com_baidu_paddle_inference_Predictor_cppPredictorDestroy(JNIEnv *, jobject,
+Java_com_baidu_paddle_inference_Predictor_cppPredictorDestroy(JNIEnv *,
+                                                              jobject,
                                                               jlong);
 
 /*
@@ -55,7 +55,8 @@ Java_com_baidu_paddle_inference_Predictor_predictorClearIntermediateTensor(
  * Signature: (J)J
  */
 JNIEXPORT jlong JNICALL
-Java_com_baidu_paddle_inference_Predictor_createPredictor(JNIEnv *, jobject,
+Java_com_baidu_paddle_inference_Predictor_createPredictor(JNIEnv *,
+                                                          jobject,
                                                           jlong);
 
 /*
@@ -80,8 +81,10 @@ JNIEXPORT jlong JNICALL Java_com_baidu_paddle_inference_Predictor_getOutputNum(
  * Signature: (JJ)Ljava/lang/String;
  */
 JNIEXPORT jstring JNICALL
-Java_com_baidu_paddle_inference_Predictor_getInputNameByIndex(JNIEnv *, jobject,
-                                                              jlong, jlong);
+Java_com_baidu_paddle_inference_Predictor_getInputNameByIndex(JNIEnv *,
+                                                              jobject,
+                                                              jlong,
+                                                              jlong);
 
 /*
  * Class:     com_baidu_paddle_inference_Predictor
@@ -90,7 +93,8 @@ Java_com_baidu_paddle_inference_Predictor_getInputNameByIndex(JNIEnv *, jobject,
  */
 JNIEXPORT jstring JNICALL
 Java_com_baidu_paddle_inference_Predictor_getOutputNameByIndex(JNIEnv *,
-                                                               jobject, jlong,
+                                                               jobject,
+                                                               jlong,
                                                                jlong);
 
 /*
@@ -100,7 +104,8 @@ Java_com_baidu_paddle_inference_Predictor_getOutputNameByIndex(JNIEnv *,
  */
 JNIEXPORT jlong JNICALL
 Java_com_baidu_paddle_inference_Predictor_getInputHandleByName(JNIEnv *,
-                                                               jobject, jlong,
+                                                               jobject,
+                                                               jlong,
                                                                jstring);
 
 /*
@@ -110,7 +115,8 @@ Java_com_baidu_paddle_inference_Predictor_getInputHandleByName(JNIEnv *,
  */
 JNIEXPORT jlong JNICALL
 Java_com_baidu_paddle_inference_Predictor_getOutputHandleByName(JNIEnv *,
-                                                                jobject, jlong,
+                                                                jobject,
+                                                                jlong,
                                                                 jstring);
 
 /*
@@ -124,4 +130,3 @@ Java_com_baidu_paddle_inference_Predictor_runPD(JNIEnv *, jobject, jlong);
 #ifdef __cplusplus
 }
 #endif
-#endif  // PADDLE_FLUID_INFERENCE_JAVAAPI_NATIVE_COM_BAIDU_PADDLE_INFERENCE_PREDICTOR_H_
diff --git a/paddle/fluid/inference/experimental/javaapi/native/com_baidu_paddle_inference_Tensor.h b/paddle/fluid/inference/experimental/javaapi/native/com_baidu_paddle_inference_Tensor.h
index 632bff067f..6ddd7b7298 100644
--- a/paddle/fluid/inference/experimental/javaapi/native/com_baidu_paddle_inference_Tensor.h
+++ b/paddle/fluid/inference/experimental/javaapi/native/com_baidu_paddle_inference_Tensor.h
@@ -16,8 +16,7 @@
 #include <jni.h>
 /* Header for class com_baidu_paddle_inference_Tensor */
 
-#ifndef PADDLE_FLUID_INFERENCE_JAVAAPI_NATIVE_COM_BAIDU_PADDLE_INFERENCE_TENSOR_H_
-#define PADDLE_FLUID_INFERENCE_JAVAAPI_NATIVE_COM_BAIDU_PADDLE_INFERENCE_TENSOR_H_
+#pragma once
 #ifdef __cplusplus
 extern "C" {
 #endif
@@ -43,7 +42,8 @@ JNIEXPORT void JNICALL Java_com_baidu_paddle_inference_Tensor_cppTensorReshape(
  * Signature: (J)[I
  */
 JNIEXPORT jintArray JNICALL
-Java_com_baidu_paddle_inference_Tensor_cppTensorGetShape(JNIEnv *, jobject,
+Java_com_baidu_paddle_inference_Tensor_cppTensorGetShape(JNIEnv *,
+                                                         jobject,
                                                          jlong);
 
 /*
@@ -52,7 +52,8 @@ Java_com_baidu_paddle_inference_Tensor_cppTensorGetShape(JNIEnv *, jobject,
  * Signature: (J)Ljava/lang/String;
 */
 JNIEXPORT jstring JNICALL
-Java_com_baidu_paddle_inference_Tensor_cppTensorGetName(JNIEnv *, jobject,
+Java_com_baidu_paddle_inference_Tensor_cppTensorGetName(JNIEnv *,
+                                                        jobject,
                                                         jlong);
 
 /*
@@ -62,7 +63,8 @@ Java_com_baidu_paddle_inference_Tensor_cppTensorGetName(JNIEnv *, jobject,
  */
 JNIEXPORT void JNICALL
 Java_com_baidu_paddle_inference_Tensor_cppTensorCopyFromCpuFloat(JNIEnv *,
-                                                                 jobject, jlong,
+                                                                 jobject,
+                                                                 jlong,
                                                                  jfloatArray);
 
 /*
@@ -72,7 +74,8 @@ Java_com_baidu_paddle_inference_Tensor_cppTensorCopyFromCpuFloat(JNIEnv *,
  */
 JNIEXPORT void JNICALL
 Java_com_baidu_paddle_inference_Tensor_cppTensorCopyFromCpuInt(JNIEnv *,
-                                                               jobject, jlong,
+                                                               jobject,
+                                                               jlong,
                                                                jintArray);
 
 /*
@@ -82,7 +85,8 @@ Java_com_baidu_paddle_inference_Tensor_cppTensorCopyFromCpuInt(JNIEnv *,
  */
 JNIEXPORT void JNICALL
 Java_com_baidu_paddle_inference_Tensor_cppTensorCopyFromCpuLong(JNIEnv *,
-                                                                jobject, jlong,
+                                                                jobject,
+                                                                jlong,
                                                                 jlongArray);
 
 /*
@@ -92,7 +96,8 @@ Java_com_baidu_paddle_inference_Tensor_cppTensorCopyFromCpuLong(JNIEnv *,
  */
 JNIEXPORT void JNICALL
 Java_com_baidu_paddle_inference_Tensor_cppTensorCopyFromCpuByte(JNIEnv *,
-                                                                jobject, jlong,
+                                                                jobject,
+                                                                jlong,
                                                                 jbyteArray);
 
 /*
@@ -111,7 +116,8 @@ Java_com_baidu_paddle_inference_Tensor_cppTensorCopyFromCpuBoolean(
  */
 JNIEXPORT void JNICALL
 Java_com_baidu_paddle_inference_Tensor_cppTensorCopyToCpuFloat(JNIEnv *,
-                                                               jobject, jlong,
+                                                               jobject,
+                                                               jlong,
                                                                jfloatArray);
 
 /*
@@ -120,8 +126,10 @@ Java_com_baidu_paddle_inference_Tensor_cppTensorCopyToCpuFloat(JNIEnv *,
  * Signature: (J[I)V
  */
 JNIEXPORT void JNICALL
-Java_com_baidu_paddle_inference_Tensor_cppTensorCopyToCpuInt(JNIEnv *, jobject,
-                                                             jlong, jintArray);
+Java_com_baidu_paddle_inference_Tensor_cppTensorCopyToCpuInt(JNIEnv *,
+                                                             jobject,
+                                                             jlong,
+                                                             jintArray);
 
 /*
  * Class:     com_baidu_paddle_inference_Tensor
@@ -129,7 +137,8 @@ Java_com_baidu_paddle_inference_Tensor_cppTensorCopyToCpuInt(JNIEnv *, jobject,
  * Signature: (J[J)V
  */
 JNIEXPORT void JNICALL
-Java_com_baidu_paddle_inference_Tensor_cppTensorCopyToCpuLong(JNIEnv *, jobject,
+Java_com_baidu_paddle_inference_Tensor_cppTensorCopyToCpuLong(JNIEnv *,
+                                                              jobject,
                                                               jlong,
                                                               jlongArray);
 
@@ -139,7 +148,8 @@ Java_com_baidu_paddle_inference_Tensor_cppTensorCopyToCpuLong(JNIEnv *, jobject,
 * Signature: (J[B)V
 */
 JNIEXPORT void JNICALL
-Java_com_baidu_paddle_inference_Tensor_cppTensorCopyToCpuByte(JNIEnv *, jobject,
+Java_com_baidu_paddle_inference_Tensor_cppTensorCopyToCpuByte(JNIEnv *,
+                                                              jobject,
                                                               jlong,
                                                               jbyteArray);
 
@@ -150,10 +160,10 @@ Java_com_baidu_paddle_inference_Tensor_cppTensorCopyToCpuByte(JNIEnv *, jobject,
  */
 JNIEXPORT void JNICALL
 Java_com_baidu_paddle_inference_Tensor_cppTensorCopyToCpuBoolean(JNIEnv *,
-                                                                 jobject, jlong,
+                                                                 jobject,
+                                                                 jlong,
                                                                  jbooleanArray);
 
 #ifdef __cplusplus
 }
 #endif
-#endif  // PADDLE_FLUID_INFERENCE_JAVAAPI_NATIVE_COM_BAIDU_PADDLE_INFERENCE_TENSOR_H_
-- 
GitLab
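
A note on the patterns above (an illustrative sketch, not part of the commit):
the hunks make three mechanical style fixes. C-style and functional casts
become static_cast<float>, the std::string global becomes a const char*
(cpplint rejects static/global objects of class type under its runtime/string
check), and the generated JNI headers replace their long include guards with
#pragma once. A minimal self-contained C++ sketch of the cast and global
changes, using a hypothetical helper name that does not appear in the patch:

    #include <cstdint>

    // Old style, flagged by cpplint (readability/casting):
    //   x_ptr[i] = 1.0 * (float)i;             // C-style cast
    //   EXPECT_FLOAT_EQ(w[idx], float(idx));   // functional cast
    // New style, as applied throughout the patch:
    inline float IndexToFloat(int64_t i) {
      return 1.0f * static_cast<float>(i);  // explicit, grep-able conversion
    }

    // Old: std::string ip_ = "127.0.0.1";  // global with a constructor
    // New, as in brpc_service_dense_sgd_test.cc:
    const char* ip_ = "127.0.0.1";  // trivially initialized, no global ctor

The const char* form avoids a dynamic initializer running before main(), which
is why the style checker singles out static std::string objects in particular.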