Unverified commit 2e79f81c, authored by wangzhen38, committed by GitHub

【code format】fix cpplint style 6 (#43740)

* fix cpplint style 6

* fix cpplint style 6
Parent fcc8a87b
@@ -51,14 +51,16 @@ void CreateVarsOnScope(framework::Scope* scope, platform::CPUPlace* place) {
   x_var->GetMutable<framework::LoDTensor>();
 }
 
-void InitTensorsOnClient(framework::Scope* scope, platform::CPUPlace* place,
+void InitTensorsOnClient(framework::Scope* scope,
+                         platform::CPUPlace* place,
                          int64_t rows_numel) {
   CreateVarsOnScope(scope, place);
   auto x_var = scope->Var("x")->GetMutable<framework::LoDTensor>();
   float* x_ptr =
       x_var->mutable_data<float>(framework::DDim({1, rows_numel}), *place);
-  for (int64_t i = 0; i < rows_numel; ++i) x_ptr[i] = 1.0 * (float)i;
+  for (int64_t i = 0; i < rows_numel; ++i)
+    x_ptr[i] = 1.0 * static_cast<float>(i);
 }
 
 void GetDownpourDenseTableProto(
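
Note on the cast changes in this file: cpplint (readability/casting) flags C-style casts such as (float)i because they can silently perform reinterpretations or drop qualifiers, while static_cast states the intended conversion and is rejected at compile time when the conversion is not meaningful. The same fix is applied to the EXPECT_FLOAT_EQ checks further down. A minimal illustration, with hypothetical values not taken from the test:

    int64_t i = 7;
    float a = (float)i;               // legal, but flagged by cpplint
    float b = static_cast<float>(i);  // same conversion, explicit intent
    // Unlike a C-style cast, static_cast refuses unrelated conversions:
    // double* p = static_cast<double*>(&b);  // error: does not compile
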
@@ -142,7 +144,7 @@ void GetDownpourDenseTableProto(
 /*-------------------------------------------------------------------------*/
-std::string ip_ = "127.0.0.1";
+const char* ip_ = "127.0.0.1";
 uint32_t port_ = 4214;
 std::vector<std::string> host_sign_list_;
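
Note on the ip_ change: cpplint (runtime/string) warns on file-scope std::string objects because they run a constructor before main() and a destructor at exit, with unspecified ordering across translation units, whereas a string literal bound to const char* is trivially initialized. A sketch of the distinction, with illustrative names:

    #include <cstdint>
    #include <string>

    // Flagged by cpplint: dynamic initialization at file scope.
    std::string bad_ip = "127.0.0.1";

    // Preferred: the literal is baked into the binary, no ctor/dtor.
    const char* good_ip = "127.0.0.1";

    // When string operations are needed, build a std::string at use time:
    std::string MakeEndpoint(uint16_t port) {
      return std::string(good_ip) + ":" + std::to_string(port);
    }
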
@@ -237,7 +239,7 @@ void RunBrpcPushDense() {
   pull_status.wait();
   for (int64_t idx = 0; idx < tensor->numel(); ++idx) {
-    EXPECT_FLOAT_EQ(w[idx], float(idx));
+    EXPECT_FLOAT_EQ(w[idx], static_cast<float>(idx));
   }
 
   /*-----------------------Test Push Grad----------------------------------*/
@@ -266,7 +268,7 @@ void RunBrpcPushDense() {
   pull_update_status.wait();
   for (int64_t idx = 0; idx < tensor->numel(); ++idx) {
-    EXPECT_FLOAT_EQ(w[idx], float(idx) - 1.0);
+    EXPECT_FLOAT_EQ(w[idx], static_cast<float>(idx) - 1.0);
   }
 
   LOG(INFO) << "Run stop_server";

paddle/fluid/inference/javaapi/native/com_baidu_paddle_inference_Config.h
@@ -15,9 +15,8 @@
 /* DO NOT EDIT THIS FILE - it is machine generated */
 #include <jni.h>
 /* Header for class com_baidu_paddle_inference_Config */
-#ifndef PADDLE_FLUID_INFERENCE_JAVAAPI_NATIVE_COM_BAIDU_PADDLE_INFERENCE_CONFIG_H_
-#define PADDLE_FLUID_INFERENCE_JAVAAPI_NATIVE_COM_BAIDU_PADDLE_INFERENCE_CONFIG_H_
+#pragma once
 #ifdef __cplusplus
 extern "C" {
 #endif
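
Note on the header-guard changes in these three JNI headers: cpplint (build/header_guard) expects an include-guard macro derived exactly from the file path, which is awkward for machine-generated headers; the commit switches to #pragma once, the idiom already used throughout Paddle. Both forms prevent double inclusion; #pragma once is non-standard but supported by all mainstream compilers. Schematically:

    // Include-guard form: macro must be unique and path-derived.
    #ifndef PATH_DERIVED_GUARD_H_
    #define PATH_DERIVED_GUARD_H_
    // ... declarations ...
    #endif  // PATH_DERIVED_GUARD_H_

    // Equivalent effect with less ceremony:
    #pragma once
    // ... declarations ...
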
@@ -43,7 +42,8 @@ Java_com_baidu_paddle_inference_Config_createCppConfig(JNIEnv *, jobject);
  * Signature: (J)Z
  */
 JNIEXPORT jboolean JNICALL
-Java_com_baidu_paddle_inference_Config_isCppConfigValid(JNIEnv *, jobject,
-                                                        jlong);
+Java_com_baidu_paddle_inference_Config_isCppConfigValid(JNIEnv *,
+                                                        jobject,
+                                                        jlong);
 
 /*
@@ -110,7 +110,8 @@ Java_com_baidu_paddle_inference_Config_paramsFile(JNIEnv *, jobject, jlong);
 JNIEXPORT void JNICALL
 Java_com_baidu_paddle_inference_Config_setCpuMathLibraryNumThreads(JNIEnv *,
                                                                    jobject,
-                                                                   jlong, jint);
+                                                                   jlong,
+                                                                   jint);
 
 /*
  * Class: com_baidu_paddle_inference_Config
@@ -119,7 +120,8 @@ Java_com_baidu_paddle_inference_Config_setCpuMathLibraryNumThreads(JNIEnv *,
  */
 JNIEXPORT jint JNICALL
 Java_com_baidu_paddle_inference_Config_cpuMathLibraryNumThreads(JNIEnv *,
-                                                                jobject, jlong);
+                                                                jobject,
+                                                                jlong);
 
 /*
  * Class: com_baidu_paddle_inference_Config
@@ -143,7 +145,8 @@ Java_com_baidu_paddle_inference_Config_mkldnnEnabled(JNIEnv *, jobject, jlong);
  * Signature: (J)V
  */
 JNIEXPORT void JNICALL
-Java_com_baidu_paddle_inference_Config_enableMkldnnBfloat16(JNIEnv *, jobject,
-                                                            jlong);
+Java_com_baidu_paddle_inference_Config_enableMkldnnBfloat16(JNIEnv *,
+                                                            jobject,
+                                                            jlong);
 
 /*
@@ -152,7 +155,8 @@ Java_com_baidu_paddle_inference_Config_enableMkldnnBfloat16(JNIEnv *, jobject,
  * Signature: (J)Z
  */
 JNIEXPORT jboolean JNICALL
-Java_com_baidu_paddle_inference_Config_mkldnnBfloat16Enabled(JNIEnv *, jobject,
-                                                             jlong);
+Java_com_baidu_paddle_inference_Config_mkldnnBfloat16Enabled(JNIEnv *,
+                                                             jobject,
+                                                             jlong);
 
 /*
@@ -193,7 +197,8 @@ Java_com_baidu_paddle_inference_Config_gpuDeviceId(JNIEnv *, jobject, jlong);
  * Signature: (J)I
  */
 JNIEXPORT jint JNICALL
-Java_com_baidu_paddle_inference_Config_memoryPoolInitSizeMb(JNIEnv *, jobject,
-                                                            jlong);
+Java_com_baidu_paddle_inference_Config_memoryPoolInitSizeMb(JNIEnv *,
+                                                            jobject,
+                                                            jlong);
 
 /*
@@ -244,7 +249,8 @@ JNIEXPORT void JNICALL Java_com_baidu_paddle_inference_Config_enableMemoryOptim(
  * Signature: (J)Z
  */
 JNIEXPORT jboolean JNICALL
-Java_com_baidu_paddle_inference_Config_memoryOptimEnabled(JNIEnv *, jobject,
-                                                          jlong);
+Java_com_baidu_paddle_inference_Config_memoryOptimEnabled(JNIEnv *,
+                                                          jobject,
+                                                          jlong);
 
 /*
@@ -282,4 +288,3 @@ Java_com_baidu_paddle_inference_Config_summary(JNIEnv *, jobject, jlong);
 #ifdef __cplusplus
 }
 #endif
-#endif  // PADDLE_FLUID_INFERENCE_JAVAAPI_NATIVE_COM_BAIDU_PADDLE_INFERENCE_CONFIG_H_
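
The remaining hunks in this header (and in the two headers below) are pure reflows: once a prototype exceeds the 80-column limit, clang-format breaks the parameters one per line. Each of these exports receives the C++ object as a jlong handle; a minimal sketch of how such a declaration is typically implemented on the native side (the handle-unwrapping convention is an assumption for illustration; paddle_infer::Config is the public inference config class):

    #include <jni.h>
    #include "paddle_inference_api.h"

    JNIEXPORT jboolean JNICALL
    Java_com_baidu_paddle_inference_Config_isCppConfigValid(JNIEnv *env,
                                                            jobject obj,
                                                            jlong handle) {
      // Assumption: the jlong carries a paddle_infer::Config* across JNI.
      auto *config = reinterpret_cast<paddle_infer::Config *>(handle);
      return static_cast<jboolean>(config != nullptr);
    }
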

paddle/fluid/inference/javaapi/native/com_baidu_paddle_inference_Predictor.h

@@ -16,8 +16,7 @@
 #include <jni.h>
 /* Header for class com_baidu_paddle_inference_Predictor */
-#ifndef PADDLE_FLUID_INFERENCE_JAVAAPI_NATIVE_COM_BAIDU_PADDLE_INFERENCE_PREDICTOR_H_
-#define PADDLE_FLUID_INFERENCE_JAVAAPI_NATIVE_COM_BAIDU_PADDLE_INFERENCE_PREDICTOR_H_
+#pragma once
 #ifdef __cplusplus
 extern "C" {
 #endif
@@ -27,7 +26,8 @@ extern "C" {
  * Signature: (J)V
  */
 JNIEXPORT void JNICALL
-Java_com_baidu_paddle_inference_Predictor_cppPredictorDestroy(JNIEnv *, jobject,
-                                                              jlong);
+Java_com_baidu_paddle_inference_Predictor_cppPredictorDestroy(JNIEnv *,
+                                                              jobject,
+                                                              jlong);
 
 /*
@@ -55,7 +55,8 @@ Java_com_baidu_paddle_inference_Predictor_predictorClearIntermediateTensor(
  * Signature: (J)J
  */
 JNIEXPORT jlong JNICALL
-Java_com_baidu_paddle_inference_Predictor_createPredictor(JNIEnv *, jobject,
-                                                          jlong);
+Java_com_baidu_paddle_inference_Predictor_createPredictor(JNIEnv *,
+                                                          jobject,
+                                                          jlong);
 
 /*
@@ -80,8 +81,10 @@ JNIEXPORT jlong JNICALL Java_com_baidu_paddle_inference_Predictor_getOutputNum(
  * Signature: (JJ)Ljava/lang/String;
  */
 JNIEXPORT jstring JNICALL
-Java_com_baidu_paddle_inference_Predictor_getInputNameByIndex(JNIEnv *, jobject,
-                                                              jlong, jlong);
+Java_com_baidu_paddle_inference_Predictor_getInputNameByIndex(JNIEnv *,
+                                                              jobject,
+                                                              jlong,
+                                                              jlong);
 
 /*
 * Class: com_baidu_paddle_inference_Predictor
@@ -90,7 +93,8 @@ Java_com_baidu_paddle_inference_Predictor_getInputNameByIndex(JNIEnv *, jobject,
  */
 JNIEXPORT jstring JNICALL
 Java_com_baidu_paddle_inference_Predictor_getOutputNameByIndex(JNIEnv *,
-                                                               jobject, jlong,
+                                                               jobject,
+                                                               jlong,
                                                                jlong);
 
 /*
@@ -100,7 +104,8 @@ Java_com_baidu_paddle_inference_Predictor_getOutputNameByIndex(JNIEnv *,
  */
 JNIEXPORT jlong JNICALL
 Java_com_baidu_paddle_inference_Predictor_getInputHandleByName(JNIEnv *,
-                                                               jobject, jlong,
+                                                               jobject,
+                                                               jlong,
                                                                jstring);
 
 /*
@@ -110,7 +115,8 @@ Java_com_baidu_paddle_inference_Predictor_getInputHandleByName(JNIEnv *,
  */
 JNIEXPORT jlong JNICALL
 Java_com_baidu_paddle_inference_Predictor_getOutputHandleByName(JNIEnv *,
-                                                                jobject, jlong,
+                                                                jobject,
+                                                                jlong,
                                                                 jstring);
 
 /*
@@ -124,4 +130,3 @@ Java_com_baidu_paddle_inference_Predictor_runPD(JNIEnv *, jobject, jlong);
 #ifdef __cplusplus
 }
 #endif
-#endif  // PADDLE_FLUID_INFERENCE_JAVAAPI_NATIVE_COM_BAIDU_PADDLE_INFERENCE_PREDICTOR_H_

paddle/fluid/inference/javaapi/native/com_baidu_paddle_inference_Tensor.h

@@ -16,8 +16,7 @@
 #include <jni.h>
 /* Header for class com_baidu_paddle_inference_Tensor */
-#ifndef PADDLE_FLUID_INFERENCE_JAVAAPI_NATIVE_COM_BAIDU_PADDLE_INFERENCE_TENSOR_H_
-#define PADDLE_FLUID_INFERENCE_JAVAAPI_NATIVE_COM_BAIDU_PADDLE_INFERENCE_TENSOR_H_
+#pragma once
 #ifdef __cplusplus
 extern "C" {
 #endif
@@ -43,7 +42,8 @@ JNIEXPORT void JNICALL Java_com_baidu_paddle_inference_Tensor_cppTensorReshape(
  * Signature: (J)[I
  */
 JNIEXPORT jintArray JNICALL
-Java_com_baidu_paddle_inference_Tensor_cppTensorGetShape(JNIEnv *, jobject,
-                                                         jlong);
+Java_com_baidu_paddle_inference_Tensor_cppTensorGetShape(JNIEnv *,
+                                                         jobject,
+                                                         jlong);
 
 /*
@@ -52,7 +52,8 @@ Java_com_baidu_paddle_inference_Tensor_cppTensorGetShape(JNIEnv *, jobject,
  * Signature: (J)Ljava/lang/String;
  */
 JNIEXPORT jstring JNICALL
-Java_com_baidu_paddle_inference_Tensor_cppTensorGetName(JNIEnv *, jobject,
-                                                        jlong);
+Java_com_baidu_paddle_inference_Tensor_cppTensorGetName(JNIEnv *,
+                                                        jobject,
+                                                        jlong);
 
 /*
@@ -62,7 +63,8 @@ Java_com_baidu_paddle_inference_Tensor_cppTensorGetName(JNIEnv *, jobject,
  */
 JNIEXPORT void JNICALL
 Java_com_baidu_paddle_inference_Tensor_cppTensorCopyFromCpuFloat(JNIEnv *,
-                                                                 jobject, jlong,
+                                                                 jobject,
+                                                                 jlong,
                                                                  jfloatArray);
 
 /*
@@ -72,7 +74,8 @@ Java_com_baidu_paddle_inference_Tensor_cppTensorCopyFromCpuFloat(JNIEnv *,
  */
 JNIEXPORT void JNICALL
 Java_com_baidu_paddle_inference_Tensor_cppTensorCopyFromCpuInt(JNIEnv *,
-                                                               jobject, jlong,
+                                                               jobject,
+                                                               jlong,
                                                                jintArray);
 
 /*
@@ -82,7 +85,8 @@ Java_com_baidu_paddle_inference_Tensor_cppTensorCopyFromCpuInt(JNIEnv *,
  */
 JNIEXPORT void JNICALL
 Java_com_baidu_paddle_inference_Tensor_cppTensorCopyFromCpuLong(JNIEnv *,
-                                                                jobject, jlong,
+                                                                jobject,
+                                                                jlong,
                                                                 jlongArray);
 
 /*
@@ -92,7 +96,8 @@ Java_com_baidu_paddle_inference_Tensor_cppTensorCopyFromCpuLong(JNIEnv *,
  */
 JNIEXPORT void JNICALL
 Java_com_baidu_paddle_inference_Tensor_cppTensorCopyFromCpuByte(JNIEnv *,
-                                                                jobject, jlong,
+                                                                jobject,
+                                                                jlong,
                                                                 jbyteArray);
 
 /*
@@ -111,7 +116,8 @@ Java_com_baidu_paddle_inference_Tensor_cppTensorCopyFromCpuBoolean(
  */
 JNIEXPORT void JNICALL
 Java_com_baidu_paddle_inference_Tensor_cppTensorCopyToCpuFloat(JNIEnv *,
-                                                               jobject, jlong,
+                                                               jobject,
+                                                               jlong,
                                                                jfloatArray);
 
 /*
@@ -120,8 +126,10 @@ Java_com_baidu_paddle_inference_Tensor_cppTensorCopyToCpuFloat(JNIEnv *,
  * Signature: (J[I)V
  */
 JNIEXPORT void JNICALL
-Java_com_baidu_paddle_inference_Tensor_cppTensorCopyToCpuInt(JNIEnv *, jobject,
-                                                             jlong, jintArray);
+Java_com_baidu_paddle_inference_Tensor_cppTensorCopyToCpuInt(JNIEnv *,
+                                                             jobject,
+                                                             jlong,
+                                                             jintArray);
 
 /*
  * Class: com_baidu_paddle_inference_Tensor
@@ -129,7 +137,8 @@ Java_com_baidu_paddle_inference_Tensor_cppTensorCopyToCpuInt(JNIEnv *, jobject,
  * Signature: (J[J)V
  */
 JNIEXPORT void JNICALL
-Java_com_baidu_paddle_inference_Tensor_cppTensorCopyToCpuLong(JNIEnv *, jobject,
+Java_com_baidu_paddle_inference_Tensor_cppTensorCopyToCpuLong(JNIEnv *,
+                                                              jobject,
                                                               jlong,
                                                               jlongArray);
@@ -139,7 +148,8 @@ Java_com_baidu_paddle_inference_Tensor_cppTensorCopyToCpuLong(JNIEnv *, jobject,
  * Signature: (J[B)V
  */
 JNIEXPORT void JNICALL
-Java_com_baidu_paddle_inference_Tensor_cppTensorCopyToCpuByte(JNIEnv *, jobject,
+Java_com_baidu_paddle_inference_Tensor_cppTensorCopyToCpuByte(JNIEnv *,
+                                                              jobject,
                                                               jlong,
                                                               jbyteArray);
@@ -150,10 +160,10 @@ Java_com_baidu_paddle_inference_Tensor_cppTensorCopyToCpuByte(JNIEnv *, jobject,
  */
 JNIEXPORT void JNICALL
 Java_com_baidu_paddle_inference_Tensor_cppTensorCopyToCpuBoolean(JNIEnv *,
-                                                                 jobject, jlong,
+                                                                 jobject,
+                                                                 jlong,
                                                                  jbooleanArray);
 
 #ifdef __cplusplus
 }
 #endif
-#endif  // PADDLE_FLUID_INFERENCE_JAVAAPI_NATIVE_COM_BAIDU_PADDLE_INFERENCE_TENSOR_H_
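
For orientation, the Tensor exports above mirror the paddle_infer::Tensor CPU copy API. A hedged sketch of the C++ calls a wrapper pair like cppTensorCopyFromCpuFloat / cppTensorCopyToCpuFloat presumably forwards to (the function and variable names here are illustrative; Reshape, CopyFromCpu, CopyToCpu, and shape are the public paddle_infer::Tensor methods):

    #include <vector>
    #include "paddle_inference_api.h"

    void RoundTripFloat(paddle_infer::Tensor* in, paddle_infer::Tensor* out) {
      std::vector<float> src = {1.0f, 2.0f, 3.0f, 4.0f};
      in->Reshape({2, 2});          // set the layout before copying in
      in->CopyFromCpu(src.data());  // host buffer -> tensor

      std::vector<int> shape = out->shape();
      int numel = 1;
      for (int d : shape) numel *= d;
      std::vector<float> dst(numel);
      out->CopyToCpu(dst.data());   // tensor -> host buffer
    }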