Unverified · Commit 2e79f81c authored by wangzhen38, committed by GitHub

【code format】fix cpplint style 6 (#43740)

* fix cpplint style 6

* fix cpplint style 6
Parent fcc8a87b
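The hunks below fall into four mechanical cpplint/clang-format categories: C-style casts replaced with `static_cast`, a global `std::string` replaced with a C string constant, `#ifndef` include guards replaced with `#pragma once`, and over-long declarations reflowed to one parameter per line. A minimal compilable sketch of the cast fix (the function below is illustrative, not from the Paddle sources):

```cpp
#include <cassert>
#include <cstdint>
#include <vector>

// cpplint's readability/casting check flags C-style casts such as (float)i
// and function-style casts such as float(idx). The fix used throughout this
// commit is the equivalent C++ cast, static_cast<float>(i), which states the
// intent explicitly and is easy to grep for.
std::vector<float> FillIota(int64_t rows_numel) {
  std::vector<float> x(rows_numel);
  for (int64_t i = 0; i < rows_numel; ++i)
    x[i] = 1.0 * static_cast<float>(i);  // was: x[i] = 1.0 * (float)i;
  return x;
}

int main() {
  assert(FillIota(4)[3] == 3.0f);
  return 0;
}
```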
@@ -51,14 +51,16 @@ void CreateVarsOnScope(framework::Scope* scope, platform::CPUPlace* place) {
   x_var->GetMutable<framework::LoDTensor>();
 }
-void InitTensorsOnClient(framework::Scope* scope, platform::CPUPlace* place,
+void InitTensorsOnClient(framework::Scope* scope,
+                         platform::CPUPlace* place,
                          int64_t rows_numel) {
   CreateVarsOnScope(scope, place);
   auto x_var = scope->Var("x")->GetMutable<framework::LoDTensor>();
   float* x_ptr =
       x_var->mutable_data<float>(framework::DDim({1, rows_numel}), *place);
-  for (int64_t i = 0; i < rows_numel; ++i) x_ptr[i] = 1.0 * (float)i;
+  for (int64_t i = 0; i < rows_numel; ++i)
+    x_ptr[i] = 1.0 * static_cast<float>(i);
 }
 void GetDownpourDenseTableProto(
@@ -142,7 +144,7 @@ void GetDownpourDenseTableProto(
 /*-------------------------------------------------------------------------*/
-std::string ip_ = "127.0.0.1";
+const char* ip_ = "127.0.0.1";
 uint32_t port_ = 4214;
 std::vector<std::string> host_sign_list_;
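The `ip_` hunk above addresses cpplint's `runtime/string` rule: a global `std::string` runs a constructor before `main()` and a destructor at exit, which risks static-initialization-order problems, while a pointer to a string literal needs no dynamic initialization. A sketch of the rule:

```cpp
#include <string>

// Flagged by cpplint runtime/string: a global std::string is dynamically
// initialized at startup and destroyed at exit.
// std::string ip_ = "127.0.0.1";

// Preferred: a C string constant; the literal lives in static storage and
// requires no initialization code. APIs taking std::string still accept it.
const char* ip_ = "127.0.0.1";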
@@ -237,7 +239,7 @@ void RunBrpcPushDense() {
   pull_status.wait();
   for (int64_t idx = 0; idx < tensor->numel(); ++idx) {
-    EXPECT_FLOAT_EQ(w[idx], float(idx));
+    EXPECT_FLOAT_EQ(w[idx], static_cast<float>(idx));
   }
 /*-----------------------Test Push Grad----------------------------------*/
@@ -266,7 +268,7 @@ void RunBrpcPushDense() {
   pull_update_status.wait();
   for (int64_t idx = 0; idx < tensor->numel(); ++idx) {
-    EXPECT_FLOAT_EQ(w[idx], float(idx) - 1.0);
+    EXPECT_FLOAT_EQ(w[idx], static_cast<float>(idx) - 1.0);
   }
   LOG(INFO) << "Run stop_server";
@@ -15,9 +15,8 @@
 /* DO NOT EDIT THIS FILE - it is machine generated */
 #include <jni.h>
 /* Header for class com_baidu_paddle_inference_Config */
-#ifndef PADDLE_FLUID_INFERENCE_JAVAAPI_NATIVE_COM_BAIDU_PADDLE_INFERENCE_CONFIG_H_
-#define PADDLE_FLUID_INFERENCE_JAVAAPI_NATIVE_COM_BAIDU_PADDLE_INFERENCE_CONFIG_H_
+#pragma once
 #ifdef __cplusplus
 extern "C" {
 #endif
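From here on, the diff touches three machine-generated JNI headers (Config, Predictor, Tensor). Each swaps its spelled-out `#ifndef`/`#define`/`#endif` include guard for `#pragma once`, as in the hunk above. Both forms prevent multiple inclusion; `#pragma once` is not in the C++ standard but is supported by GCC, Clang, and MSVC, and it removes the long guard macro whose name must mirror the file path exactly (which is what cpplint's `build/header_guard` check polices). The before/after shape:

```cpp
// Before: a guard macro that must match the header's path.
// #ifndef PADDLE_FLUID_INFERENCE_JAVAAPI_NATIVE_COM_BAIDU_PADDLE_INFERENCE_CONFIG_H_
// #define PADDLE_FLUID_INFERENCE_JAVAAPI_NATIVE_COM_BAIDU_PADDLE_INFERENCE_CONFIG_H_
// ... declarations ...
// #endif  // PADDLE_FLUID_INFERENCE_JAVAAPI_NATIVE_COM_BAIDU_PADDLE_INFERENCE_CONFIG_H_

// After: one line, nothing to keep in sync when the file moves.
#pragma once
```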
@@ -43,7 +42,8 @@ Java_com_baidu_paddle_inference_Config_createCppConfig(JNIEnv *, jobject);
  * Signature: (J)Z
  */
 JNIEXPORT jboolean JNICALL
-Java_com_baidu_paddle_inference_Config_isCppConfigValid(JNIEnv *, jobject,
+Java_com_baidu_paddle_inference_Config_isCppConfigValid(JNIEnv *,
+                                                        jobject,
                                                         jlong);
 /*
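Every remaining hunk in these headers is the same reflow: a JNI declaration that packed several parameters per line is rewritten with one parameter per line, aligned under the opening parenthesis. This is characteristic formatter output; with clang-format, `BinPackParameters: false` produces exactly this layout (whether Paddle's `.clang-format` sets that option is an assumption here, not something this diff shows). Using the declaration from the hunk above:

```cpp
#include <jni.h>

// Before: parameters bin-packed up to the 80-column limit.
// JNIEXPORT jboolean JNICALL
// Java_com_baidu_paddle_inference_Config_isCppConfigValid(JNIEnv *, jobject,
//                                                         jlong);

// After: one parameter per line, aligned with the opening parenthesis.
JNIEXPORT jboolean JNICALL
Java_com_baidu_paddle_inference_Config_isCppConfigValid(JNIEnv *,
                                                        jobject,
                                                        jlong);
```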
@@ -110,7 +110,8 @@ Java_com_baidu_paddle_inference_Config_paramsFile(JNIEnv *, jobject, jlong);
 JNIEXPORT void JNICALL
 Java_com_baidu_paddle_inference_Config_setCpuMathLibraryNumThreads(JNIEnv *,
                                                                    jobject,
-                                                                   jlong, jint);
+                                                                   jlong,
+                                                                   jint);
 /*
  * Class: com_baidu_paddle_inference_Config
@@ -119,7 +120,8 @@ Java_com_baidu_paddle_inference_Config_setCpuMathLibraryNumThreads(JNIEnv *,
  */
 JNIEXPORT jint JNICALL
 Java_com_baidu_paddle_inference_Config_cpuMathLibraryNumThreads(JNIEnv *,
-                                                                jobject, jlong);
+                                                                jobject,
+                                                                jlong);
 /*
  * Class: com_baidu_paddle_inference_Config
@@ -143,7 +145,8 @@ Java_com_baidu_paddle_inference_Config_mkldnnEnabled(JNIEnv *, jobject, jlong);
  * Signature: (J)V
  */
 JNIEXPORT void JNICALL
-Java_com_baidu_paddle_inference_Config_enableMkldnnBfloat16(JNIEnv *, jobject,
+Java_com_baidu_paddle_inference_Config_enableMkldnnBfloat16(JNIEnv *,
+                                                            jobject,
                                                             jlong);
 /*
@@ -152,7 +155,8 @@ Java_com_baidu_paddle_inference_Config_enableMkldnnBfloat16(JNIEnv *, jobject,
  * Signature: (J)Z
  */
 JNIEXPORT jboolean JNICALL
-Java_com_baidu_paddle_inference_Config_mkldnnBfloat16Enabled(JNIEnv *, jobject,
+Java_com_baidu_paddle_inference_Config_mkldnnBfloat16Enabled(JNIEnv *,
+                                                             jobject,
                                                              jlong);
 /*
@@ -193,7 +197,8 @@ Java_com_baidu_paddle_inference_Config_gpuDeviceId(JNIEnv *, jobject, jlong);
  * Signature: (J)I
  */
 JNIEXPORT jint JNICALL
-Java_com_baidu_paddle_inference_Config_memoryPoolInitSizeMb(JNIEnv *, jobject,
+Java_com_baidu_paddle_inference_Config_memoryPoolInitSizeMb(JNIEnv *,
+                                                            jobject,
                                                             jlong);
 /*
@@ -244,7 +249,8 @@ JNIEXPORT void JNICALL Java_com_baidu_paddle_inference_Config_enableMemoryOptim(
  * Signature: (J)Z
  */
 JNIEXPORT jboolean JNICALL
-Java_com_baidu_paddle_inference_Config_memoryOptimEnabled(JNIEnv *, jobject,
+Java_com_baidu_paddle_inference_Config_memoryOptimEnabled(JNIEnv *,
+                                                          jobject,
                                                           jlong);
 /*
@@ -282,4 +288,3 @@ Java_com_baidu_paddle_inference_Config_summary(JNIEnv *, jobject, jlong);
 #ifdef __cplusplus
 }
 #endif
-#endif  // PADDLE_FLUID_INFERENCE_JAVAAPI_NATIVE_COM_BAIDU_PADDLE_INFERENCE_CONFIG_H_
@@ -16,8 +16,7 @@
 #include <jni.h>
 /* Header for class com_baidu_paddle_inference_Predictor */
-#ifndef PADDLE_FLUID_INFERENCE_JAVAAPI_NATIVE_COM_BAIDU_PADDLE_INFERENCE_PREDICTOR_H_
-#define PADDLE_FLUID_INFERENCE_JAVAAPI_NATIVE_COM_BAIDU_PADDLE_INFERENCE_PREDICTOR_H_
+#pragma once
 #ifdef __cplusplus
 extern "C" {
 #endif
@@ -27,7 +26,8 @@ extern "C" {
  * Signature: (J)V
  */
 JNIEXPORT void JNICALL
-Java_com_baidu_paddle_inference_Predictor_cppPredictorDestroy(JNIEnv *, jobject,
+Java_com_baidu_paddle_inference_Predictor_cppPredictorDestroy(JNIEnv *,
+                                                              jobject,
                                                               jlong);
 /*
@@ -55,7 +55,8 @@ Java_com_baidu_paddle_inference_Predictor_predictorClearIntermediateTensor(
  * Signature: (J)J
  */
 JNIEXPORT jlong JNICALL
-Java_com_baidu_paddle_inference_Predictor_createPredictor(JNIEnv *, jobject,
+Java_com_baidu_paddle_inference_Predictor_createPredictor(JNIEnv *,
+                                                          jobject,
                                                           jlong);
 /*
@@ -80,8 +81,10 @@ JNIEXPORT jlong JNICALL Java_com_baidu_paddle_inference_Predictor_getOutputNum(
  * Signature: (JJ)Ljava/lang/String;
  */
 JNIEXPORT jstring JNICALL
-Java_com_baidu_paddle_inference_Predictor_getInputNameByIndex(JNIEnv *, jobject,
-                                                              jlong, jlong);
+Java_com_baidu_paddle_inference_Predictor_getInputNameByIndex(JNIEnv *,
+                                                              jobject,
+                                                              jlong,
+                                                              jlong);
 /*
  * Class: com_baidu_paddle_inference_Predictor
@@ -90,7 +93,8 @@ Java_com_baidu_paddle_inference_Predictor_getInputNameByIndex(JNIEnv *, jobject,
  */
 JNIEXPORT jstring JNICALL
 Java_com_baidu_paddle_inference_Predictor_getOutputNameByIndex(JNIEnv *,
-                                                               jobject, jlong,
+                                                               jobject,
+                                                               jlong,
                                                                jlong);
 /*
@@ -100,7 +104,8 @@ Java_com_baidu_paddle_inference_Predictor_getOutputNameByIndex(JNIEnv *,
  */
 JNIEXPORT jlong JNICALL
 Java_com_baidu_paddle_inference_Predictor_getInputHandleByName(JNIEnv *,
-                                                               jobject, jlong,
+                                                               jobject,
+                                                               jlong,
                                                                jstring);
 /*
@@ -110,7 +115,8 @@ Java_com_baidu_paddle_inference_Predictor_getInputHandleByName(JNIEnv *,
  */
 JNIEXPORT jlong JNICALL
 Java_com_baidu_paddle_inference_Predictor_getOutputHandleByName(JNIEnv *,
-                                                                jobject, jlong,
+                                                                jobject,
+                                                                jlong,
                                                                 jstring);
 /*
@@ -124,4 +130,3 @@ Java_com_baidu_paddle_inference_Predictor_runPD(JNIEnv *, jobject, jlong);
 #ifdef __cplusplus
 }
 #endif
-#endif  // PADDLE_FLUID_INFERENCE_JAVAAPI_NATIVE_COM_BAIDU_PADDLE_INFERENCE_PREDICTOR_H_
@@ -16,8 +16,7 @@
 #include <jni.h>
 /* Header for class com_baidu_paddle_inference_Tensor */
-#ifndef PADDLE_FLUID_INFERENCE_JAVAAPI_NATIVE_COM_BAIDU_PADDLE_INFERENCE_TENSOR_H_
-#define PADDLE_FLUID_INFERENCE_JAVAAPI_NATIVE_COM_BAIDU_PADDLE_INFERENCE_TENSOR_H_
+#pragma once
 #ifdef __cplusplus
 extern "C" {
 #endif
@@ -43,7 +42,8 @@ JNIEXPORT void JNICALL Java_com_baidu_paddle_inference_Tensor_cppTensorReshape(
  * Signature: (J)[I
  */
 JNIEXPORT jintArray JNICALL
-Java_com_baidu_paddle_inference_Tensor_cppTensorGetShape(JNIEnv *, jobject,
+Java_com_baidu_paddle_inference_Tensor_cppTensorGetShape(JNIEnv *,
+                                                         jobject,
                                                          jlong);
 /*
@@ -52,7 +52,8 @@ Java_com_baidu_paddle_inference_Tensor_cppTensorGetShape(JNIEnv *, jobject,
  * Signature: (J)Ljava/lang/String;
  */
 JNIEXPORT jstring JNICALL
-Java_com_baidu_paddle_inference_Tensor_cppTensorGetName(JNIEnv *, jobject,
+Java_com_baidu_paddle_inference_Tensor_cppTensorGetName(JNIEnv *,
+                                                        jobject,
                                                         jlong);
 /*
@@ -62,7 +63,8 @@ Java_com_baidu_paddle_inference_Tensor_cppTensorGetName(JNIEnv *, jobject,
  */
 JNIEXPORT void JNICALL
 Java_com_baidu_paddle_inference_Tensor_cppTensorCopyFromCpuFloat(JNIEnv *,
-                                                                 jobject, jlong,
+                                                                 jobject,
+                                                                 jlong,
                                                                  jfloatArray);
 /*
@@ -72,7 +74,8 @@ Java_com_baidu_paddle_inference_Tensor_cppTensorCopyFromCpuFloat(JNIEnv *,
  */
 JNIEXPORT void JNICALL
 Java_com_baidu_paddle_inference_Tensor_cppTensorCopyFromCpuInt(JNIEnv *,
-                                                               jobject, jlong,
+                                                               jobject,
+                                                               jlong,
                                                                jintArray);
 /*
@@ -82,7 +85,8 @@ Java_com_baidu_paddle_inference_Tensor_cppTensorCopyFromCpuInt(JNIEnv *,
  */
 JNIEXPORT void JNICALL
 Java_com_baidu_paddle_inference_Tensor_cppTensorCopyFromCpuLong(JNIEnv *,
-                                                                jobject, jlong,
+                                                                jobject,
+                                                                jlong,
                                                                 jlongArray);
 /*
@@ -92,7 +96,8 @@ Java_com_baidu_paddle_inference_Tensor_cppTensorCopyFromCpuLong(JNIEnv *,
  */
 JNIEXPORT void JNICALL
 Java_com_baidu_paddle_inference_Tensor_cppTensorCopyFromCpuByte(JNIEnv *,
-                                                                jobject, jlong,
+                                                                jobject,
+                                                                jlong,
                                                                 jbyteArray);
 /*
@@ -111,7 +116,8 @@ Java_com_baidu_paddle_inference_Tensor_cppTensorCopyFromCpuBoolean(
  */
 JNIEXPORT void JNICALL
 Java_com_baidu_paddle_inference_Tensor_cppTensorCopyToCpuFloat(JNIEnv *,
-                                                               jobject, jlong,
+                                                               jobject,
+                                                               jlong,
                                                                jfloatArray);
 /*
@@ -120,8 +126,10 @@ Java_com_baidu_paddle_inference_Tensor_cppTensorCopyToCpuFloat(JNIEnv *,
  * Signature: (J[I)V
  */
 JNIEXPORT void JNICALL
-Java_com_baidu_paddle_inference_Tensor_cppTensorCopyToCpuInt(JNIEnv *, jobject,
-                                                             jlong, jintArray);
+Java_com_baidu_paddle_inference_Tensor_cppTensorCopyToCpuInt(JNIEnv *,
+                                                             jobject,
+                                                             jlong,
+                                                             jintArray);
 /*
  * Class: com_baidu_paddle_inference_Tensor
@@ -129,7 +137,8 @@ Java_com_baidu_paddle_inference_Tensor_cppTensorCopyToCpuInt(JNIEnv *, jobject,
  * Signature: (J[J)V
  */
 JNIEXPORT void JNICALL
-Java_com_baidu_paddle_inference_Tensor_cppTensorCopyToCpuLong(JNIEnv *, jobject,
+Java_com_baidu_paddle_inference_Tensor_cppTensorCopyToCpuLong(JNIEnv *,
+                                                              jobject,
                                                               jlong,
                                                               jlongArray);
@@ -139,7 +148,8 @@ Java_com_baidu_paddle_inference_Tensor_cppTensorCopyToCpuLong(JNIEnv *, jobject,
  * Signature: (J[B)V
  */
 JNIEXPORT void JNICALL
-Java_com_baidu_paddle_inference_Tensor_cppTensorCopyToCpuByte(JNIEnv *, jobject,
+Java_com_baidu_paddle_inference_Tensor_cppTensorCopyToCpuByte(JNIEnv *,
+                                                              jobject,
                                                               jlong,
                                                               jbyteArray);
@@ -150,10 +160,10 @@ Java_com_baidu_paddle_inference_Tensor_cppTensorCopyToCpuByte(JNIEnv *, jobject,
  */
 JNIEXPORT void JNICALL
 Java_com_baidu_paddle_inference_Tensor_cppTensorCopyToCpuBoolean(JNIEnv *,
-                                                                 jobject, jlong,
+                                                                 jobject,
+                                                                 jlong,
                                                                  jbooleanArray);
 #ifdef __cplusplus
 }
 #endif
-#endif  // PADDLE_FLUID_INFERENCE_JAVAAPI_NATIVE_COM_BAIDU_PADDLE_INFERENCE_TENSOR_H_