diff --git a/paddle/cuda/src/hl_cuda_device.cc b/paddle/cuda/src/hl_cuda_device.cc
index f8ce8cb428a1d9c6a0308ee68c55f05dc4895676..4042d9742a92f6718406c8923d9129b81afe89e7 100644
--- a/paddle/cuda/src/hl_cuda_device.cc
+++ b/paddle/cuda/src/hl_cuda_device.cc
@@ -97,11 +97,11 @@ int g_cuda_lib_version = 0;
  * Check build-in cuda function using glog and it **does not**
  * support << operator for more details error info.
  */
-#define CHECK_CUDA(cudaFunc)                               \
-  do {                                                     \
-    cudaError_t cudaStat = cudaFunc;                       \
-    CHECK_EQ(cudaSuccess, cudaStat)                        \
-        << "Cuda Error: " << cudaGetErrorString(cudaStat); \
+#define CHECK_CUDA(cudaFunc)                                         \
+  do {                                                               \
+    cudaError_t cudaStat = cudaFunc;                                 \
+    CHECK_EQ(cudaSuccess, cudaStat) << "Cuda Error: "                \
+                                    << cudaGetErrorString(cudaStat); \
   } while (0)
 
 /**
@@ -468,8 +468,8 @@ void hl_specify_devices_start(int *device, int number) {
   CHECK(tmp) << "[Start failed] System memory is not enough.";
 
   g_device = (hl_device_prop *)tmp;
-  device_prop = (hl_device_prop)((char *)tmp + g_system_device_num *
-                                                   sizeof(hl_device_prop *));
+  device_prop = (hl_device_prop)(
+      (char *)tmp + g_system_device_num * sizeof(hl_device_prop *));
   memset(g_device, 0, g_system_device_num * sizeof(hl_device_prop *));
   int num = 0;
   for (int i = 0; i < number; i++) {
@@ -558,8 +558,8 @@ bool hl_get_sync_flag() { return g_sync_flag; }
 void hl_stream_synchronize(hl_stream_t stream) {
   cudaStream_t cu_stream;
 
-  CHECK_LT(stream, HPPL_STREAM_END)
-      << __func__ << ": the parameter stream is error.";
+  CHECK_LT(stream, HPPL_STREAM_END) << __func__
+                                    << ": the parameter stream is error.";
 
   cu_stream = t_resource.stream[stream];
   CHECK_CUDA(cudaStreamSynchronize(cu_stream));
@@ -589,8 +589,8 @@ void hl_stream_record_event(hl_stream_t stream, hl_event_t event) {
   cudaStream_t cu_stream;
 
   CHECK_NOTNULL(event);
-  CHECK_LT(stream, HPPL_STREAM_END)
-      << __func__ << ": the parameter stream is error.";
+  CHECK_LT(stream, HPPL_STREAM_END) << __func__
+                                    << ": the parameter stream is error.";
 
   cu_stream = t_resource.stream[stream];
   CHECK_CUDA(cudaEventRecord(event->cu_event, cu_stream));
@@ -600,8 +600,8 @@ void hl_stream_wait_event(hl_stream_t stream, hl_event_t event) {
   cudaStream_t cu_stream;
 
   CHECK_NOTNULL(event);
-  CHECK_LT(stream, HPPL_STREAM_END)
-      << __func__ << ": the parameter stream is error.";
+  CHECK_LT(stream, HPPL_STREAM_END) << __func__
+                                    << ": the parameter stream is error.";
 
   cu_stream = t_resource.stream[stream];
   CHECK_CUDA(cudaStreamWaitEvent(cu_stream, event->cu_event, 0));
diff --git a/paddle/function/BufferArgTest.cpp b/paddle/function/BufferArgTest.cpp
index f1a234ab1a1068e1b98a86eaf9d79dbf4edf04b2..1744f377808f137dcda4a28acce336dc22be3d01 100644
--- a/paddle/function/BufferArgTest.cpp
+++ b/paddle/function/BufferArgTest.cpp
@@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
*/ -#include #include "BufferArg.h" +#include #include "paddle/math/MemoryHandle.h" namespace paddle { diff --git a/paddle/function/CosSimOp.cpp b/paddle/function/CosSimOp.cpp index e44581ae1060249e401c9462740da02508a4c00d..7ece7b2dfedaf460741c97b5a700eb632d85cabc 100644 --- a/paddle/function/CosSimOp.cpp +++ b/paddle/function/CosSimOp.cpp @@ -165,12 +165,12 @@ void CosSimBackward(const CpuMatrix& out_grad, real reciprocal_square_sum_x = 1.0f / square_sum_x; real reciprocal_square_sum_y = 1.0f / square_sum_y; for (size_t j = 0; j < dim; ++j) { - prev_grad_x[j] += out[i] * grad[i] * - (prev_out_y[j] * reciprocal_xy - - prev_out_x[j] * reciprocal_square_sum_x); - prev_grad_y[j] += out[i] * grad[i] * - (prev_out_x[j] * reciprocal_xy - - prev_out_y[j] * reciprocal_square_sum_y); + prev_grad_x[j] += + out[i] * grad[i] * (prev_out_y[j] * reciprocal_xy - + prev_out_x[j] * reciprocal_square_sum_x); + prev_grad_y[j] += + out[i] * grad[i] * (prev_out_x[j] * reciprocal_xy - + prev_out_y[j] * reciprocal_square_sum_y); } } } diff --git a/paddle/function/FunctionTest.cpp b/paddle/function/FunctionTest.cpp index f9ea7c7e4f6ed31321cf7fe62371c2e9dd685a86..fdf7e631e5ab8c67eb5cf906bd0af49740d60112 100644 --- a/paddle/function/FunctionTest.cpp +++ b/paddle/function/FunctionTest.cpp @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include #include "Function.h" +#include #include "paddle/math/SparseMatrix.h" namespace paddle { diff --git a/paddle/function/TensorShapeTest.cpp b/paddle/function/TensorShapeTest.cpp index e19afe0c4d594c9ff40d0a3a86682f3ff241d82c..45a2e106e7fc3f0e9e57cf8c2bb549d747f4f49b 100644 --- a/paddle/function/TensorShapeTest.cpp +++ b/paddle/function/TensorShapeTest.cpp @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include #include "TensorShape.h" +#include namespace paddle { diff --git a/paddle/function/TensorTypeTest.cpp b/paddle/function/TensorTypeTest.cpp index 5b5c504ae2a330e19b8fddcef0dbf98014b80572..e50e46f3e99111731d9587f3e4ddfd4b26ae27e9 100644 --- a/paddle/function/TensorTypeTest.cpp +++ b/paddle/function/TensorTypeTest.cpp @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ -#include #include "TensorType.h" +#include namespace paddle { diff --git a/paddle/gserver/dataproviders/PyDataProvider.cpp b/paddle/gserver/dataproviders/PyDataProvider.cpp index e23051cd8efcee330313e0c156b3125dafc7b24e..b53790e764b9f9ad668abd1f4125695e3533a027 100644 --- a/paddle/gserver/dataproviders/PyDataProvider.cpp +++ b/paddle/gserver/dataproviders/PyDataProvider.cpp @@ -194,8 +194,8 @@ void PyDataProvider::fillSlotsByStr(const std::string& samples) { auto& slot = slots_[j]; CHECK(SlotDef::INDEX >= slot.type || SlotDef::STRING == slot.type) << " Slot type:" << slot.type << " is out of range."; - CHECK_GE(slot.type, SlotDef::VECTOR_DENSE) - << " Slot type:" << slot.type << " is out of range."; + CHECK_GE(slot.type, SlotDef::VECTOR_DENSE) << " Slot type:" << slot.type + << " is out of range."; switch (slot.type) { case SlotDef::VECTOR_DENSE: fillDenseSlot(slot, data, dataEnd); diff --git a/paddle/gserver/evaluators/Evaluator.cpp b/paddle/gserver/evaluators/Evaluator.cpp index 56cf9ac78097067c78fffe1244abed560095e3f5..9db6d252d97bfeee3fe376bcda431fe94c65a678 100644 --- a/paddle/gserver/evaluators/Evaluator.cpp +++ b/paddle/gserver/evaluators/Evaluator.cpp @@ -446,9 +446,9 @@ real AucEvaluator::evalImp(std::vector& arguments) { for (size_t i = 0; i < insNum; ++i) { real value = outputD[pos]; uint32_t binIdx = static_cast(value * kBinNum_); - CHECK(binIdx <= kBinNum_) - << "bin index [" << binIdx << "] out of range, predict value[" << value - << "]"; + CHECK(binIdx <= kBinNum_) << "bin index [" << binIdx + << "] out of range, predict value[" << value + << "]"; real w = supportWeight ? weightD[i] : 1.0; if (labelD[i] == kNegativeLabel_) { statNeg_[binIdx] += w; diff --git a/paddle/gserver/layers/Layer.cpp b/paddle/gserver/layers/Layer.cpp index 00dd8a8508bfa6057eaeef0268dd2befb79761be..125aaf947f3c9d976b117667d1d1b7700a029cc6 100644 --- a/paddle/gserver/layers/Layer.cpp +++ b/paddle/gserver/layers/Layer.cpp @@ -263,9 +263,8 @@ void Layer::zeroGrad() { } void Layer::initNeedFlags() { - auto initFlag = [this](bool& flag, - bool (Layer::*flagQueryFunc)() const, - ParameterType type) { + auto initFlag = [this]( + bool& flag, bool (Layer::*flagQueryFunc)() const, ParameterType type) { flag = false; if (biasParameter_ && biasParameter_->hasType(type)) { flag = true; diff --git a/paddle/gserver/tests/test_RecurrentLayer.cpp b/paddle/gserver/tests/test_RecurrentLayer.cpp index e8e1db51ae6a846fc1aeb3cca3bf07a7c29ccee2..16ab0e6aecb6a895b20389992a44dc542eb3b00a 100644 --- a/paddle/gserver/tests/test_RecurrentLayer.cpp +++ b/paddle/gserver/tests/test_RecurrentLayer.cpp @@ -292,27 +292,26 @@ void checkRecurrentLayer(LayerConfig layerConfig, TestRecurrentLayer testGpu(layerConfig, true, gpuBatch); testCpu.init(batchSize); testGpu.init(batchSize); - auto checkError = - [](MatrixPtr cpu, MatrixPtr gpu, int numSequences, const char* str) { - CpuMatrix check(gpu->getHeight(), gpu->getWidth()); - check.copyFrom(*gpu); - int height = cpu->getHeight(); - int width = cpu->getWidth(); - const real* data1 = cpu->getData(); - const real* data2 = check.getData(); - int count = 0; - for (int i = 0; i < height; i++) { - for (int j = 0; j < width; j++) { - if (fabs(data1[i * width + j] - data2[i * width + j]) / - numSequences > - 1e-4) { - count++; - } - } + auto checkError = []( + MatrixPtr cpu, MatrixPtr gpu, int numSequences, const char* str) { + CpuMatrix check(gpu->getHeight(), gpu->getWidth()); + check.copyFrom(*gpu); + int height = cpu->getHeight(); + int width = cpu->getWidth(); + const real* data1 = 
+    const real* data1 = cpu->getData();
+    const real* data2 = check.getData();
+    int count = 0;
+    for (int i = 0; i < height; i++) {
+      for (int j = 0; j < width; j++) {
+        if (fabs(data1[i * width + j] - data2[i * width + j]) / numSequences >
+            1e-4) {
+          count++;
         }
-        EXPECT_EQ(count, 0) << "[" << str << "]"
-                            << "There are " << count << " different element.";
-      };
+      }
+    }
+    EXPECT_EQ(count, 0) << "[" << str << "]"
+                        << "There are " << count << " different element.";
+  };
 
   T* cpuLayer = dynamic_cast<T*>(testCpu.testLayer_.get());
   T* gpuLayer = dynamic_cast<T*>(testGpu.testLayer_.get());
diff --git a/paddle/math/MatrixBitCode.cpp b/paddle/math/MatrixBitCode.cpp
index 0ea387d0208fad15d6458d0208b215173030a14e..cea912d3ca02715c203814d13529aadfd9d3b7fb 100644
--- a/paddle/math/MatrixBitCode.cpp
+++ b/paddle/math/MatrixBitCode.cpp
@@ -174,10 +174,8 @@ void CpuMatrix::mulByBitCode(size_t numClasses,
                              const IVector& codes,
                              const Matrix& weight,
                              const Matrix& input) {
-  auto op = [](real& t,
-               const real* weightRow,
-               const real* inputRow,
-               size_t inputDim) {
+  auto op = [](
+      real& t, const real* weightRow, const real* inputRow, size_t inputDim) {
     real sum = 0;
     for (size_t k = 0; k < inputDim; ++k) {
       sum += weightRow[k] * inputRow[k];
@@ -195,12 +193,12 @@ void CpuMatrix::mulByBitCodeBackwardWeight(size_t numClasses,
                                            const IVector& codes,
                                            Matrix& weight,
                                            const Matrix& input) {
-  auto op =
-      [](const real t, real* weightRow, const real* inputRow, size_t inputDim) {
-        for (size_t k = 0; k < inputDim; ++k) {
-          weightRow[k] += t * inputRow[k];
-        }
-      };
+  auto op = [](
+      const real t, real* weightRow, const real* inputRow, size_t inputDim) {
+    for (size_t k = 0; k < inputDim; ++k) {
+      weightRow[k] += t * inputRow[k];
+    }
+  };
 
   mulByBitCodeT(op, SimpleCodeTable(numClasses), codes, *this, weight, input);
 }
@@ -212,12 +210,12 @@ void CpuMatrix::mulByBitCodeBackwardError(size_t numClasses,
                                           const IVector& codes,
                                           const Matrix& weight,
                                           Matrix& input) {
-  auto op =
-      [](const real t, const real* weightRow, real* inputRow, size_t inputDim) {
-        for (size_t k = 0; k < inputDim; ++k) {
-          inputRow[k] += t * weightRow[k];
-        }
-      };
+  auto op = [](
+      const real t, const real* weightRow, real* inputRow, size_t inputDim) {
+    for (size_t k = 0; k < inputDim; ++k) {
+      inputRow[k] += t * weightRow[k];
+    }
+  };
 
   mulByBitCodeT(op, SimpleCodeTable(numClasses), codes, *this, weight, input);
 }
diff --git a/paddle/math/tests/TensorCheck.h b/paddle/math/tests/TensorCheck.h
index 86f5982e758e678181d1e5a09ecfe094e48dbabd..5bc4a03067a75527fa30e5bb5526f93dc7b9fdcc 100644
--- a/paddle/math/tests/TensorCheck.h
+++ b/paddle/math/tests/TensorCheck.h
@@ -183,8 +183,8 @@ void TensorCheck(AssertEq compare,
 
 template <typename AssertEq>
 void TensorCheck(AssertEq compare, real args1, real args2) {
-  EXPECT_EQ(compare(args1, args2), true)
-      << "[Test error] args1 = " << args1 << ", args2 = " << args2;
+  EXPECT_EQ(compare(args1, args2), true) << "[Test error] args1 = " << args1
+                                         << ", args2 = " << args2;
 }
 
 template <typename AssertEq>
diff --git a/paddle/math/tests/test_SIMDFunctions.cpp b/paddle/math/tests/test_SIMDFunctions.cpp
index e4f8cf4c24beadf54089a83bedf066b072e02663..e8f9b26ff240f9c339404a919c14eb3e3704c1de 100644
--- a/paddle/math/tests/test_SIMDFunctions.cpp
+++ b/paddle/math/tests/test_SIMDFunctions.cpp
@@ -126,15 +126,15 @@ TEST(SIMDFunction, decayL1_WithLR) {
   typedef std::function<void(float*, float*, float*, float, size_t)>
       DecayL1MethodType;
 
-  DecayL1MethodType naive =
-      [](float* d, float* s, float* lr, float l, size_t len) {
-        paddle::simd::naive::decayL1(d, s, lr, l, len);
-      };
-
-  DecayL1MethodType simd =
-      [](float* d, float* s, float* lr, float l, size_t len) {
-        paddle::simd::decayL1(d, s, lr, l, len);
-      };
+  DecayL1MethodType naive = [](
+      float* d, float* s, float* lr, float l, size_t len) {
+    paddle::simd::naive::decayL1(d, s, lr, l, len);
+  };
+
+  DecayL1MethodType simd = [](
+      float* d, float* s, float* lr, float l, size_t len) {
+    paddle::simd::decayL1(d, s, lr, l, len);
+  };
 
   naive(dest.get(), src.get(), lr.get(), lambda, VECTOR_LEN);
   simd(simd_dest.get(), src.get(), lr.get(), lambda, VECTOR_LEN);
diff --git a/paddle/parameter/Argument.cpp b/paddle/parameter/Argument.cpp
index 10e4a0ae68fb50bec0bd419ca353450fdcbd92ad..6d9365af2d14673146d9e427138bf6dd5f5b41b6 100644
--- a/paddle/parameter/Argument.cpp
+++ b/paddle/parameter/Argument.cpp
@@ -379,7 +379,7 @@ void Argument::concat(const std::vector<Argument>& args,
   }
   auto copyArg = [batchSize, stream](
-                     MatrixPtr& dst, MatrixPtr src, int startRow, bool useGpu) {
+      MatrixPtr& dst, MatrixPtr src, int startRow, bool useGpu) {
     if (!src) {
       dst.reset();
       return;
     }
@@ -395,31 +395,29 @@ void Argument::concat(const std::vector<Argument>& args,
     tmpMatrix->copyFrom(*src, stream);
   };
 
-  auto copyIds =
-      [batchSize, stream](
-          IVectorPtr& dst, const IVectorPtr& src, int startRow, bool useGpu) {
-        if (!src) {
-          dst.reset();
-          return;
-        }
-        IVector::resizeOrCreate(dst, batchSize, useGpu);
-        dst->subVec(startRow, src->getSize())->copyFrom(*src, stream);
-      };
-
-  auto copyStrs =
-      [batchSize, stream](
-          SVectorPtr& dst, const SVectorPtr& src, int startRow, bool useGpu) {
-        if (!src) {
-          dst.reset();
-          return;
-        }
-        if (!dst) {
-          dst = std::make_shared<std::vector<std::string>>(batchSize);
-        } else {
-          dst->resize(batchSize);
-        }
-        std::copy(src->begin(), src->end(), dst->begin() + startRow);
-      };
+  auto copyIds = [batchSize, stream](
+      IVectorPtr& dst, const IVectorPtr& src, int startRow, bool useGpu) {
+    if (!src) {
+      dst.reset();
+      return;
+    }
+    IVector::resizeOrCreate(dst, batchSize, useGpu);
+    dst->subVec(startRow, src->getSize())->copyFrom(*src, stream);
+  };
+
+  auto copyStrs = [batchSize, stream](
+      SVectorPtr& dst, const SVectorPtr& src, int startRow, bool useGpu) {
+    if (!src) {
+      dst.reset();
+      return;
+    }
+    if (!dst) {
+      dst = std::make_shared<std::vector<std::string>>(batchSize);
+    } else {
+      dst->resize(batchSize);
+    }
+    std::copy(src->begin(), src->end(), dst->begin() + startRow);
+  };
 
   auto copySequencePos = [](ICpuGpuVectorPtr& dstSeq,
                             const ICpuGpuVectorPtr& srcSeq,
diff --git a/paddle/parameter/AverageOptimizer.cpp b/paddle/parameter/AverageOptimizer.cpp
index 5db5ddd10c49dd395cebefcff355669b7de1d785..e51ca5652090e6fba5e2070fc8f8c1d10e9ecc7a 100644
--- a/paddle/parameter/AverageOptimizer.cpp
+++ b/paddle/parameter/AverageOptimizer.cpp
@@ -155,9 +155,8 @@ ParameterOptimizer::TraverseCallback AverageOptimizer::restore() {
     return nullptr;
   }
 
-  return [](const VectorPtr vecs[],
-            const ParameterConfig& config,
-            size_t sparseId) {
+  return [](
+      const VectorPtr vecs[], const ParameterConfig& config, size_t sparseId) {
     vecs[PARAMETER_VALUE]->copyFrom(*vecs[PARAMETER_GRADIENT]);
     vecs[PARAMETER_GRADIENT]->zeroMem();
   };
diff --git a/paddle/parameter/Parameter.cpp b/paddle/parameter/Parameter.cpp
index 7eb90920ae38e72524058bd1f3210c360633fb37..b8efabbe2a0b54edec64f6cee62b44c76ca7bf10 100644
--- a/paddle/parameter/Parameter.cpp
+++ b/paddle/parameter/Parameter.cpp
@@ -352,8 +352,8 @@ bool Parameter::load(std::istream& s) {
   Header header;
   CHECK(s.read(reinterpret_cast<char*>(&header), sizeof(header)))
       << "Fail to read parameter " << getName();
-  CHECK_EQ(header.version, kFormatVersion)
-      << "Incorrect format version: " << header.version;
+  CHECK_EQ(header.version, kFormatVersion) << "Incorrect format version: "
+                                           << header.version;
   CHECK_EQ(header.size, getSize())
       << "The size (" << header.size << ") in the file does not match the size "
       << "(" << getSize() << ") of the parameter: " << getName();
diff --git a/paddle/pserver/LightNetwork.cpp b/paddle/pserver/LightNetwork.cpp
index 09175f249fc87a91103ebe9a2b4b9a1dc3ec8599..8c8ba0a2e51b85bde0544c6780b07130336a6bdd 100644
--- a/paddle/pserver/LightNetwork.cpp
+++ b/paddle/pserver/LightNetwork.cpp
@@ -359,8 +359,8 @@ void SocketClient::TcpClient(const std::string &serverAddr, int serverPort) {
 
 #if defined(__OSX__) || defined(__APPLE__)
   server = getipnodebyname(serverAddr.c_str(), AF_INET, AI_DEFAULT, &errRet);
-  CHECK_NE(HOST_NOT_FOUND, errRet)
-      << "ERROR, no such host: " << serverAddr << " ret = " << errRet;
+  CHECK_NE(HOST_NOT_FOUND, errRet) << "ERROR, no such host: " << serverAddr
+                                   << " ret = " << errRet;
   CHECK(server) << "getipnodebyname error!";
 #else
   struct hostent hostinfo;
diff --git a/paddle/pserver/ParameterClient2.cpp b/paddle/pserver/ParameterClient2.cpp
index df80a322e1b706c83e0c6fe159c21b52938c3694..a97859f83fe6495b298e920346c964ef2a9b146c 100644
--- a/paddle/pserver/ParameterClient2.cpp
+++ b/paddle/pserver/ParameterClient2.cpp
@@ -549,9 +549,9 @@ PServerVector ParameterClient2::createVector() {
     if (handle == -1) {
       handle = response.handle();
     } else {
-      CHECK_EQ(handle, response.handle())
-          << "Inconsistent handle from client" << &response - &responses[0]
-          << " " << handle << " " << response.handle();
+      CHECK_EQ(handle, response.handle()) << "Inconsistent handle from client"
+                                          << &response - &responses[0] << " "
+                                          << handle << " " << response.handle();
     }
   }
   return PServerVector{handle};
@@ -579,9 +579,9 @@ PServerMatrix ParameterClient2::createMatrix(int32_t numCols) {
     if (handle == -1) {
       handle = response.handle();
     } else {
-      CHECK_EQ(handle, response.handle())
-          << "Inconsistent handle from client" << &response - &responses[0]
-          << " " << handle << " " << response.handle();
+      CHECK_EQ(handle, response.handle()) << "Inconsistent handle from client"
+                                          << &response - &responses[0] << " "
+                                          << handle << " " << response.handle();
     }
   }
   return PServerMatrix{handle};
diff --git a/paddle/pserver/ParameterServer2.cpp b/paddle/pserver/ParameterServer2.cpp
index a72dd3dc08cac84cd6f4ea995a2936598de2cc2a..19ff40ba7e9584f772043f939bcb31caf666163d 100644
--- a/paddle/pserver/ParameterServer2.cpp
+++ b/paddle/pserver/ParameterServer2.cpp
@@ -1213,8 +1213,8 @@ void ParameterServer2::loadValueVector(const LoadValueRequest& request,
   CHECK_EQ(header.size, (size_t)size_)
       << "The size (" << header.size << ") in the file does not match the size "
       << "(" << size_ << ") of the pserver: " << serverId_;
-  CHECK_EQ(header.valueSize, sizeof(real))
-      << "Unsupported valueSize " << header.valueSize;
+  CHECK_EQ(header.valueSize, sizeof(real)) << "Unsupported valueSize "
+                                           << header.valueSize;
 
   CHECK(fs.read(reinterpret_cast<char*>(vec.getData()),
                 header.size * sizeof(real)));
diff --git a/paddle/pserver/ParameterServer2.h b/paddle/pserver/ParameterServer2.h
index 0344196d7b8063f7ad23dde4e4701c2d898d3011..0f5a5895907b20a0cf882b6fa6fb74bd52dce058 100644
--- a/paddle/pserver/ParameterServer2.h
+++ b/paddle/pserver/ParameterServer2.h
@@ -545,11 +545,11 @@ protected:
                      std::vector<Buffer>* buffers);
 
   const ParameterConfig& getParameterConfig(const ParameterBlock& block) {
-    CHECK_LT(block.para_id(), -1UL)
-        << "invalid parameter id:" << block.para_id();
+    CHECK_LT(block.para_id(), -1UL) << "invalid parameter id:"
+                                    << block.para_id();
     const auto it = configMap_.find(block.para_id());
-    CHECK(it != configMap_.end())
-        << "can not find parameter id: " << block.para_id();
+    CHECK(it != configMap_.end()) << "can not find parameter id: "
+                                  << block.para_id();
     return it->second;
   }
 
diff --git a/paddle/pserver/ProtoServer.cpp b/paddle/pserver/ProtoServer.cpp
index 52344123a6b860b54d71e80edf62cccdc4e40ea0..410317ece28ec79dd668e91ff9fbed11f20a5acc 100644
--- a/paddle/pserver/ProtoServer.cpp
+++ b/paddle/pserver/ProtoServer.cpp
@@ -41,8 +41,8 @@ void ProtoServer::handleRequest(std::unique_ptr<MsgReader> msgReader,
 
 void ProtoServer::registerServiceFunctionImp(const std::string& funcName,
                                              ServiceFunction func) {
-  CHECK(!nameToFuncMap_.count(funcName))
-      << "Duplicated registration: " << funcName;
+  CHECK(!nameToFuncMap_.count(funcName)) << "Duplicated registration: "
+                                         << funcName;
   nameToFuncMap_[funcName] = func;
 }
 
diff --git a/paddle/trainer/TrainerInternal.cpp b/paddle/trainer/TrainerInternal.cpp
index 97b401688e9be7533824e97b426dd1d1fcab61ee..4c5d4a0913aaf3a9932b3d67806378ece4245304 100644
--- a/paddle/trainer/TrainerInternal.cpp
+++ b/paddle/trainer/TrainerInternal.cpp
@@ -97,7 +97,7 @@ void TrainerInternal::trainOneBatch(int64_t batchId,
   }
 
   UpdateCallback updateCallback = [this, showStats, &paraStats](
-                                      Parameter* para) {
+      Parameter* para) {
     if (showStats) {
       //! @TODO(yuyang18) Show stats is actually a ParameterHook, refactor
       // it
diff --git a/paddle/utils/BarrierStat.h b/paddle/utils/BarrierStat.h
index 817ab8fc9d67bfd0f6e025aae31475f7f0cd9576..a9c925eff66838d58d540d7be5476e6207a30bec 100644
--- a/paddle/utils/BarrierStat.h
+++ b/paddle/utils/BarrierStat.h
@@ -344,14 +344,14 @@ private:
   } while (0);
 
 // check end barrier
-#define __CHECK_BARRIER_TIMER(set, statName, numConnThreads, ...) \
-  do {                                                            \
-    std::string internalName =                                    \
-        std::string(statName) + std::string(__VA_ARGS__);         \
-    BarrierStatPtr __stat =                                       \
-        (set).getStat(numConnThreads, internalName, BARRIER_END); \
-    PCHECK(__stat->checkPassBarrier())                            \
-        << internalName << ": invalid barrier data";              \
+#define __CHECK_BARRIER_TIMER(set, statName, numConnThreads, ...)   \
+  do {                                                              \
+    std::string internalName =                                      \
+        std::string(statName) + std::string(__VA_ARGS__);           \
+    BarrierStatPtr __stat =                                         \
+        (set).getStat(numConnThreads, internalName, BARRIER_END);   \
+    PCHECK(__stat->checkPassBarrier()) << internalName              \
+                                       << ": invalid barrier data"; \
   } while (0);
 
 /*
diff --git a/paddle/utils/ClassRegistrar.h b/paddle/utils/ClassRegistrar.h
index e4351dbcb7d78bd22099e527b30ab90467fb3a54..1ac27bafabd1945d1d01e3bead22b0dd200d8688 100644
--- a/paddle/utils/ClassRegistrar.h
+++ b/paddle/utils/ClassRegistrar.h
@@ -62,8 +62,8 @@ public:
   // Create a class instance of type @type using args
   BaseClass* createByType(const std::string& type, CreateArgs... args) {
     ClassCreator creator;
-    CHECK(mapGet(type, creatorMap_, &creator))
-        << "Unknown class type: " << type;
+    CHECK(mapGet(type, creatorMap_, &creator)) << "Unknown class type: "
+                                               << type;
     return creator(args...);
   }