Commit 6237f6f5 authored by liaogang

revert clang-format

Parent b13871c3
@@ -97,11 +97,11 @@ int g_cuda_lib_version = 0;
  * Check build-in cuda function using glog and it **does not**
  * support << operator for more details error info.
  */
-#define CHECK_CUDA(cudaFunc)                               \
-  do {                                                     \
-    cudaError_t cudaStat = cudaFunc;                       \
-    CHECK_EQ(cudaSuccess, cudaStat)                        \
-        << "Cuda Error: " << cudaGetErrorString(cudaStat); \
+#define CHECK_CUDA(cudaFunc)                                         \
+  do {                                                               \
+    cudaError_t cudaStat = cudaFunc;                                 \
+    CHECK_EQ(cudaSuccess, cudaStat) << "Cuda Error: "                \
+                                    << cudaGetErrorString(cudaStat); \
   } while (0)
 
 /**
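Note: only the line wrapping changes here; both versions keep the same error-checking idiom, wrapping the CUDA call in `do { ... } while (0)` so the macro expands to a single statement and failing through glog's `CHECK_EQ` with the human-readable error string. A minimal standalone sketch of the same pattern (`MY_CHECK_CUDA` is an illustrative name, not Paddle's):

```cpp
#include <cuda_runtime.h>
#include <glog/logging.h>

// Evaluate the call once, then fail loudly with cudaGetErrorString on error.
#define MY_CHECK_CUDA(call)                            \
  do {                                                 \
    cudaError_t err_ = (call);                         \
    CHECK_EQ(cudaSuccess, err_)                        \
        << "Cuda Error: " << cudaGetErrorString(err_); \
  } while (0)

int main() {
  void* ptr = nullptr;
  MY_CHECK_CUDA(cudaMalloc(&ptr, 1024));  // aborts with a message on failure
  MY_CHECK_CUDA(cudaFree(ptr));
  return 0;
}
```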
@@ -468,8 +468,8 @@ void hl_specify_devices_start(int *device, int number) {
   CHECK(tmp) << "[Start failed] System memory is not enough.";
 
   g_device = (hl_device_prop *)tmp;
-  device_prop = (hl_device_prop)((char *)tmp + g_system_device_num *
-                                 sizeof(hl_device_prop *));
+  device_prop = (hl_device_prop)(
+      (char *)tmp + g_system_device_num * sizeof(hl_device_prop *));
   memset(g_device, 0, g_system_device_num * sizeof(hl_device_prop *));
   int num = 0;
   for (int i = 0; i < number; i++) {
@@ -558,8 +558,8 @@ bool hl_get_sync_flag() { return g_sync_flag; }
 void hl_stream_synchronize(hl_stream_t stream) {
   cudaStream_t cu_stream;
 
-  CHECK_LT(stream, HPPL_STREAM_END)
-      << __func__ << ": the parameter stream is error.";
+  CHECK_LT(stream, HPPL_STREAM_END) << __func__
+                                    << ": the parameter stream is error.";
 
   cu_stream = t_resource.stream[stream];
   CHECK_CUDA(cudaStreamSynchronize(cu_stream));
@@ -589,8 +589,8 @@ void hl_stream_record_event(hl_stream_t stream, hl_event_t event) {
   cudaStream_t cu_stream;
 
   CHECK_NOTNULL(event);
-  CHECK_LT(stream, HPPL_STREAM_END)
-      << __func__ << ": the parameter stream is error.";
+  CHECK_LT(stream, HPPL_STREAM_END) << __func__
+                                    << ": the parameter stream is error.";
 
   cu_stream = t_resource.stream[stream];
   CHECK_CUDA(cudaEventRecord(event->cu_event, cu_stream));
@@ -600,8 +600,8 @@ void hl_stream_wait_event(hl_stream_t stream, hl_event_t event) {
   cudaStream_t cu_stream;
 
   CHECK_NOTNULL(event);
-  CHECK_LT(stream, HPPL_STREAM_END)
-      << __func__ << ": the parameter stream is error.";
+  CHECK_LT(stream, HPPL_STREAM_END) << __func__
+                                    << ": the parameter stream is error.";
 
   cu_stream = t_resource.stream[stream];
   CHECK_CUDA(cudaStreamWaitEvent(cu_stream, event->cu_event, 0));
......
@@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
 
-#include <gtest/gtest.h>
 #include "BufferArg.h"
+#include <gtest/gtest.h>
 #include "paddle/math/MemoryHandle.h"
 
 namespace paddle {
......
@@ -165,12 +165,12 @@ void CosSimBackward<DEVICE_TYPE_CPU>(const CpuMatrix& out_grad,
     real reciprocal_square_sum_x = 1.0f / square_sum_x;
     real reciprocal_square_sum_y = 1.0f / square_sum_y;
     for (size_t j = 0; j < dim; ++j) {
-      prev_grad_x[j] += out[i] * grad[i] *
-                        (prev_out_y[j] * reciprocal_xy -
-                         prev_out_x[j] * reciprocal_square_sum_x);
-      prev_grad_y[j] += out[i] * grad[i] *
-                        (prev_out_x[j] * reciprocal_xy -
-                         prev_out_y[j] * reciprocal_square_sum_y);
+      prev_grad_x[j] +=
+          out[i] * grad[i] * (prev_out_y[j] * reciprocal_xy -
+                              prev_out_x[j] * reciprocal_square_sum_x);
+      prev_grad_y[j] +=
+          out[i] * grad[i] * (prev_out_x[j] * reciprocal_xy -
+                              prev_out_y[j] * reciprocal_square_sum_y);
     }
   }
 }
......
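For reference, the loop being re-wrapped here computes the elementwise gradient of cosine similarity. With $c = \frac{x \cdot y}{\lVert x\rVert\,\lVert y\rVert}$,

$$\frac{\partial c}{\partial x_j} = \frac{y_j}{\lVert x\rVert\,\lVert y\rVert} - c\,\frac{x_j}{\lVert x\rVert^{2}},$$

and symmetrically for $y_j$. Judging by the variable names alone (an assumption; the rest of the function is collapsed), `reciprocal_square_sum_x` is $1/\lVert x\rVert^{2}$ and `reciprocal_xy` is $1/(x \cdot y)$, so with `out[i]` $= c$ the expression `out[i] * (prev_out_y[j] * reciprocal_xy - prev_out_x[j] * reciprocal_square_sum_x)` reduces to exactly the derivative above, and `grad[i]` applies the chain rule from the upstream gradient.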
@@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
 
-#include <gtest/gtest.h>
 #include "Function.h"
+#include <gtest/gtest.h>
 #include "paddle/math/SparseMatrix.h"
 
 namespace paddle {
......
@@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
 
-#include <gtest/gtest.h>
 #include "TensorShape.h"
+#include <gtest/gtest.h>
 
 namespace paddle {
......
@@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
 
-#include <gtest/gtest.h>
 #include "TensorType.h"
+#include <gtest/gtest.h>
 
 namespace paddle {
......
@@ -194,8 +194,8 @@ void PyDataProvider::fillSlotsByStr(const std::string& samples) {
     auto& slot = slots_[j];
     CHECK(SlotDef::INDEX >= slot.type || SlotDef::STRING == slot.type)
         << " Slot type:" << slot.type << " is out of range.";
-    CHECK_GE(slot.type, SlotDef::VECTOR_DENSE)
-        << " Slot type:" << slot.type << " is out of range.";
+    CHECK_GE(slot.type, SlotDef::VECTOR_DENSE) << " Slot type:" << slot.type
+                                               << " is out of range.";
     switch (slot.type) {
       case SlotDef::VECTOR_DENSE:
         fillDenseSlot(slot, data, dataEnd);
......
@@ -446,9 +446,9 @@ real AucEvaluator::evalImp(std::vector<Argument>& arguments) {
   for (size_t i = 0; i < insNum; ++i) {
     real value = outputD[pos];
     uint32_t binIdx = static_cast<uint32_t>(value * kBinNum_);
-    CHECK(binIdx <= kBinNum_)
-        << "bin index [" << binIdx << "] out of range, predict value[" << value
-        << "]";
+    CHECK(binIdx <= kBinNum_) << "bin index [" << binIdx
+                              << "] out of range, predict value[" << value
+                              << "]";
     real w = supportWeight ? weightD[i] : 1.0;
     if (labelD[i] == kNegativeLabel_) {
       statNeg_[binIdx] += w;
......
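Context for the bounds check above: this evaluator computes AUC from a fixed histogram instead of sorting predictions, bucketing each score in [0, 1] into `kBinNum_` bins (a score of exactly 1.0 lands in bin `kBinNum_`, hence `<=`) and accumulating positive/negative weight per bin. A hedged sketch of that technique (names invented for illustration; only the binning line mirrors the hunk):

```cpp
#include <cstdint>
#include <vector>

// Histogram-based AUC: O(n + bins) instead of O(n log n) sort-based AUC.
double histogramAuc(const std::vector<double>& scores,
                    const std::vector<int>& labels,  // 1 = positive
                    uint32_t kBinNum = 1000) {
  std::vector<double> statPos(kBinNum + 1, 0), statNeg(kBinNum + 1, 0);
  for (size_t i = 0; i < scores.size(); ++i) {
    uint32_t binIdx = static_cast<uint32_t>(scores[i] * kBinNum);
    (labels[i] == 1 ? statPos : statNeg)[binIdx] += 1.0;
  }
  double totPos = 0, totNeg = 0;
  for (uint32_t b = 0; b <= kBinNum; ++b) {
    totPos += statPos[b];
    totNeg += statNeg[b];
  }
  // Sweep thresholds from high score to low, accumulating TP/FP counts and
  // summing trapezoids under the ROC curve (in raw counts, normalized last).
  double tp = 0, fp = 0, area = 0;
  for (int b = static_cast<int>(kBinNum); b >= 0; --b) {
    double newTp = tp + statPos[b], newFp = fp + statNeg[b];
    area += (newFp - fp) * (tp + newTp) / 2.0;
    tp = newTp;
    fp = newFp;
  }
  return (totPos > 0 && totNeg > 0) ? area / (totPos * totNeg) : 0.5;
}
```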
@@ -263,9 +263,8 @@ void Layer::zeroGrad() {
 }
 
 void Layer::initNeedFlags() {
-  auto initFlag = [this](bool& flag,
-                         bool (Layer::*flagQueryFunc)() const,
-                         ParameterType type) {
+  auto initFlag = [this](
+      bool& flag, bool (Layer::*flagQueryFunc)() const, ParameterType type) {
     flag = false;
     if (biasParameter_ && biasParameter_->hasType(type)) {
       flag = true;
......
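Aside from the wrapping, note the lambda's second parameter: a pointer to a const member function, which the body would invoke as `(this->*flagQueryFunc)()`. A self-contained sketch of that C++ idiom (the `Layer2` class below is invented for illustration):

```cpp
#include <iostream>

class Layer2 {
public:
  bool needGradient() const { return true; }
  bool needSequenceInfo() const { return false; }

  void initNeedFlags() {
    // A lambda capturing `this` and taking a pointer-to-member-function,
    // mirroring the initFlag helper in the hunk above.
    auto initFlag = [this](bool& flag, bool (Layer2::*query)() const) {
      flag = (this->*query)();  // invoke the member function via the pointer
    };
    initFlag(gradFlag_, &Layer2::needGradient);
    initFlag(seqFlag_, &Layer2::needSequenceInfo);
  }

  bool gradFlag_ = false, seqFlag_ = false;
};

int main() {
  Layer2 l;
  l.initNeedFlags();
  std::cout << l.gradFlag_ << " " << l.seqFlag_ << "\n";  // prints "1 0"
}
```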
@@ -292,27 +292,26 @@ void checkRecurrentLayer(LayerConfig layerConfig,
   TestRecurrentLayer<T> testGpu(layerConfig, true, gpuBatch);
   testCpu.init(batchSize);
   testGpu.init(batchSize);
-  auto checkError =
-      [](MatrixPtr cpu, MatrixPtr gpu, int numSequences, const char* str) {
-        CpuMatrix check(gpu->getHeight(), gpu->getWidth());
-        check.copyFrom(*gpu);
-        int height = cpu->getHeight();
-        int width = cpu->getWidth();
-        const real* data1 = cpu->getData();
-        const real* data2 = check.getData();
-        int count = 0;
-        for (int i = 0; i < height; i++) {
-          for (int j = 0; j < width; j++) {
-            if (fabs(data1[i * width + j] - data2[i * width + j]) /
-                    numSequences >
-                1e-4) {
-              count++;
-            }
-          }
-        }
-        EXPECT_EQ(count, 0) << "[" << str << "]"
-                            << "There are " << count << " different element.";
-      };
+  auto checkError = [](
+      MatrixPtr cpu, MatrixPtr gpu, int numSequences, const char* str) {
+    CpuMatrix check(gpu->getHeight(), gpu->getWidth());
+    check.copyFrom(*gpu);
+    int height = cpu->getHeight();
+    int width = cpu->getWidth();
+    const real* data1 = cpu->getData();
+    const real* data2 = check.getData();
+    int count = 0;
+    for (int i = 0; i < height; i++) {
+      for (int j = 0; j < width; j++) {
+        if (fabs(data1[i * width + j] - data2[i * width + j]) / numSequences >
+            1e-4) {
+          count++;
+        }
+      }
+    }
+    EXPECT_EQ(count, 0) << "[" << str << "]"
+                        << "There are " << count << " different element.";
+  };
   T* cpuLayer = dynamic_cast<T*>(testCpu.testLayer_.get());
   T* gpuLayer = dynamic_cast<T*>(testGpu.testLayer_.get());
......
@@ -174,10 +174,8 @@ void CpuMatrix::mulByBitCode(size_t numClasses,
                              const IVector& codes,
                              const Matrix& weight,
                              const Matrix& input) {
-  auto op = [](real& t,
-               const real* weightRow,
-               const real* inputRow,
-               size_t inputDim) {
+  auto op = [](
+      real& t, const real* weightRow, const real* inputRow, size_t inputDim) {
     real sum = 0;
     for (size_t k = 0; k < inputDim; ++k) {
       sum += weightRow[k] * inputRow[k];
@@ -195,12 +193,12 @@ void CpuMatrix::mulByBitCodeBackwardWeight(size_t numClasses,
                                            const IVector& codes,
                                            Matrix& weight,
                                            const Matrix& input) {
-  auto op =
-      [](const real t, real* weightRow, const real* inputRow, size_t inputDim) {
-        for (size_t k = 0; k < inputDim; ++k) {
-          weightRow[k] += t * inputRow[k];
-        }
-      };
+  auto op = [](
+      const real t, real* weightRow, const real* inputRow, size_t inputDim) {
+    for (size_t k = 0; k < inputDim; ++k) {
+      weightRow[k] += t * inputRow[k];
+    }
+  };
   mulByBitCodeT(op, SimpleCodeTable(numClasses), codes, *this, weight, input);
 }
@@ -212,12 +210,12 @@ void CpuMatrix::mulByBitCodeBackwardError(size_t numClasses,
                                           const IVector& codes,
                                           const Matrix& weight,
                                           Matrix& input) {
-  auto op =
-      [](const real t, const real* weightRow, real* inputRow, size_t inputDim) {
-        for (size_t k = 0; k < inputDim; ++k) {
-          inputRow[k] += t * weightRow[k];
-        }
-      };
+  auto op = [](
+      const real t, const real* weightRow, real* inputRow, size_t inputDim) {
+    for (size_t k = 0; k < inputDim; ++k) {
+      inputRow[k] += t * weightRow[k];
+    }
+  };
   mulByBitCodeT(op, SimpleCodeTable(numClasses), codes, *this, weight, input);
 }
......
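The three hunks in this file reformat three lambdas that plug into the same generic driver, `mulByBitCodeT`, which (apparently — the driver itself is collapsed here) walks the bits of each sample's code and hands the matching weight row to the op. Restated side by side in simplified form (plain `float` instead of Paddle's `real` and matrix types):

```cpp
#include <cstddef>

// The three per-row operations from the hunks above, simplified.
void bitCodeOps() {
  // Forward: accumulate the weight-row / input-row dot product into t.
  auto forwardOp = [](float& t, const float* w, const float* in, size_t dim) {
    float sum = 0;
    for (size_t k = 0; k < dim; ++k) sum += w[k] * in[k];
    t += sum;
  };
  // Backward w.r.t. weight: rank-1 update of the visited weight row.
  auto weightGradOp = [](float t, float* w, const float* in, size_t dim) {
    for (size_t k = 0; k < dim; ++k) w[k] += t * in[k];
  };
  // Backward w.r.t. input: gather the visited weight row into the input grad.
  auto inputGradOp = [](float t, const float* w, float* in, size_t dim) {
    for (size_t k = 0; k < dim; ++k) in[k] += t * w[k];
  };
  (void)forwardOp; (void)weightGradOp; (void)inputGradOp;
}
```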
@@ -183,8 +183,8 @@ void TensorCheck(AssertEq compare,
 
 template <typename AssertEq>
 void TensorCheck(AssertEq compare, real args1, real args2) {
-  EXPECT_EQ(compare(args1, args2), true)
-      << "[Test error] args1 = " << args1 << ", args2 = " << args2;
+  EXPECT_EQ(compare(args1, args2), true) << "[Test error] args1 = " << args1
+                                         << ", args2 = " << args2;
 }
 
 template <typename AssertEq>
......
@@ -126,15 +126,15 @@ TEST(SIMDFunction, decayL1_WithLR) {
   typedef std::function<void(float*, float*, float*, float, size_t)>
       DecayL1MethodType;
-  DecayL1MethodType naive =
-      [](float* d, float* s, float* lr, float l, size_t len) {
-        paddle::simd::naive::decayL1<float>(d, s, lr, l, len);
-      };
-  DecayL1MethodType simd =
-      [](float* d, float* s, float* lr, float l, size_t len) {
-        paddle::simd::decayL1<float>(d, s, lr, l, len);
-      };
+  DecayL1MethodType naive = [](
+      float* d, float* s, float* lr, float l, size_t len) {
+    paddle::simd::naive::decayL1<float>(d, s, lr, l, len);
+  };
+  DecayL1MethodType simd = [](
+      float* d, float* s, float* lr, float l, size_t len) {
+    paddle::simd::decayL1<float>(d, s, lr, l, len);
+  };
 
   naive(dest.get(), src.get(), lr.get(), lambda, VECTOR_LEN);
   simd(simd_dest.get(), src.get(), lr.get(), lambda, VECTOR_LEN);
......
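The two wrappers above compare a scalar and a SIMD implementation of L1 decay with per-element learning rates. If `decayL1` follows the usual soft-threshold definition (an assumption; the test only shows the call sites), each element is `d[i] = sign(s[i]) * max(|s[i]| - lr[i] * lambda, 0)`. A reference sketch under that assumption:

```cpp
#include <algorithm>
#include <cmath>
#include <cstddef>

// Non-SIMD L1 decay with a per-element learning rate: the usual
// soft-threshold update. Assumed semantics, not Paddle's exact code.
void decayL1Naive(float* d, const float* s, const float* lr, float lambda,
                  size_t len) {
  for (size_t i = 0; i < len; ++i) {
    float shrink = std::max(std::fabs(s[i]) - lr[i] * lambda, 0.0f);
    d[i] = std::copysign(shrink, s[i]);  // keep the sign, shrink the magnitude
  }
}
```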
@@ -379,7 +379,7 @@ void Argument::concat(const std::vector<Argument>& args,
   }
 
   auto copyArg = [batchSize, stream](
-                     MatrixPtr& dst, MatrixPtr src, int startRow, bool useGpu) {
+      MatrixPtr& dst, MatrixPtr src, int startRow, bool useGpu) {
     if (!src) {
       dst.reset();
       return;
@@ -395,31 +395,29 @@ void Argument::concat(const std::vector<Argument>& args,
     tmpMatrix->copyFrom(*src, stream);
   };
 
-  auto copyIds =
-      [batchSize, stream](
-          IVectorPtr& dst, const IVectorPtr& src, int startRow, bool useGpu) {
-        if (!src) {
-          dst.reset();
-          return;
-        }
-        IVector::resizeOrCreate(dst, batchSize, useGpu);
-        dst->subVec(startRow, src->getSize())->copyFrom(*src, stream);
-      };
-
-  auto copyStrs =
-      [batchSize, stream](
-          SVectorPtr& dst, const SVectorPtr& src, int startRow, bool useGpu) {
-        if (!src) {
-          dst.reset();
-          return;
-        }
-        if (!dst) {
-          dst = std::make_shared<std::vector<std::string>>(batchSize);
-        } else {
-          dst->resize(batchSize);
-        }
-        std::copy(src->begin(), src->end(), dst->begin() + startRow);
-      };
+  auto copyIds = [batchSize, stream](
+      IVectorPtr& dst, const IVectorPtr& src, int startRow, bool useGpu) {
+    if (!src) {
+      dst.reset();
+      return;
+    }
+    IVector::resizeOrCreate(dst, batchSize, useGpu);
+    dst->subVec(startRow, src->getSize())->copyFrom(*src, stream);
+  };
+
+  auto copyStrs = [batchSize, stream](
+      SVectorPtr& dst, const SVectorPtr& src, int startRow, bool useGpu) {
+    if (!src) {
+      dst.reset();
+      return;
+    }
+    if (!dst) {
+      dst = std::make_shared<std::vector<std::string>>(batchSize);
+    } else {
+      dst->resize(batchSize);
+    }
+    std::copy(src->begin(), src->end(), dst->begin() + startRow);
+  };
 
   auto copySequencePos = [](ICpuGpuVectorPtr& dstSeq,
                             const ICpuGpuVectorPtr& srcSeq,
......
@@ -155,9 +155,8 @@ ParameterOptimizer::TraverseCallback AverageOptimizer::restore() {
     return nullptr;
   }
 
-  return [](const VectorPtr vecs[],
-            const ParameterConfig& config,
-            size_t sparseId) {
+  return [](
+      const VectorPtr vecs[], const ParameterConfig& config, size_t sparseId) {
     vecs[PARAMETER_VALUE]->copyFrom(*vecs[PARAMETER_GRADIENT]);
     vecs[PARAMETER_GRADIENT]->zeroMem();
   };
......
@@ -352,8 +352,8 @@ bool Parameter::load(std::istream& s) {
   Header header;
   CHECK(s.read(reinterpret_cast<char*>(&header), sizeof(header)))
       << "Fail to read parameter " << getName();
-  CHECK_EQ(header.version, kFormatVersion)
-      << "Incorrect format version: " << header.version;
+  CHECK_EQ(header.version, kFormatVersion) << "Incorrect format version: "
+                                           << header.version;
   CHECK_EQ(header.size, getSize())
       << "The size (" << header.size << ") in the file does not match the size "
       << "(" << getSize() << ") of the parameter: " << getName();
......
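The checks above guard a raw binary read: a fixed POD header is read with `s.read(reinterpret_cast<char*>(&header), sizeof(header))` and each field is validated before the payload is trusted. A minimal sketch of the pattern; the field layout and `kFormatVersion` value are assumptions inferred from the CHECK messages in this hunk and the pserver one below, not Paddle's actual struct:

```cpp
#include <cstdint>
#include <istream>
#include <glog/logging.h>

struct Header {
  uint32_t version;    // format version tag
  uint32_t valueSize;  // bytes per element
  uint64_t size;       // element count
};
constexpr uint32_t kFormatVersion = 0;  // placeholder value

bool loadValues(std::istream& s, float* out, uint64_t expectedSize) {
  Header header;
  CHECK(s.read(reinterpret_cast<char*>(&header), sizeof(header)))
      << "Fail to read header";
  CHECK_EQ(header.version, kFormatVersion)
      << "Incorrect format version: " << header.version;
  CHECK_EQ(header.valueSize, sizeof(float))
      << "Unsupported valueSize " << header.valueSize;
  CHECK_EQ(header.size, expectedSize) << "Size mismatch";
  return static_cast<bool>(
      s.read(reinterpret_cast<char*>(out), header.size * sizeof(float)));
}
```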
@@ -359,8 +359,8 @@ void SocketClient::TcpClient(const std::string &serverAddr, int serverPort) {
 
 #if defined(__OSX__) || defined(__APPLE__)
   server = getipnodebyname(serverAddr.c_str(), AF_INET, AI_DEFAULT, &errRet);
-  CHECK_NE(HOST_NOT_FOUND, errRet)
-      << "ERROR, no such host: " << serverAddr << " ret = " << errRet;
+  CHECK_NE(HOST_NOT_FOUND, errRet) << "ERROR, no such host: " << serverAddr
+                                   << " ret = " << errRet;
   CHECK(server) << "getipnodebyname error!";
 #else
   struct hostent hostinfo;
......
@@ -549,9 +549,9 @@ PServerVector ParameterClient2::createVector() {
     if (handle == -1) {
      handle = response.handle();
     } else {
-      CHECK_EQ(handle, response.handle())
-          << "Inconsistent handle from client" << &response - &responses[0]
-          << " " << handle << " " << response.handle();
+      CHECK_EQ(handle, response.handle()) << "Inconsistent handle from client"
+                                          << &response - &responses[0] << " "
+                                          << handle << " " << response.handle();
     }
   }
   return PServerVector{handle};
@@ -579,9 +579,9 @@ PServerMatrix ParameterClient2::createMatrix(int32_t numCols) {
     if (handle == -1) {
       handle = response.handle();
     } else {
-      CHECK_EQ(handle, response.handle())
-          << "Inconsistent handle from client" << &response - &responses[0]
-          << " " << handle << " " << response.handle();
+      CHECK_EQ(handle, response.handle()) << "Inconsistent handle from client"
+                                          << &response - &responses[0] << " "
+                                          << handle << " " << response.handle();
     }
   }
   return PServerMatrix{handle};
......
@@ -1213,8 +1213,8 @@ void ParameterServer2::loadValueVector(const LoadValueRequest& request,
   CHECK_EQ(header.size, (size_t)size_)
       << "The size (" << header.size << ") in the file does not match the size "
       << "(" << size_ << ") of the pserver: " << serverId_;
-  CHECK_EQ(header.valueSize, sizeof(real))
-      << "Unsupported valueSize " << header.valueSize;
+  CHECK_EQ(header.valueSize, sizeof(real)) << "Unsupported valueSize "
+                                           << header.valueSize;
   CHECK(fs.read(reinterpret_cast<char*>(vec.getData()),
                 header.size * sizeof(real)));
......
@@ -545,11 +545,11 @@
       std::vector<ParameterServer2::Buffer>* buffers);
 
   const ParameterConfig& getParameterConfig(const ParameterBlock& block) {
-    CHECK_LT(block.para_id(), -1UL)
-        << "invalid parameter id:" << block.para_id();
+    CHECK_LT(block.para_id(), -1UL) << "invalid parameter id:"
+                                    << block.para_id();
     const auto it = configMap_.find(block.para_id());
-    CHECK(it != configMap_.end())
-        << "can not find parameter id: " << block.para_id();
+    CHECK(it != configMap_.end()) << "can not find parameter id: "
+                                  << block.para_id();
     return it->second;
   }
......
@@ -41,8 +41,8 @@ void ProtoServer::handleRequest(std::unique_ptr<MsgReader> msgReader,
 
 void ProtoServer::registerServiceFunctionImp(const std::string& funcName,
                                              ServiceFunction func) {
-  CHECK(!nameToFuncMap_.count(funcName))
-      << "Duplicated registration: " << funcName;
+  CHECK(!nameToFuncMap_.count(funcName)) << "Duplicated registration: "
+                                         << funcName;
   nameToFuncMap_[funcName] = func;
 }
......
@@ -97,7 +97,7 @@ void TrainerInternal::trainOneBatch(int64_t batchId,
   }
 
   UpdateCallback updateCallback = [this, showStats, &paraStats](
-                                      Parameter* para) {
+      Parameter* para) {
     if (showStats) {
       //! @TODO(yuyang18) Show stats is actually a ParameterHook, refactor
       // it
......
@@ -344,14 +344,14 @@ private:
   } while (0);
 
 // check end barrier
-#define __CHECK_BARRIER_TIMER(set, statName, numConnThreads, ...)  \
-  do {                                                             \
-    std::string internalName =                                     \
-        std::string(statName) + std::string(__VA_ARGS__);          \
-    BarrierStatPtr __stat =                                        \
-        (set).getStat(numConnThreads, internalName, BARRIER_END);  \
-    PCHECK(__stat->checkPassBarrier())                             \
-        << internalName << ": invalid barrier data";               \
+#define __CHECK_BARRIER_TIMER(set, statName, numConnThreads, ...)   \
+  do {                                                               \
+    std::string internalName =                                       \
+        std::string(statName) + std::string(__VA_ARGS__);            \
+    BarrierStatPtr __stat =                                          \
+        (set).getStat(numConnThreads, internalName, BARRIER_END);    \
+    PCHECK(__stat->checkPassBarrier()) << internalName               \
+                                       << ": invalid barrier data";  \
   } while (0);
 
 /*
......
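Only formatting changes inside this macro, but the wrapper both versions keep is worth a note: `do { ... } while (0)` makes a multi-statement macro expand to a single statement, so it nests safely under an unbraced `if`/`else`. (This particular macro ends in `while (0);` with its own semicolon, so call sites presumably omit theirs.) A tiny illustration:

```cpp
#include <cstdio>

// Why do { ... } while (0): the expansion is one statement, so the else
// below still binds to the if as intended.
#define LOG_TWICE(msg) \
  do {                 \
    std::puts(msg);    \
    std::puts(msg);    \
  } while (0)

void f(int cond) {
  if (cond)
    LOG_TWICE("hi");  // a bare { ... } block here would break the else
  else
    std::puts("bye");
}
```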
@@ -62,8 +62,8 @@ public:
 
   // Create a class instance of type @type using args
   BaseClass* createByType(const std::string& type, CreateArgs... args) {
     ClassCreator creator;
-    CHECK(mapGet(type, creatorMap_, &creator))
-        << "Unknown class type: " << type;
+    CHECK(mapGet(type, creatorMap_, &creator)) << "Unknown class type: "
+                                               << type;
     return creator(args...);
   }
......
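`createByType` is the lookup half of a name-to-factory registry. A hedged sketch of the whole pattern (simplified; Paddle's `ClassRegistrar` is likewise templated over the base class and creator arguments, but `Registry`, `mapGet`-free lookup, and the method names here are illustrative):

```cpp
#include <functional>
#include <map>
#include <string>
#include <glog/logging.h>

template <class BaseClass, typename... CreateArgs>
class Registry {
public:
  using Creator = std::function<BaseClass*(CreateArgs...)>;

  // Registration half: reject duplicate type names up front.
  void registerClass(const std::string& type, Creator creator) {
    CHECK(!creatorMap_.count(type)) << "Duplicated registration: " << type;
    creatorMap_[type] = std::move(creator);
  }

  // Lookup half, mirroring createByType above.
  BaseClass* createByType(const std::string& type, CreateArgs... args) {
    auto it = creatorMap_.find(type);
    CHECK(it != creatorMap_.end()) << "Unknown class type: " << type;
    return it->second(args...);
  }

private:
  std::map<std::string, Creator> creatorMap_;
};
```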