Commit d42fbed0 authored by Yu Yang

Fix several C++ issues

* Different-type comparisons.
* ostream << should pass a const object.
* Remove always-true checks.
Parent a4d18146
@@ -240,7 +240,7 @@ public:
     seqClassficationError_ = 0;
   }
 
-  virtual void printStats(std::ostream& os) {
+  virtual void printStats(std::ostream& os) const {
     os << config_.name() << "="
        << (numSequences_ ? totalScore_ / numSequences_ : 0);
     os << " deletions error"
......
@@ -114,7 +114,7 @@ public:
     numCorrect_ = 0;
   }
 
-  virtual void printStats(std::ostream& os) {
+  virtual void printStats(std::ostream& os) const {
     double precision = (double)numCorrect_ / numOutputSegments_;
     double recall = (double)numCorrect_ / numLabelSegments_;
     double f1 =
......
@@ -315,7 +315,7 @@ public:
     return 0;
   }
 
-  virtual void printStats(std::ostream& os) {
+  virtual void printStats(std::ostream& os) const {
     CHECK(colIdx_ + (int32_t)colNum_ >= 0 && colIdx_ - (int32_t)colNum_ < 0)
         << "column index [" << colIdx_ << "] out of range [-" << colNum_ << ", "
         << colNum_ << ")";
@@ -421,7 +421,7 @@ void AucEvaluator::distributeEval(ParameterClient2* client) {
   client->reduce(statNeg_, statNeg_, kBinNum_ + 1, FLAGS_trainer_id, 0);
 }
 
-double AucEvaluator::calcAuc() {
+double AucEvaluator::calcAuc() const {
   double totPos = 0.0;
   double totNeg = 0.0;
   double totPosPrev = 0.0;
@@ -584,7 +584,7 @@ real PrecisionRecallEvaluator::evalImp(std::vector<Argument>& arguments) {
   return 0;
 }
 
-void PrecisionRecallEvaluator::printStats(std::ostream& os) {
+void PrecisionRecallEvaluator::printStats(std::ostream& os) const {
   int label = config_.positive_label();
   if (label != -1) {
     CHECK(label >= 0 && label < (int)statsInfo_.size())
......
@@ -99,19 +99,19 @@ public:
    * @brief print the statistics of evaluate result
    * @note finish() should be called before printStats
    */
-  virtual void printStats(std::ostream& os) {
+  virtual void printStats(std::ostream& os) const {
     os << config_.name() << "="
        << (numSamples_ ? totalScore_ / numSamples_ : 0);
   }
 
   friend std::ostream& operator<<(std::ostream& os,
-                                  Evaluator& evaluator) {
+                                  const Evaluator& evaluator) {
     evaluator.printStats(os);
     return os;
   }
 
   friend std::ostream&& operator<<(std::ostream&& os,  // NOLINT
-                                   Evaluator& evaluator) {
+                                   const Evaluator& evaluator) {
     evaluator.printStats(os);
     return std::move(os);
   }
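These header changes are the core of the "ostream << should pass a const object" fix: the stream operators now accept `const Evaluator&`, which only compiles if `printStats` is itself a const member function, which is why every override in this commit gains `const`. A minimal sketch of the pattern with a hypothetical `Counter` class (not PaddlePaddle code):

```cpp
#include <iostream>

class Counter {
public:
  // const: callable on const objects, as the const-ref operator<< requires.
  void printStats(std::ostream& os) const { os << "count=" << count_; }

  friend std::ostream& operator<<(std::ostream& os, const Counter& c) {
    c.printStats(os);  // would not compile if printStats were non-const
    return os;
  }

private:
  int count_ = 42;
};

int main() {
  const Counter c;         // a const object...
  std::cout << c << '\n';  // ...can now be streamed
}
```

The second, `ostream&&` overload (kept here with the same const fix) appears to exist so that evaluators can also be streamed into a temporary stream object; it forwards to the same const `printStats`.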
@@ -135,7 +135,7 @@ public:
     return -1;
   }
   virtual void finish() {}
-  virtual void printStats(std::ostream&) {}
+  virtual void printStats(std::ostream&) const {}
 };
 
 /**
  * @brief evaluate AUC using colIdx-th column as prediction.
@@ -165,7 +165,7 @@ public:
   virtual real evalImp(std::vector<Argument>& arguments);
 
-  virtual void printStats(std::ostream& os) {
+  virtual void printStats(std::ostream& os) const {
     os << config_.name() << "=" << calcAuc();
   }
@@ -189,7 +189,7 @@ private:
     return (X1 > X2 ? (X1 - X2) : (X2 - X1)) * (Y1 + Y2) / 2.0;
   }
 
-  double calcAuc();
+  double calcAuc() const;
 };
 
 /**
@@ -244,7 +244,7 @@ public:
   virtual real evalImp(std::vector<Argument>& arguments);
 
-  virtual void printStats(std::ostream& os);
+  virtual void printStats(std::ostream& os) const;
 
   virtual void distributeEval(ParameterClient2* client);
@@ -339,7 +339,7 @@ public:
   virtual void finish() { calc(predictArray_); }
 
-  virtual void printStats(std::ostream& os) {
+  virtual void printStats(std::ostream& os) const {
     os << " pos/neg"
        << "=" << pairArray_[0] / ((pairArray_[1] <= 0) ? 1.0 : pairArray_[1]);
   }
......
@@ -154,7 +154,7 @@ public:
     return -1;
   }
 
-  virtual void printStats(std::ostream& os) {
+  virtual void printStats(std::ostream& os) const {
     for (auto& evaluator : evaluators_) {
       evaluator->printStats(os);
       os << ' ';
......
@@ -325,7 +325,7 @@ public:
     (void)arguments;
     return -1;
   }
 
-  virtual void printStats(std::ostream& os) {
+  virtual void printStats(std::ostream& os) const {
     for (auto& evaluator : evaluators_) {
      evaluator->printStats(os);
      os << ' ';
......
@@ -1449,8 +1449,8 @@ template<>
 template <class Agg>
 int BaseMatrixT<real>::applyRow(Agg agg, BaseMatrixT& b) {
   MatrixOffset offset(0, 0, 0, 0, 0, 0);
-  int numRows = b.height_;
-  int numCols = b.width_;
+  auto numRows = b.height_;
+  auto numCols = b.width_;
   CHECK_EQ(height_, numRows);
   CHECK_EQ(width_, 1UL);
   aggregate(agg, base::unary::identity(), base::binary::second(), b, numRows,
@@ -1463,8 +1463,8 @@ template<>
 template <class Agg, class Saver>
 int BaseMatrixT<real>::applyRow(Agg agg, Saver sv, BaseMatrixT& b) {
   MatrixOffset offset(0, 0, 0, 0, 0, 0);
-  int numRows = b.height_;
-  int numCols = b.width_;
+  auto numRows = b.height_;
+  auto numCols = b.width_;
   CHECK_EQ(height_, numRows);
   CHECK_EQ(width_, 1UL);
   aggregate(agg, base::unary::identity(), sv, b, numRows, numCols, offset,
@@ -1493,8 +1493,8 @@ template <class Agg, class Op, class Saver>
 int BaseMatrixT<real>::applyRow(Agg agg, Op op, Saver sv,
                                 BaseMatrixT& b, BaseMatrixT& c) {
   MatrixOffset offset(0, 0, 0, 0, 0, 0);
-  int numRows = b.height_;
-  int numCols = b.width_;
+  auto numRows = b.height_;
+  auto numCols = b.width_;
   CHECK_EQ(height_, numRows);
   CHECK_EQ(width_, 1UL);
   CHECK_EQ(c.height_, numRows);
@@ -1524,8 +1524,8 @@ template<>
 template <class Agg>
 int BaseMatrixT<real>::applyCol(Agg agg, BaseMatrixT& b) {
   MatrixOffset offset(0, 0, 0, 0, 0, 0);
-  int numRows = b.height_;
-  int numCols = b.width_;
+  auto numRows = b.height_;
+  auto numCols = b.width_;
   CHECK_EQ(width_, numCols);
   CHECK_EQ(height_, 1UL);
   aggregate(agg, base::unary::identity(), base::binary::second(), b, numRows,
@@ -1538,8 +1538,8 @@ template<>
 template <class Agg, class Saver>
 int BaseMatrixT<real>::applyCol(Agg agg, Saver sv, BaseMatrixT& b) {
   MatrixOffset offset(0, 0, 0, 0, 0, 0);
-  int numRows = b.height_;
-  int numCols = b.width_;
+  auto numRows = b.height_;
+  auto numCols = b.width_;
   CHECK_EQ(width_, numCols);
   CHECK_EQ(height_, 1UL);
   aggregate(agg, base::unary::identity(), sv, b, numRows, numCols, offset,
......
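All five `applyRow`/`applyCol` hunks above are the same "different-type compare" fix: `height_` and `width_` are unsigned (`size_t`), so storing them in `int` made `CHECK_EQ(height_, numRows)` compare signed against unsigned. With `auto`, both sides keep the same unsigned type. A self-contained illustration (hypothetical `Mat` struct, not the real `BaseMatrixT`):

```cpp
#include <cassert>
#include <cstddef>

struct Mat {
  size_t height_ = 3;
  size_t width_ = 4;
};

int main() {
  Mat b;

  int numRowsBad = b.height_;   // implicit size_t -> int conversion;
                                // (b.height_ == numRowsBad) would be a
                                // signed/unsigned comparison (-Wsign-compare)
  (void)numRowsBad;

  auto numRows = b.height_;     // deduced as size_t
  auto numCols = b.width_;      // deduced as size_t
  assert(b.height_ == numRows); // same-type comparison, no warning
  assert(b.width_ == numCols);
}
```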
@@ -82,8 +82,8 @@ MatrixPtr VectorT<real>::toOneHotSparseMatrix(size_t idRange, bool useGpu) {
 template <>
 MatrixPtr VectorT<int>::toOneHotSparseMatrix(size_t idRange, bool useGpu) {
-  int height = getSize();
-  int width = idRange;
+  auto height = getSize();
+  auto width = idRange;
   MatrixPtr mat = Matrix::createSparseMatrix(
       height, idRange, height, NO_VALUE, SPARSE_CSR, false, useGpu);
@@ -91,7 +91,7 @@ MatrixPtr VectorT<int>::toOneHotSparseMatrix(size_t idRange, bool useGpu) {
   cpuIds.copyFrom(*this);
   int *idData = cpuIds.getData();
-  for (int i = 0; i < height; i ++) {
+  for (decltype(height) i = 0; i < height; i ++) {
     const unsigned int id = idData[i];
     CHECK_LT(id, width);
     mat->setRow(i, 1, &id, nullptr);
......
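With `height` now deduced from `getSize()` (an unsigned type), the loop index is declared as `decltype(height)` so that `i < height` compares like with like instead of `int` against `size_t`. The idiom in isolation:

```cpp
#include <vector>

int main() {
  std::vector<int> ids = {7, 8, 9};
  auto height = ids.size();  // size_t

  // decltype(height) gives the index the exact type of its bound,
  // avoiding a signed/unsigned mismatch in the comparison below.
  for (decltype(height) i = 0; i < height; ++i) {
    ids[i] += 1;
  }
}
```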
@@ -1469,7 +1469,6 @@ void ParameterServer2::waitPassFinish(const WaitPassFinishRequest& request,
 void ParameterServer2::synchronize(const SynchronizeRequest& request,
                                    ProtoResponseCallback callback) {
-  CHECK_LT(request.sync_object_id(), SyncObject_ARRAYSIZE);
   synchronizeBarriers_[request.sync_object_id()]->wait();
   dataSize_ = 0;
   callback(SynchronizeResponse());
@@ -1477,7 +1476,6 @@ void ParameterServer2::synchronize(const SynchronizeRequest& request,
 void ParameterServer2::asyncFinishPass(const SynchronizeRequest& request,
                                        ProtoResponseCallback callback) {
-  CHECK_LT(request.sync_object_id(), SyncObject_ARRAYSIZE);
   synchronizeBarriers_[request.sync_object_id()]->wait();
   callback(SynchronizeResponse());
......
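Assuming the deleted line in each hunk is the `CHECK_LT(request.sync_object_id(), SyncObject_ARRAYSIZE)` (this is what "remove always-true checks" points at): protobuf's generated `<Enum>_ARRAYSIZE` constant is one past the largest enumerator, and a proto2 parser never yields an out-of-range enum value from the accessor, so the check can never fire, and it also compares an enum against an `int`. A toy version of the same tautology with a hand-written enum (not the generated protobuf code):

```cpp
enum SyncObject { SYNC_DEFAULT = 0, SYNC_DATA = 1 };
const int SyncObject_ARRAYSIZE = 2;  // mimics protobuf: max enumerator + 1

bool check(SyncObject id) {
  // Always true for any valid SyncObject, and an enum-vs-int comparison:
  // exactly the kind of dead CHECK this commit deletes.
  return id < SyncObject_ARRAYSIZE;
}
```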
@@ -29,10 +29,10 @@ P_DEFINE_bool(log_barrier_show_log, false,  // for performance tuning insight
 namespace paddle {
 
-std::ostream &operator<<(std::ostream &output, BarrierStatBase &stat) {
+std::ostream &operator<<(std::ostream &output,
+                         const BarrierStatBase &stat) {
   if (FLAGS_log_barrier_abstract) {
-    std::lock_guard<std::mutex> guard(
-        const_cast<BarrierStatBase &>(stat).lock_);
+    std::lock_guard<std::mutex> guard(stat.lock_);
     stat.showAbstract(output);
   }
   return output;
@@ -136,7 +136,7 @@ void BarrierEndStat::reset(bool clearRawData) {
   totAbstract_.minDelta = UINT64_MAX;
 }
 
-void BarrierEndStat::showAbstract(std::ostream &output) {
+void BarrierEndStat::showAbstract(std::ostream &output) const {
   // do not support the case "<=2 pserver"
   if (numConnThreads_ <= 2 || !totSamples_) {
     return;
@@ -272,7 +272,7 @@ void BarrierDeltaStat::reset(bool clearRawData) {
   totAbstract_.minDelta = UINT64_MAX;
 }
 
-void BarrierDeltaStat::showAbstract(std::ostream &output) {
+void BarrierDeltaStat::showAbstract(std::ostream &output) const {
   // do not support the case "<=2 pserver"
   if (numConnThreads_ <= 2 || !totSamples_) {
     return;
......
@@ -218,11 +218,12 @@ public:
   }
 
 protected:
-  virtual void showAbstract(std::ostream &output) {}
-  friend std::ostream &operator<<(std::ostream &output, BarrierStatBase &stat);
+  virtual void showAbstract(std::ostream &output) const {}
+  friend std::ostream &operator<<(std::ostream &output,
+                                  const BarrierStatBase &stat);
 
 protected:
-  std::mutex lock_;
+  mutable std::mutex lock_;
   std::mutex abstractLock_;  // see note on updaterStat
   // each freqency for each barrier trainer
   std::vector<struct Abstract> abstract_;
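Marking `lock_` as `mutable` is what makes the now-const `showAbstract` and the `const BarrierStatBase&` stream operator possible without the old `const_cast`: a mutex guards access but is not part of the object's logical state, which is the canonical use of `mutable`. A stripped-down sketch with a hypothetical `Stat` class:

```cpp
#include <iostream>
#include <mutex>

class Stat {
public:
  void showAbstract(std::ostream& os) const {
    std::lock_guard<std::mutex> guard(lock_);  // OK: lock_ is mutable
    os << "samples=" << samples_ << '\n';
  }

private:
  mutable std::mutex lock_;  // may be locked even through a const reference
  int samples_ = 0;
};

int main() {
  const Stat s;
  s.showAbstract(std::cout);  // no const_cast anywhere
}
```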
@@ -262,7 +263,7 @@ protected:
    * log_barrier_abstract, log_barrier_lowest_nodes, log_barrier_threshold
    * control details.
    */
-  virtual void showAbstract(std::ostream &output);
+  virtual void showAbstract(std::ostream &output) const;
 
 private:
   std::unique_ptr<TimeVectorEnd> timeVector_;
@@ -286,7 +287,7 @@ public:
   virtual bool checkPassBarrier() { return timeVector_->empty(); }
 
 protected:
-  virtual void showAbstract(std::ostream &outPut);
+  virtual void showAbstract(std::ostream &outPut) const;
 
 private:
   // store delta time in uint64_t, eg BP time of all trainers
......
+/* Copyright (c) 2016 Baidu, Inc. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#pragma once
+
+#define ATTR_NORETURN __attribute__((noreturn))
@@ -134,7 +134,7 @@ static void initializeLogFds(char* argv0) {
   gLogInited = true;
 }
 
-static void (*gFailureFunctionPtr)() __attribute__((noreturn)) = abort;
+static void (*gFailureFunctionPtr)() ATTR_NORETURN = abort;
 
 LogMessage::LogMessage(const char* fname, int line, int severity)
     : fname_(fname), line_(line), severity_(severity) {}
@@ -171,7 +171,7 @@ void setMinLogLevel(int level) {
   paddle::internal::gMinLogLevel = level;
 }
 
-void installFailureFunction(void (*callback)()) {
+void installFailureFunction(void (*callback)() ATTR_NORETURN) {
   paddle::internal::gFailureFunctionPtr = callback;
 }
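`ATTR_NORETURN` (defined in the new CompilerMacros.h header shown above) wraps GCC/Clang's `__attribute__((noreturn))`. Putting it on both the `gFailureFunctionPtr` declaration and the `installFailureFunction` parameter documents, in one greppable token, that any installed failure handler must never return, so a fatal-log call site can be treated as the end of its code path. A compilable sketch of the mechanism, with a hypothetical `myFailureHandler` (GCC/Clang syntax only):

```cpp
#include <cstdlib>

#define ATTR_NORETURN __attribute__((noreturn))

// Pointer to a function that never returns; std::abort qualifies.
static void (*gFailureFunctionPtr)() ATTR_NORETURN = std::abort;

void installFailureFunction(void (*callback)() ATTR_NORETURN) {
  gFailureFunctionPtr = callback;
}

static void myFailureHandler() ATTR_NORETURN;  // attribute on the declaration
static void myFailureHandler() { std::exit(1); }

int main() {
  installFailureFunction(myFailureHandler);
  return 0;  // in the real logging code the handler runs on a fatal log
}
```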
......
@@ -23,6 +23,7 @@ limitations under the License. */
 #include <string>
 
 #ifndef PADDLE_USE_GLOG
+#include "CompilerMacros.h"
 
 //! TODO(yuyang18): Move this utility macro into some global header.
 #define PP_CAT(a, b) PP_CAT_I(a, b)
@@ -168,7 +169,7 @@ void setMinLogLevel(int level);
  * @brief Install Log(Fatal) failure function. Default is abort();
  * @param callback: The failure function.
  */
-void installFailureFunction(void (*callback)());
+void installFailureFunction(void (*callback)() ATTR_NORETURN);
 
 /**
  * @brief installFailureWriter
......