Commit 10c14cfc authored by Vitaliy Lyudvichenko, committed by alexey-milovidov

Small code enhancements according to clang-tidy. [#CLICKHOUSE-2931]

Parent: 23263a1a
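The hunks below are mechanical clang-tidy cleanups: `time(0)` becomes `time(nullptr)`, the logging-macro guards change from `while(0)` to `while(false)`, an empty-string comparison becomes `.empty()`, spelled-out iterator types become `auto`, and a shadowing lock variable is renamed. As a rough, self-contained sketch of these patterns — likely corresponding to checks such as modernize-use-nullptr and modernize-use-auto, though the commit message does not name them — the following is illustrative only and not taken from the ClickHouse sources:

```cpp
#include <ctime>
#include <set>
#include <string>

// Illustrative call sites only; `parts` and `part_name` are made-up names
// that mirror the kinds of code this commit touches.
void clang_tidy_patterns(const std::set<std::string> & parts, const std::string & part_name)
{
    // Pass nullptr instead of the literal 0 to time() (modernize-use-nullptr).
    std::time_t now = std::time(nullptr);     // was: time(0)

    // Prefer empty() over comparison with "".
    if (part_name.empty())                    // was: part_name == ""
        return;

    // Use auto instead of repeating the verbose iterator type (modernize-use-auto).
    auto it = parts.lower_bound(part_name);   // was: std::set<std::string>::iterator it = ...
    (void) it;
    (void) now;
}
```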
......@@ -350,7 +350,7 @@ PoolWithFailoverBase<TNestedPool>::updatePoolStates()
for (auto & state : shared_pool_states)
state.randomize();
- time_t current_time = time(0);
+ time_t current_time = time(nullptr);
if (last_error_decrease_time)
{
......
......@@ -27,7 +27,7 @@ int main(int argc, char ** argv)
std::cerr << "Please run `./nozk.sh && sleep 40s && ./yeszk.sh`" << std::endl;
- time_t time0 = time(0);
+ time_t time0 = time(nullptr);
while (true)
{
......
......@@ -203,7 +203,7 @@ void IProfilingBlockInputStream::checkQuota(Block & block)
case LIMITS_CURRENT:
{
- time_t current_time = time(0);
+ time_t current_time = time(nullptr);
double total_elapsed = info.total_stopwatch.elapsedSeconds();
quota->checkAndAddResultRowsBytes(current_time, block.rows(), block.bytes());
......
......@@ -128,7 +128,7 @@ static std::tuple<ASTPtr, BlockIO> executeQueryImpl(
QueryProcessingStage::Enum stage)
{
ProfileEvents::increment(ProfileEvents::Query);
- time_t current_time = time(0);
+ time_t current_time = time(nullptr);
const Settings & settings = context.getSettingsRef();
......@@ -253,7 +253,7 @@ static std::tuple<ASTPtr, BlockIO> executeQueryImpl(
elem.type = QueryLogElement::QUERY_FINISH;
- elem.event_time = time(0);
+ elem.event_time = time(nullptr);
elem.query_duration_ms = elapsed_seconds * 1000;
elem.read_rows = process_list_elem->progress_in.rows;
......@@ -305,7 +305,7 @@ static std::tuple<ASTPtr, BlockIO> executeQueryImpl(
elem.type = QueryLogElement::EXCEPTION_WHILE_PROCESSING;
- elem.event_time = time(0);
+ elem.event_time = time(nullptr);
elem.query_duration_ms = 1000 * (elem.event_time - elem.query_start_time);
elem.exception = getCurrentExceptionMessage(false);
......
......@@ -249,7 +249,7 @@ private:
/// Should we celebrate a bit?
bool isNewYearMode()
{
- time_t current_time = time(0);
+ time_t current_time = time(nullptr);
/// It's bad to be intrusive.
if (current_time % 3 != 0)
......
......@@ -265,7 +265,7 @@ MergeTreeData::MutableDataPartPtr Fetcher::fetchPartImpl(
assertEOF(in);
ActiveDataPartSet::parsePartName(part_name, *new_data_part);
- new_data_part->modification_time = time(0);
+ new_data_part->modification_time = time(nullptr);
new_data_part->loadColumns(true);
new_data_part->loadChecksums(true);
new_data_part->loadIndex();
......
......@@ -483,7 +483,7 @@ void MergeTreeData::clearOldTemporaryDirectories(ssize_t custom_directories_life
if (!lock.try_lock())
return;
- time_t current_time = time(0);
+ time_t current_time = time(nullptr);
ssize_t deadline = (custom_directories_lifetime_seconds >= 0)
? current_time - custom_directories_lifetime_seconds
: current_time - settings.temporary_directories_lifetime;
......@@ -522,12 +522,12 @@ MergeTreeData::DataPartsVector MergeTreeData::grabOldParts()
if (!lock.try_lock())
return res;
- time_t now = time(0);
+ time_t now = time(nullptr);
{
- std::lock_guard<std::mutex> lock(all_data_parts_mutex);
+ std::lock_guard<std::mutex> lock_all_parts(all_data_parts_mutex);
- for (DataParts::iterator it = all_data_parts.begin(); it != all_data_parts.end();)
+ for (auto it = all_data_parts.begin(); it != all_data_parts.end();)
{
if (it->unique() && /// After this ref_count cannot increase.
(*it)->remove_time < now &&
......@@ -1047,7 +1047,7 @@ MergeTreeData::AlterDataPartTransactionPtr MergeTreeData::alterDataPart(
DataPart::Checksums new_checksums = part->checksums;
for (auto it : transaction->rename_map)
{
if (it.second == "")
if (it.second.empty())
new_checksums.files.erase(it.first);
else
new_checksums.files[it.second] = add_checksums.files[it.first];
......@@ -1110,7 +1110,7 @@ void MergeTreeData::AlterDataPartTransaction::commit()
Poco::File{path + it.first}.renameTo(path + it.second);
}
- DataPart & mutable_part = const_cast<DataPart &>(*data_part);
+ auto & mutable_part = const_cast<DataPart &>(*data_part);
mutable_part.checksums = new_checksums;
mutable_part.columns = new_columns;
......@@ -1231,7 +1231,7 @@ MergeTreeData::DataPartsVector MergeTreeData::renameTempPartAndReplace(
/// Parts contained in the part are consecutive in data_parts, intersecting the insertion place
/// for the part itself.
- DataParts::iterator it = data_parts.lower_bound(part);
+ auto it = data_parts.lower_bound(part);
/// Go to the left.
while (it != data_parts.begin())
{
......@@ -1244,7 +1244,7 @@ MergeTreeData::DataPartsVector MergeTreeData::renameTempPartAndReplace(
break;
}
replaced.push_back(*it);
- (*it)->remove_time = time(0);
+ (*it)->remove_time = time(nullptr);
removePartContributionToColumnSizes(*it);
data_parts.erase(it++); /// Yes, ++, not --.
}
......@@ -1259,7 +1259,7 @@ MergeTreeData::DataPartsVector MergeTreeData::renameTempPartAndReplace(
break;
}
replaced.push_back(*it);
- (*it)->remove_time = time(0);
+ (*it)->remove_time = time(nullptr);
removePartContributionToColumnSizes(*it);
data_parts.erase(it++);
}
......@@ -1267,7 +1267,7 @@ MergeTreeData::DataPartsVector MergeTreeData::renameTempPartAndReplace(
if (obsolete)
{
LOG_WARNING(log, "Obsolete part " << part->name << " added");
- part->remove_time = time(0);
+ part->remove_time = time(nullptr);
}
else
{
......@@ -1488,7 +1488,7 @@ MergeTreeData::DataPartPtr MergeTreeData::getActiveContainingPart(const String &
std::lock_guard<std::mutex> lock(data_parts_mutex);
/// The part can be covered only by the previous or the next one in data_parts.
- DataParts::iterator it = data_parts.lower_bound(tmp_part);
+ auto it = data_parts.lower_bound(tmp_part);
if (it != data_parts.end())
{
......@@ -1514,7 +1514,7 @@ MergeTreeData::DataPartPtr MergeTreeData::getPartIfExists(const String & part_na
ActiveDataPartSet::parsePartName(part_name, *tmp_part);
std::lock_guard<std::mutex> lock(all_data_parts_mutex);
- DataParts::iterator it = all_data_parts.lower_bound(tmp_part);
+ auto it = all_data_parts.lower_bound(tmp_part);
if (it != all_data_parts.end() && (*it)->name == part_name)
return *it;
......@@ -1527,8 +1527,8 @@ MergeTreeData::DataPartPtr MergeTreeData::getShardedPartIfExists(const String &
if (part_from_shard->name == part_name)
return part_from_shard;
- else
-     return nullptr;
+ return nullptr;
}
MergeTreeData::MutableDataPartPtr MergeTreeData::loadPartAndFixMetadata(const String & relative_path)
......
......@@ -262,7 +262,7 @@ bool MergeTreeDataMerger::selectAllPartsToMergeWithinPartition(
/// Enough disk space to cover the new merge with a margin.
if (available_disk_space <= sum_bytes * DISK_USAGE_COEFFICIENT_TO_SELECT)
{
- time_t now = time(0);
+ time_t now = time(nullptr);
if (now - disk_space_warning_time > 3600)
{
disk_space_warning_time = now;
......@@ -758,7 +758,7 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMerger::mergePartsToTemporaryPart
throw Exception("Empty part after merge", ErrorCodes::LOGICAL_ERROR);
new_data_part->size = to.marksCount();
- new_data_part->modification_time = time(0);
+ new_data_part->modification_time = time(nullptr);
new_data_part->size_in_bytes = MergeTreeData::DataPart::calcTotalSize(new_part_tmp_path);
new_data_part->is_sharded = false;
......@@ -1080,7 +1080,7 @@ MergeTreeData::PerShardDataParts MergeTreeDataMerger::reshardPartition(
data_part->checksums = output_stream->writeSuffixAndGetChecksums();
data_part->index.swap(output_stream->getIndex());
data_part->size = output_stream->marksCount();
- data_part->modification_time = time(0);
+ data_part->modification_time = time(nullptr);
data_part->size_in_bytes = MergeTreeData::DataPart::calcTotalSize(output_stream->getPartPath());
data_part->is_sharded = true;
data_part->shard_no = shard_no;
......
......@@ -163,7 +163,7 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataWriter::writeTempPart(BlockWithDa
new_data_part->right = temp_index;
new_data_part->level = 0;
new_data_part->size = part_size;
- new_data_part->modification_time = time(0);
+ new_data_part->modification_time = time(nullptr);
new_data_part->month = min_month;
new_data_part->columns = columns;
new_data_part->checksums = checksums;
......
......@@ -182,7 +182,7 @@ void ReplicatedMergeTreeBlockOutputStream::commitPart(zkutil::ZooKeeperPtr & zoo
StorageReplicatedMergeTree::LogEntry log_entry;
log_entry.type = StorageReplicatedMergeTree::LogEntry::GET_PART;
- log_entry.create_time = time(0);
+ log_entry.create_time = time(nullptr);
log_entry.source_replica = storage.replica_name;
log_entry.new_part_name = part_name;
log_entry.quorum = quorum;
......
......@@ -305,7 +305,7 @@ void ReplicatedMergeTreePartCheckThread::run()
{
try
{
- time_t current_time = time(0);
+ time_t current_time = time(nullptr);
/// Take part from the queue for verification.
PartsToCheckQueue::iterator selected = parts_queue.end(); /// end from std::list is not get invalidated
......
......@@ -341,7 +341,7 @@ bool ReplicatedMergeTreeQueue::pullLogsToQueue(zkutil::ZooKeeperPtr zookeeper, z
insertUnlocked(copied_entries[i]);
}
- last_queue_update = time(0);
+ last_queue_update = time(nullptr);
}
catch (...)
{
......@@ -644,7 +644,7 @@ ReplicatedMergeTreeQueue::CurrentlyExecuting::CurrentlyExecuting(ReplicatedMerge
{
entry->currently_executing = true;
++entry->num_tries;
- entry->last_attempt_time = time(0);
+ entry->last_attempt_time = time(nullptr);
if (!queue.future_parts.insert(entry->new_part_name).second)
throw Exception("Tagging already tagged future part " + entry->new_part_name + ". This is a bug.", ErrorCodes::LOGICAL_ERROR);
......@@ -708,7 +708,7 @@ ReplicatedMergeTreeQueue::SelectedEntry ReplicatedMergeTreeQueue::selectEntryToP
else
{
++(*it)->num_postponed;
- (*it)->last_postpone_time = time(0);
+ (*it)->last_postpone_time = time(nullptr);
}
}
......
......@@ -112,7 +112,7 @@ void ReplicatedMergeTreeRestartingThread::run()
first_time = false;
}
- time_t current_time = time(0);
+ time_t current_time = time(nullptr);
if (current_time >= prev_time_of_check_delay + static_cast<time_t>(storage.data.settings.check_delay_period))
{
/// Find out lag of replicas.
......
......@@ -105,7 +105,7 @@ void Service::processQuery(const Poco::Net::HTMLForm & params, ReadBuffer & body
assertEOF(body);
ActiveDataPartSet::parsePartName(part_name, *data_part);
- data_part->modification_time = time(0);
+ data_part->modification_time = time(nullptr);
data_part->loadColumns(true);
data_part->loadChecksums(true);
data_part->loadIndex();
......
......@@ -296,7 +296,7 @@ private:
void insertIntoBuffer(const Block & block, StorageBuffer::Buffer & buffer, std::unique_lock<std::mutex> && lock)
{
- time_t current_time = time(0);
+ time_t current_time = time(nullptr);
/// Sort the columns in the block. This is necessary to make it easier to concatenate the blocks later.
Block sorted_block = block.sortColumns();
......@@ -434,7 +434,7 @@ void StorageBuffer::flushAllBuffers(const bool check_thresholds)
void StorageBuffer::flushBuffer(Buffer & buffer, bool check_thresholds)
{
Block block_to_write;
- time_t current_time = time(0);
+ time_t current_time = time(nullptr);
size_t rows = 0;
size_t bytes = 0;
......
......@@ -343,7 +343,7 @@ bool StorageMergeTree::merge(
if (auto part_log = context.getPartLog(database_name, table_name))
{
PartLogElement elem;
- elem.event_time = time(0);
+ elem.event_time = time(nullptr);
elem.merged_from.reserve(merging_tagger->parts.size());
for (const auto & part : merging_tagger->parts)
......
......@@ -1120,7 +1120,7 @@ bool StorageReplicatedMergeTree::executeLogEntry(const LogEntry & entry)
if (auto part_log = context.getPartLog(database_name, table_name))
{
PartLogElement elem;
- elem.event_time = time(0);
+ elem.event_time = time(nullptr);
elem.merged_from.reserve(parts.size());
for (const auto & part : parts)
......@@ -1843,7 +1843,7 @@ bool StorageReplicatedMergeTree::createLogEntryToMergeParts(
entry.source_replica = replica_name;
entry.new_part_name = merged_name;
entry.deduplicate = deduplicate;
- entry.create_time = time(0);
+ entry.create_time = time(nullptr);
for (const auto & part : parts)
entry.parts_to_merge.push_back(part->name);
......@@ -2147,7 +2147,7 @@ bool StorageReplicatedMergeTree::fetchPart(const String & part_name, const Strin
if (auto part_log = context.getPartLog(database_name, table_name))
{
PartLogElement elem;
- elem.event_time = time(0);
+ elem.event_time = time(nullptr);
elem.event_type = PartLogElement::DOWNLOAD_PART;
elem.size_in_bytes = part->size_in_bytes;
elem.duration_ms = stopwatch.elapsed() / 10000000;
......@@ -2650,7 +2650,7 @@ void StorageReplicatedMergeTree::clearColumnInPartition(
entry.type = LogEntry::CLEAR_COLUMN;
entry.new_part_name = fake_part_name;
entry.column_name = column_name.safeGet<String>();
- entry.create_time = time(0);
+ entry.create_time = time(nullptr);
String log_znode_path = getZooKeeper()->create(zookeeper_path + "/log/log-", entry.toString(), zkutil::CreateMode::PersistentSequential);
entry.znode_name = log_znode_path.substr(log_znode_path.find_last_of('/') + 1);
......@@ -2700,7 +2700,7 @@ void StorageReplicatedMergeTree::dropPartition(const ASTPtr & query, const Field
entry.source_replica = replica_name;
entry.new_part_name = fake_part_name;
entry.detach = detach;
- entry.create_time = time(0);
+ entry.create_time = time(nullptr);
String log_znode_path = getZooKeeper()->create(zookeeper_path + "/log/log-", entry.toString(), zkutil::CreateMode::PersistentSequential);
entry.znode_name = log_znode_path.substr(log_znode_path.find_last_of('/') + 1);
......
......@@ -191,7 +191,7 @@ void StorageTrivialBuffer::addBlock(const Block & block, DeduplicationController
void StorageTrivialBuffer::flush(bool check_thresholds, bool is_called_from_background)
{
Block block_to_write;
- time_t current_time = time(0);
+ time_t current_time = time(nullptr);
time_t time_passed = 0;
......@@ -316,7 +316,7 @@ public:
}
}
- time_t current_time = time(0);
+ time_t current_time = time(nullptr);
if (buffer.checkThresholds(current_time, rows, bytes))
{
/** We'll try to flush the buffer if thresholds are overdrafted.
......
......@@ -17,46 +17,46 @@ using Poco::Logger;
if ((logger)->trace()) {\
std::stringstream oss_internal_rare; \
oss_internal_rare << message; \
- (logger)->trace(oss_internal_rare.str());}} while(0)
+ (logger)->trace(oss_internal_rare.str());}} while(false)
#define LOG_DEBUG(logger, message) do { \
if ((logger)->debug()) {\
std::stringstream oss_internal_rare; \
oss_internal_rare << message; \
- (logger)->debug(oss_internal_rare.str());}} while(0)
+ (logger)->debug(oss_internal_rare.str());}} while(false)
#define LOG_INFO(logger, message) do { \
if ((logger)->information()) {\
std::stringstream oss_internal_rare; \
oss_internal_rare << message; \
- (logger)->information(oss_internal_rare.str());}} while(0)
+ (logger)->information(oss_internal_rare.str());}} while(false)
#define LOG_NOTICE(logger, message) do { \
if ((logger)->notice()) {\
std::stringstream oss_internal_rare; \
oss_internal_rare << message; \
- (logger)->notice(oss_internal_rare.str());}} while(0)
+ (logger)->notice(oss_internal_rare.str());}} while(false)
#define LOG_WARNING(logger, message) do { \
if ((logger)->warning()) {\
std::stringstream oss_internal_rare; \
oss_internal_rare << message; \
- (logger)->warning(oss_internal_rare.str());}} while(0)
+ (logger)->warning(oss_internal_rare.str());}} while(false)
#define LOG_ERROR(logger, message) do { \
if ((logger)->error()) {\
std::stringstream oss_internal_rare; \
oss_internal_rare << message; \
- (logger)->error(oss_internal_rare.str());}} while(0)
+ (logger)->error(oss_internal_rare.str());}} while(false)
#define LOG_CRITICAL(logger, message) do { \
if ((logger)->critical()) {\
std::stringstream oss_internal_rare; \
oss_internal_rare << message; \
- (logger)->critical(oss_internal_rare.str());}} while(0)
+ (logger)->critical(oss_internal_rare.str());}} while(false)
#define LOG_FATAL(logger, message) do { \
if ((logger)->fatal()) {\
std::stringstream oss_internal_rare; \
oss_internal_rare << message; \
- (logger)->fatal(oss_internal_rare.str());}} while(0)
+ (logger)->fatal(oss_internal_rare.str());}} while(false)
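The last hunk only swaps the literal `0` for `false` in the trailing `while(...)` of each logging macro; the surrounding `do { ... } while (false)` wrapper is the standard trick that makes a multi-statement macro expand to a single statement. A minimal standalone sketch of why the wrapper matters (the macro and call site below are illustrative, not part of logger_useful.h):

```cpp
#include <iostream>

// Wrapping the body in do { ... } while (false) makes the macro usable
// wherever a single statement is expected, e.g. as an unbraced if-branch.
#define LOG_TWO_STATEMENTS(msg) do { \
    std::cerr << "log: ";            \
    std::cerr << (msg) << '\n';      \
} while (false)

int main()
{
    bool verbose = false;
    if (verbose)
        LOG_TWO_STATEMENTS("details"); // expands to one statement; the trailing ';' closes it
    else
        std::cerr << "quiet\n";        // without the wrapper, this 'else' would fail to compile
    return 0;
}
```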