Commit 7334c13d authored by Alexey Milovidov

clang-tidy, part 13

Parent cde492a7
@@ -8,7 +8,7 @@ int main(int argc, char ** argv)
{
const auto & date_lut = DateLUT::instance();
std::cout << "Detected default timezone: `" << date_lut.getTimeZone() << "'" << std::endl;
- time_t now = time(NULL);
+ time_t now = time(nullptr);
std::cout << "Current time: " << date_lut.timeToString(now)
<< ", UTC: " << DateLUT::instance("UTC").timeToString(now) << std::endl;
}
......
@@ -3,7 +3,7 @@
#include <gtest/gtest.h>
- TEST(find_symbols, SimpleTest)
+ TEST(FindSymbols, SimpleTest)
{
std::string s = "Hello, world! Goodbye...";
const char * begin = s.data();
......
@@ -68,10 +68,10 @@ int main(int, char **)
Queries queries;
queries.push_back(query);
- for (Queries::iterator it = queries.begin(); it != queries.end(); ++it)
+ for (auto & query : queries)
{
- std::cerr << it->str() << std::endl;
- std::cerr << it->store().at(0) << std::endl;
+ std::cerr << query.str() << std::endl;
+ std::cerr << query.store().at(0) << std::endl;
}
}
@@ -92,10 +92,10 @@ int main(int, char **)
mysqlxx::Query & qref = queries.back();
qref << " 1";
- for (Queries::iterator it = queries.begin(); it != queries.end(); ++it)
+ for (auto & query : queries)
{
- std::cerr << it->str() << std::endl;
- std::cerr << it->store().at(0) << std::endl;
+ std::cerr << query.str() << std::endl;
+ std::cerr << query.store().at(0) << std::endl;
}
}
......
@@ -199,19 +199,19 @@ void IMergeTreeDataPartWriter::initSkipIndices()
skip_indices_initialized = true;
}
- void IMergeTreeDataPartWriter::calculateAndSerializePrimaryIndex(const Block & primary_keys_block, size_t rows)
+ void IMergeTreeDataPartWriter::calculateAndSerializePrimaryIndex(const Block & primary_index_block, size_t rows)
{
if (!primary_index_initialized)
throw Exception("Primary index is not initialized", ErrorCodes::LOGICAL_ERROR);
- size_t primary_columns_num = primary_keys_block.columns();
+ size_t primary_columns_num = primary_index_block.columns();
if (index_columns.empty())
{
- index_types = primary_keys_block.getDataTypes();
+ index_types = primary_index_block.getDataTypes();
index_columns.resize(primary_columns_num);
last_index_row.resize(primary_columns_num);
for (size_t i = 0; i < primary_columns_num; ++i)
- index_columns[i] = primary_keys_block.getByPosition(i).column->cloneEmpty();
+ index_columns[i] = primary_index_block.getByPosition(i).column->cloneEmpty();
}
/** While filling index (index_columns), disable memory tracker.
@@ -230,7 +230,7 @@ void IMergeTreeDataPartWriter::calculateAndSerializePrimaryIndex(const Block & p
{
for (size_t j = 0; j < primary_columns_num; ++j)
{
- const auto & primary_column = primary_keys_block.getByPosition(j);
+ const auto & primary_column = primary_index_block.getByPosition(j);
index_columns[j]->insertFrom(*primary_column.column, i);
primary_column.type->serializeBinary(*primary_column.column, i, *index_stream);
}
@@ -244,7 +244,7 @@ void IMergeTreeDataPartWriter::calculateAndSerializePrimaryIndex(const Block & p
/// store last index row to write final mark at the end of column
for (size_t j = 0; j < primary_columns_num; ++j)
{
- const IColumn & primary_column = *primary_keys_block.getByPosition(j).column.get();
+ const IColumn & primary_column = *primary_index_block.getByPosition(j).column.get();
primary_column.get(rows - 1, last_index_row[j]);
}
}
......
@@ -351,8 +351,7 @@ FieldWithInfinity::FieldWithInfinity(Field && field_)
}
FieldWithInfinity::FieldWithInfinity(const Type type_)
- : field(),
- type(type_)
+ : type(type_)
{
}
@@ -722,10 +721,7 @@ bool KeyCondition::isKeyPossiblyWrappedByMonotonicFunctionsImpl(
out_functions_chain.push_back(func);
- if (!isKeyPossiblyWrappedByMonotonicFunctionsImpl(args[0], out_key_column_num, out_key_column_type, out_functions_chain))
- return false;
- return true;
+ return isKeyPossiblyWrappedByMonotonicFunctionsImpl(args[0], out_key_column_num, out_key_column_type, out_functions_chain);
}
return false;
@@ -1131,9 +1127,8 @@ BoolMask KeyCondition::checkInParallelogram(
const DataTypes & data_types) const
{
std::vector<BoolMask> rpn_stack;
- for (size_t i = 0; i < rpn.size(); ++i)
+ for (const auto & element : rpn)
{
- const auto & element = rpn[i];
if (element.function == RPNElement::FUNCTION_UNKNOWN)
{
rpn_stack.emplace_back(true, true);
......
@@ -663,7 +663,7 @@ void MergeTreeData::setTTLExpressions(const ColumnsDescription::ColumnTTLs & new
TTLEntry update_rows_ttl_entry;
bool seen_delete_ttl = false;
- for (auto ttl_element_ptr : new_ttl_table_ast->children)
+ for (const auto & ttl_element_ptr : new_ttl_table_ast->children)
{
const auto * ttl_element = ttl_element_ptr->as<ASTTTLElement>();
if (!ttl_element)
@@ -1822,7 +1822,7 @@ void MergeTreeData::alterDataPart(
/// Update the checksums.
DataPart::Checksums new_checksums = part->checksums;
- for (auto it : transaction->rename_map)
+ for (const auto & it : transaction->rename_map)
{
if (it.second.empty())
new_checksums.files.erase(it.first);
@@ -1846,8 +1846,6 @@ void MergeTreeData::alterDataPart(
transaction->new_columns.writeText(columns_file);
transaction->rename_map["columns.txt.tmp"] = "columns.txt";
}
- return;
}
void MergeTreeData::changeSettings(
@@ -2688,7 +2686,7 @@ MergeTreeData::DataPartPtr MergeTreeData::getActiveContainingPart(
void MergeTreeData::swapActivePart(MergeTreeData::DataPartPtr part_copy)
{
auto lock = lockParts();
- for (auto original_active_part : getDataPartsStateRange(DataPartState::Committed))
+ for (const auto & original_active_part : getDataPartsStateRange(DataPartState::Committed))
{
if (part_copy->name == original_active_part->name)
{
@@ -3810,10 +3808,7 @@ bool MergeTreeData::areBackgroundMovesNeeded() const
if (policy->getVolumes().size() > 1)
return true;
- if (policy->getVolumes().size() == 1 && policy->getVolumes()[0]->disks.size() > 1 && move_ttl_entries.size() > 0)
- return true;
- return false;
+ return policy->getVolumes().size() == 1 && policy->getVolumes()[0]->disks.size() > 1 && !move_ttl_entries.empty();
}
bool MergeTreeData::movePartsToSpace(const DataPartsVector & parts, SpacePtr space)
......
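The last hunk above is the readability-simplify-boolean-expr pattern: an if statement whose only job is to choose between "return true;" and "return false;" collapses into returning the condition itself, combined here with readability-container-size-empty, which replaces size() > 0 with !empty(). A minimal standalone sketch of the same transformation; the function and variable names are illustrative and not from the repository:

#include <vector>

/// Before the cleanup this body would read:
///     if (disks.size() > 1 && !rules.empty())
///         return true;
///     return false;
/// After it, the condition is returned directly.
bool needsBackgroundMoves(const std::vector<int> & disks, const std::vector<int> & rules)
{
    return disks.size() > 1 && !rules.empty();
}

int main()
{
    return needsBackgroundMoves({1, 2}, {3}) ? 0 : 1;
}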
@@ -547,7 +547,7 @@ public:
/// Moves the entire data directory.
/// Flushes the uncompressed blocks cache and the marks cache.
/// Must be called with locked lockStructureForAlter().
- void rename(const String & new_path_to_table_data, const String & new_database_name,
+ void rename(const String & new_table_path, const String & new_database_name,
const String & new_table_name, TableStructureWriteLockHolder &) override;
/// Check if the ALTER can be performed:
@@ -569,7 +569,7 @@ public:
/// Change MergeTreeSettings
void changeSettings(
- const ASTPtr & new_changes,
+ const ASTPtr & new_settings,
TableStructureWriteLockHolder & table_lock_holder);
/// Remove columns, that have been marked as empty after zeroing values with expired ttl
@@ -638,7 +638,7 @@ public:
}
/// For ATTACH/DETACH/DROP PARTITION.
- String getPartitionIDFromQuery(const ASTPtr & partition, const Context & context);
+ String getPartitionIDFromQuery(const ASTPtr & ast, const Context & context);
/// Extracts MergeTreeData of other *MergeTree* storage
/// and checks that their structure suitable for ALTER TABLE ATTACH PARTITION FROM
@@ -957,7 +957,7 @@ protected:
using MatcherFn = std::function<bool(const DataPartPtr &)>;
void freezePartitionsByMatcher(MatcherFn matcher, const String & with_name, const Context & context);
- bool canReplacePartition(const DataPartPtr & data_part) const;
+ bool canReplacePartition(const DataPartPtr & src_part) const;
void writePartLog(
PartLogElement::Type type,
@@ -996,7 +996,7 @@ private:
};
/// Move selected parts to corresponding disks
- bool moveParts(CurrentlyMovingPartsTagger && parts_to_move);
+ bool moveParts(CurrentlyMovingPartsTagger && moving_tagger);
/// Select parts for move and disks for them. Used in background moving processes.
CurrentlyMovingPartsTagger selectPartsForMove();
......
@@ -378,9 +378,8 @@ MergeTreeData::DataPartsVector MergeTreeDataMergerMutator::selectAllPartsFromPar
MergeTreeData::DataParts data_parts = data.getDataParts();
- for (MergeTreeData::DataParts::iterator it = data_parts.cbegin(); it != data_parts.cend(); ++it)
+ for (const auto & current_part : data_parts)
{
- const MergeTreeData::DataPartPtr & current_part = *it;
if (current_part->info.partition_id != partition_id)
continue;
......
@@ -108,7 +108,7 @@ public:
MergeTreeData::MutableDataPartPtr mergePartsToTemporaryPart(
const FutureMergedMutatedPart & future_part,
MergeListEntry & merge_entry, TableStructureReadLockHolder & table_lock_holder, time_t time_of_merge,
- const ReservationPtr & disk_reservation, bool deduplication, bool force_ttl);
+ const ReservationPtr & space_reservation, bool deduplicate, bool force_ttl);
/// Mutate a single data part with the specified commands. Will create and return a temporary part.
MergeTreeData::MutableDataPartPtr mutatePartToTemporaryPart(
@@ -117,7 +117,7 @@ public:
MergeListEntry & merge_entry,
time_t time_of_mutation,
const Context & context,
- const ReservationPtr & disk_reservation,
+ const ReservationPtr & space_reservation,
TableStructureReadLockHolder & table_lock_holder);
MergeTreeData::DataPartPtr renameMergedTemporaryPart(
......
@@ -72,9 +72,9 @@ struct MergeTreeDataPartChecksums
bool read(ReadBuffer & in, size_t format_version);
bool read_v2(ReadBuffer & in);
bool read_v3(ReadBuffer & in);
- bool read_v4(ReadBuffer & in);
+ bool read_v4(ReadBuffer & from);
- void write(WriteBuffer & out) const;
+ void write(WriteBuffer & to) const;
/// Checksum from the set of checksums of .bin files (for deduplication).
void computeTotalChecksumDataOnly(SipHash & hash) const;
......
@@ -36,7 +36,7 @@ void MergeTreeDataPartTTLInfos::read(ReadBuffer & in)
if (json.has("columns"))
{
const JSON & columns = json["columns"];
- for (auto col : columns)
+ for (auto col : columns) // NOLINT
{
MergeTreeDataPartTTLInfo ttl_info;
ttl_info.min = col["min"].getUInt();
@@ -58,7 +58,7 @@ void MergeTreeDataPartTTLInfos::read(ReadBuffer & in)
if (json.has("moves"))
{
const JSON & moves = json["moves"];
- for (auto move : moves)
+ for (auto move : moves) // NOLINT
{
MergeTreeDataPartTTLInfo ttl_info;
ttl_info.min = move["min"].getUInt();
......
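For reference, a minimal standalone sketch (not code from the repository) illustrating two of the clang-tidy modernizations seen in the hunks above: NULL becomes nullptr (modernize-use-nullptr), and explicit iterator loops become range-based for loops over const references (modernize-loop-convert / performance-for-range-copy).

#include <ctime>
#include <iostream>
#include <string>
#include <vector>

int main()
{
    /// modernize-use-nullptr: prefer nullptr over the NULL macro.
    std::time_t now = std::time(nullptr);
    std::cout << "now: " << now << std::endl;

    std::vector<std::string> queries{"SELECT 1", "SELECT 2"};

    /// modernize-loop-convert / performance-for-range-copy: an explicit
    /// iterator loop becomes a range-based for, and const auto & avoids
    /// copying each element on every iteration.
    for (const auto & query : queries)
        std::cout << query << std::endl;

    /// readability-container-size-empty: use empty() instead of comparing size() with 0.
    return queries.empty() ? 1 : 0;
}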