Commit 14c80030 authored by Alexey Milovidov

Normalized formatting [#CLICKHOUSE-3].

Apply in a loop:
find dbms -name '*.h' -or -name '*.cpp' | xargs grep -l -P '^(\t*)    ' | xargs sed -i -r -e 's/^(\t*)    /\1\t/'
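For illustration, a minimal sketch of what that loop might look like (the wrapper itself is an assumption; the commit message only says to apply the command repeatedly). Each sed pass converts a single leading group of four spaces - the leftmost one after any existing tabs - into a tab, so the command has to be re-run until grep finds no remaining matches:

# Hypothetical wrapper around the command above. Repeat while at least one file
# still has a line that starts with tabs followed by four spaces.
while find dbms -name '*.h' -or -name '*.cpp' | xargs grep -l -P '^(\t*)    ' | grep -q .; do
    find dbms -name '*.h' -or -name '*.cpp' | xargs grep -l -P '^(\t*)    ' | xargs sed -i -r -e 's/^(\t*)    /\1\t/'
done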
Parent 93890423
......@@ -14,18 +14,18 @@
*/
#if USE_VECTORCLASS
#if __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wshift-negative-value"
#endif
#include <vectorf128.h>
#include <vectormath_exp.h>
#include <vectormath_trig.h>
#if __clang__
#pragma clang diagnostic pop
#endif
#endif
......
......@@ -161,14 +161,14 @@ AggregateFunctionPtr AggregateFunctionFactory::getImpl(const String & name, cons
if ((recursion_level == 0) && endsWith(name, "State"))
{
/// For aggregate functions of the form `aggState`, where `agg` is the name of another aggregate function.
AggregateFunctionPtr nested = get(trimRight(name, "State"), argument_types, recursion_level + 1);
return createAggregateFunctionState(nested);
}
if ((recursion_level <= 1) && endsWith(name, "Merge"))
{
/// For aggregate functions of the form `aggMerge`, where `agg` is the name of another aggregate function.
if (argument_types.size() != 1)
throw Exception("Incorrect number of arguments for aggregate function " + name, ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);
const DataTypeAggregateFunction * function = typeid_cast<const DataTypeAggregateFunction *>(&*argument_types[0]);
......@@ -202,7 +202,7 @@ AggregateFunctionPtr AggregateFunctionFactory::getImpl(const String & name, cons
if ((recursion_level <= 3) && endsWith(name, "Array"))
{
/// For aggregate functions of the form `aggArray`, where `agg` is the name of another aggregate function.
size_t num_agruments = argument_types.size();
DataTypes nested_arguments;
......@@ -214,7 +214,7 @@ AggregateFunctionPtr AggregateFunctionFactory::getImpl(const String & name, cons
throw Exception("Illegal type " + argument_types[i]->getName() + " of argument #" + toString(i + 1) +
" for aggregate function " + name + ". Must be array.", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
}
/// + 3, so that no other modifier can go before the `Array`
AggregateFunctionPtr nested = get(trimRight(name, "Array"), nested_arguments, recursion_level + 3);
return createAggregateFunctionArray(nested);
}
......@@ -239,11 +239,11 @@ bool AggregateFunctionFactory::isAggregateFunctionName(const String & name, int
if (recursion_level == 0 && case_insensitive_aggregate_functions.count(Poco::toLower(name)))
return true;
/// For aggregate functions of the form `aggState`, where `agg` is the name of another aggregate function.
if ((recursion_level <= 0) && endsWith(name, "State"))
return isAggregateFunctionName(trimRight(name, "State"), recursion_level + 1);
/// For aggregate functions of the form `aggMerge`, where `agg` is the name of another aggregate function.
if ((recursion_level <= 1) && endsWith(name, "Merge"))
return isAggregateFunctionName(trimRight(name, "Merge"), recursion_level + 1);
......@@ -251,10 +251,10 @@ bool AggregateFunctionFactory::isAggregateFunctionName(const String & name, int
if ((recursion_level <= 2) && endsWith(name, "If"))
return isAggregateFunctionName(trimRight(name, "If"), recursion_level + 1);
/// For aggregate functions of the form `aggArray`, where `agg` is the name of another aggregate function.
if ((recursion_level <= 3) && endsWith(name, "Array"))
{
/// + 3, so that no other modifier can go before `Array`
return isAggregateFunctionName(trimRight(name, "Array"), recursion_level + 3);
}
......
......@@ -31,7 +31,7 @@ AggregateFunctionPtr createAggregateFunctionUniqUpTo(const std::string & name, c
}
else if (argument_types.size() > 1)
{
/// If there are several arguments, then no tuples allowed among them.
for (const auto & type : argument_types)
if (typeid_cast<const DataTypeTuple *>(type.get()))
throw Exception("Tuple argument of function " + name + " must be the only argument",
......
......@@ -37,7 +37,7 @@ AggregateFunctionPtr createAggregateFunctionUniq(const std::string & name, const
}
else if (argument_types.size() > 1)
{
/// If there are several arguments, then no tuples allowed among them.
for (const auto & type : argument_types)
if (typeid_cast<const DataTypeTuple *>(type.get()))
throw Exception("Tuple argument of function " + name + " must be the only argument",
......@@ -75,7 +75,7 @@ AggregateFunctionPtr createAggregateFunctionUniq(const std::string & name, const
}
else if (argument_types.size() > 1)
{
/// If there are several arguments, then no tuples allowed among them.
for (const auto & type : argument_types)
if (typeid_cast<const DataTypeTuple *>(type.get()))
throw Exception("Tuple argument of function " + name + " must be the only argument",
......
......@@ -282,7 +282,7 @@ void MultiplexedConnections::initFromShard(IConnectionPool * pool)
auto entries = pool->getMany(settings, pool_mode);
/// If getMany() did not allocate connections and did not throw exceptions, this means that
/// `skip_unavailable_shards` was set. Then just return.
if (entries.empty())
return;
......@@ -397,7 +397,7 @@ MultiplexedConnections::ReplicaMap::iterator MultiplexedConnections::waitForRead
read_list.reserve(active_connection_total_count);
/// First, we check if there are data already in the buffer
/// of at least one connection.
for (const auto & e : replica_map)
{
const ReplicaState & state = e.second;
......
......@@ -28,11 +28,11 @@ ColumnPtr ColumnAggregateFunction::convertToValues() const
const IAggregateFunction * function = func.get();
ColumnPtr res = function->getReturnType()->createColumn();
/** If the aggregate function returns an unfinalized/unfinished state,
* then you just need to copy pointers to it and also shared ownership of data.
*
* Also replace the aggregate function with the nested function.
* That is, if this column is the states of the aggregate function `aggState`,
* then we return the same column, but with the states of the aggregate function `agg`.
* These are the same states, changing only the function to which they correspond.
*
......@@ -43,10 +43,10 @@ ColumnPtr ColumnAggregateFunction::convertToValues() const
*
* This calculates the aggregate function `quantileTimingState`.
* Its return type AggregateFunction(quantileTiming(0.5), UInt64)`.
* Due to the presence of WITH TOTALS, during aggregation the states of this aggregate function will be stored
* in the ColumnAggregateFunction column of type
* AggregateFunction(quantileTimingState(0.5), UInt64).
* Then, in `TotalsHavingBlockInputStream`, the `convertToValues` method will be called,
* to get the "ready" values.
* But it just converts a column of type
* `AggregateFunction(quantileTimingState(0.5), UInt64)`
......
......@@ -12,9 +12,9 @@ size_t countBytesInFilter(const IColumn::Filter & filt)
{
size_t count = 0;
/** NOTE: In theory, `filt` should only contain zeros and ones.
* But, just in case, the condition > 0 (on signed bytes) is used here.
* It would be better to use != 0, but then SSE2 could not be used.
*/
const Int8 * pos = reinterpret_cast<const Int8 *>(&filt[0]);
......
......@@ -44,8 +44,8 @@ void FileChecker::update(const Files::const_iterator & begin, const Files::const
bool FileChecker::check() const
{
/** Read the files again every time `check` is called - so as not to violate constness.
* The `check` method is rarely called.
*/
Map local_map;
load(local_map);
......@@ -100,7 +100,7 @@ void FileChecker::save() const
if (it != map.begin())
writeString(",", out);
/// `escapeForFileName` is not really needed. But it is left for compatibility with the old code.
writeJSONString(escapeForFileName(it->first), out);
writeString(":{\"size\":\"", out);
writeIntText(it->second, out);
......
......@@ -62,10 +62,10 @@ namespace
}
};
/// By these return codes from the child process, we learn (for sure) about errors when creating it.
enum class ReturnCodes : int
{
CANNOT_DUP_STDIN = 42, /// The value is not important, but it is chosen so that it's rare to conflict with the program return code.
CANNOT_DUP_STDOUT = 43,
CANNOT_DUP_STDERR = 44,
CANNOT_EXEC = 45,
......@@ -79,10 +79,10 @@ namespace DB
std::unique_ptr<ShellCommand> ShellCommand::executeImpl(const char * filename, char * const argv[], bool pipe_stdin_only)
{
/** It is written here that with a normal `vfork` call, there is a chance of deadlock in multithreaded programs,
* because of symbol resolution in the shared library:
* http://www.oracle.com/technetwork/server-storage/solaris10/subprocess-136439.html
* Therefore, we separate the symbol resolution from the call.
*/
static void * real_vfork = dlsym(RTLD_DEFAULT, "vfork");
......@@ -102,8 +102,8 @@ std::unique_ptr<ShellCommand> ShellCommand::executeImpl(const char * filename, c
{
/// We are in the freshly created process.
/// Why `_exit` and not `exit`? Because `exit` calls `atexit` handlers and destructors of thread local storage.
/// And there is a lot of garbage there (including, for example, a locked mutex). This must not be done after `vfork` - a deadlock happens.
/// Replace the file descriptors with the ends of our pipes.
if (STDIN_FILENO != dup2(pipe_stdin.read_fd, STDIN_FILENO))
......@@ -119,7 +119,7 @@ std::unique_ptr<ShellCommand> ShellCommand::executeImpl(const char * filename, c
}
execv(filename, argv);
/// If the process is running, then `execv` does not return here.
_exit(int(ReturnCodes::CANNOT_EXEC));
}
......@@ -138,7 +138,7 @@ std::unique_ptr<ShellCommand> ShellCommand::executeImpl(const char * filename, c
std::unique_ptr<ShellCommand> ShellCommand::execute(const std::string & command, bool pipe_stdin_only)
{
/// Arguments in non-constant chunks of memory (as required for `execv`).
/// Moreover, their copying must be done before calling `vfork`, so after `vfork` do a minimum of things.
std::vector<char> argv0("sh", "sh" + strlen("sh") + 1);
std::vector<char> argv1("-c", "-c" + strlen("-c") + 1);
std::vector<char> argv2(command.data(), command.data() + command.size() + 1);
......
......@@ -27,7 +27,7 @@ std::string StackTrace::toString() const
{
for (size_t i = 0, size = frames_size; i < size; ++i)
{
/// We do "demangling" of names. The name is in parenthesis, before the '+' character.
char * name_start = nullptr;
char * name_end = nullptr;
......
......@@ -19,9 +19,9 @@ bool isLocalAddress(const Poco::Net::SocketAddress & address)
return interfaces.end() != std::find_if(interfaces.begin(), interfaces.end(),
[&] (const Poco::Net::NetworkInterface & interface)
{
/** Compare the addresses without taking into account `scope`.
* Theoretically, this may not be correct - depends on `route` setting
* - through which interface we will actually access the specified address.
*/
return interface.address().length() == address.host().length()
&& 0 == memcmp(interface.address().addr(), address.host().addr(), address.host().length());
......
......@@ -41,7 +41,7 @@ static void localBackupImpl(Poco::Path source_path, Poco::Path destination_path,
std::string source_str = source.toString();
std::string destination_str = destination.toString();
/** We are trying to create a hard link.
* If it already exists, we check that source and destination point to the same inode.
*/
if (0 != link(source_str.c_str(), destination_str.c_str()))
......@@ -85,8 +85,8 @@ void localBackup(Poco::Path source_path, Poco::Path destination_path)
const size_t max_tries = 10;
/** Files in the directory can be constantly added and deleted.
* If some file is deleted during an attempt to make a backup, then try again,
* because it's important to take into account any new files that might appear.
*/
while (true)
{
......
......@@ -274,7 +274,7 @@ int main(int argc, char ** argv)
if (!method || method == 1)
{
/** Option 1.
* In different threads, we aggregate independently into different hash tables.
* Then merge them together.
*/
......@@ -385,9 +385,9 @@ int main(int argc, char ** argv)
if (!method || method == 11)
{
/** Option 11.
* Same as option 1, but with merge, the order of the cycles is changed,
* which potentially can give better cache locality.
*
* In practice, there is no difference.
*/
......@@ -461,11 +461,11 @@ int main(int argc, char ** argv)
if (!method || method == 2)
{
/** Option 2.
* In different threads, we aggregate independently into different two-level hash tables.
* Then merge them together, parallelizing by the first level buckets.
* When using hash tables of large sizes (10 million elements or more),
* and a large number of threads (8-32), the merge is a bottleneck,
* and has a performance advantage of 4 times.
*/
std::vector<MapTwoLevel> maps(num_threads);
......@@ -576,14 +576,14 @@ int main(int argc, char ** argv)
if (!method || method == 3)
{
/** Option 3.
* In different threads, we aggregate independently into different hash tables,
* until their size becomes large enough.
* If the size of the local hash table is large, and there is no element in it,
* then we insert it into one global hash table, protected by mutex,
* and if mutex failed to capture, then insert it into the local one.
* Then merge all the local hash tables to the global one.
* This method is bad - a lot of contention.
*/
std::vector<Map> local_maps(num_threads);
......@@ -646,10 +646,10 @@ int main(int argc, char ** argv)
if (!method || method == 33)
{
/** Option 33.
* In different threads, we aggregate independently into different hash tables,
* until their size becomes large enough.
* Then we insert the data to the global hash table, protected by mutex, and continue.
*/
std::vector<Map> local_maps(num_threads);
......@@ -712,13 +712,13 @@ int main(int argc, char ** argv)
if (!method || method == 4)
{
/** Option 4.
* In different threads, we aggregate independently into different hash tables,
* until their size becomes large enough.
* If the size of the local hash table is large, and there is no element in it,
* then insert it into one of 256 global hash tables, each of which is under its mutex.
* Then merge all local hash tables into the global one.
* This method is not so bad with a lot of threads, but worse than the second one.
*/
std::vector<Map> local_maps(num_threads);
......@@ -783,12 +783,12 @@ int main(int argc, char ** argv)
/* if (!method || method == 5)
{
*/ /** Option 5.
* In different threads, we aggregate independently into different hash tables,
* until their size becomes large enough.
* If the size of the local hash table is large and there is no element in it,
* then insert it into one global hash table containing small latches in each cell,
* and if the latch can not be captured, then insert it into the local one.
* Then merge all local hash tables into the global one.
*/
/*
......@@ -850,10 +850,10 @@ int main(int argc, char ** argv)
/*if (!method || method == 6)
{
*//** Option 6.
* In different threads, we aggregate independently into different hash tables.
* Then "merge" them, passing them in the same order of the keys.
* Quite a slow option.
*/
/*
std::vector<Map> maps(num_threads);
......
......@@ -28,8 +28,8 @@ namespace ErrorCodes
void Block::addDefaults(const NamesAndTypesList & required_columns)
{
/// For missing columns of nested structure, you need to create not a column of empty arrays, but a column of arrays of correct lengths.
/// First, remember the offset columns for all arrays in the block.
std::map<String, ColumnPtr> offset_columns;
for (const auto & elem : data)
......@@ -39,7 +39,7 @@ void Block::addDefaults(const NamesAndTypesList & required_columns)
String offsets_name = DataTypeNested::extractNestedTableName(elem.name);
auto & offsets_column = offset_columns[offsets_name];
/// If for some reason there are different offset columns for one nested structure, then we take the non-empty one.
if (!offsets_column || offsets_column->empty())
offsets_column = array->getOffsetsColumn();
}
......@@ -70,8 +70,8 @@ void Block::addDefaults(const NamesAndTypesList & required_columns)
}
else
{
/** It is necessary to turn a constant column into a full column, since in part of blocks (from other parts),
* it can be full (or the interpreter may decide that it is constant everywhere).
*/
column_to_add.column = dynamic_cast<IColumnConst &>(
*column_to_add.type->createConstColumn(
......@@ -450,7 +450,7 @@ void Block::optimizeNestedArraysOffsets()
if (!it->second->hasEqualOffsets(*column_array))
throw Exception("Sizes of nested arrays do not match", ErrorCodes::SIZES_OF_ARRAYS_DOESNT_MATCH);
/// make columns of arrays offsets inside one nested table point to the same place
column_array->getOffsetsColumn() = it->second->getOffsetsColumn();
}
}
......@@ -479,8 +479,8 @@ bool blocksHaveEqualStructure(const Block & lhs, const Block & rhs)
void getBlocksDifference(const Block & lhs, const Block & rhs, std::string & out_lhs_diff, std::string & out_rhs_diff)
{
/// The traditional task: the largest common subsequence (LCS).
/// Assume that order is important. If this becomes wrong once, let's simplify it: for example, make 2 sets.
std::vector<std::vector<int>> lcs(lhs.columns() + 1);
for (auto & v : lcs)
......@@ -506,15 +506,15 @@ void getBlocksDifference(const Block & lhs, const Block & rhs, std::string & out
{
if (lhs.safeGetByPosition(l - 1) == rhs.safeGetByPosition(r - 1))
{
/// This element is in both sequences, so it does not get into `diff`.
--l;
--r;
}
else
{
/// A small heuristic: this is most often used to get the difference for (expected_block, actual_block).
/// Therefore, preference is given to the column that is in the left block (expected_block),
/// so the column from `actual_block` ends up in `diff`.
if (lcs[l][r - 1] >= lcs[l - 1][r])
right_columns.push_back(rhs.safeGetByPosition(--r));
else
......
......@@ -3,11 +3,11 @@ namespace DB
namespace ErrorCodes
{
/** Previously, these constants were located in one enum.
* But in this case there is a problem: when you add a new constant, you need to recompile
* all translation units that use at least one constant (almost the whole project).
* Therefore it is made so that definitions of constants are located here, in one file,
* and their declaration are in different files, at the place of use.
*/
extern const int UNSUPPORTED_METHOD = 1;
......
......@@ -115,7 +115,7 @@ NamesAndTypesList NamesAndTypesList::filter(const Names & names) const
NamesAndTypesList NamesAndTypesList::addTypes(const Names & names) const
{
/// NOTE It's better to make a map in `IStorage` than to create it here every time again.
google::dense_hash_map<StringRef, const DataTypePtr *, StringRefHash> types;
types.set_empty_key(StringRef());
......
......@@ -8,13 +8,13 @@ void AddingDefaultBlockOutputStream::write(const DB::Block & block)
{
Block res = block;
/// Computes the default values explicitly specified (in column_defaults).
/** @todo if somehow block does not contain values for implicitly-defaulted columns that are prerequisites
* for explicitly-defaulted ones, exception will be thrown during evaluating such columns
* (implicitly-defaulted columns are evaluated on the line after following one. */
evaluateMissingDefaults(res, *required_columns, column_defaults, context);
/// Adds default values for the columns that were not specified.
if (!only_explicit_column_defaults)
/// @todo this line may be moved before `evaluateMissingDefaults` with passing {required_columns - explicitly-defaulted columns}
res.addDefaults(*required_columns);
......
......@@ -34,15 +34,15 @@ Block AggregatingBlockInputStream::readImpl()
}
else
{
/** If there are temporary files with partially-aggregated data on the disk,
* then read and merge them, spending the minimum amount of memory.
*/
ProfileEvents::increment(ProfileEvents::ExternalAggregationMerge);
if (!isCancelled())
{
/// Flush data in the RAM to disk also. It's easier.
size_t rows = data_variants->sizeWithoutOverflowRow();
if (rows)
aggregator.writeToTemporaryFile(*data_variants, rows);
......
......@@ -26,19 +26,19 @@ Block AggregatingSortedBlockInputStream::readImpl()
{
next_key.columns.resize(description.size());
/// Fill in the column numbers that need to be aggregated.
for (size_t i = 0; i < num_columns; ++i)
{
ColumnWithTypeAndName & column = merged_block.safeGetByPosition(i);
/// We leave only states of aggregate functions.
if (!startsWith(column.type->getName(), "AggregateFunction"))
{
column_numbers_not_to_aggregate.push_back(i);
continue;
}
/// Included into PK?
SortDescription::const_iterator it = description.begin();
for (; it != description.end(); ++it)
if (it->column_name == column.name || (it->column_name.empty() && it->column_number == i))
......@@ -72,7 +72,7 @@ void AggregatingSortedBlockInputStream::merge(ColumnPlainPtrs & merged_columns,
{
size_t merged_rows = 0;
/// We take the rows in the correct order and put them in `merged_block`, while the rows are no more than `max_block_size`
while (!queue.empty())
{
TSortCursor current = queue.top();
......@@ -81,7 +81,7 @@ void AggregatingSortedBlockInputStream::merge(ColumnPlainPtrs & merged_columns,
bool key_differs;
if (current_key.empty()) /// The first key encountered.
{
current_key.columns.resize(description.size());
setPrimaryKeyRef(current_key, current);
......@@ -100,7 +100,7 @@ void AggregatingSortedBlockInputStream::merge(ColumnPlainPtrs & merged_columns,
{
current_key.swap(next_key);
/// We will write the data for the group. We copy the values of ordinary columns.
for (size_t i = 0, size = column_numbers_not_to_aggregate.size(); i < size; ++i)
{
size_t j = column_numbers_not_to_aggregate[i];
......
......@@ -94,8 +94,8 @@ void BlockStreamProfileInfo::calculateRowsBeforeLimit() const
{
applied_limit = true;
/** Take the number of lines read below `PartialSorting`, if any, or below `Limit`.
* This is necessary, because sorting can return only part of the rows.
*/
BlockStreamProfileInfos partial_sortings;
collectInfosForStreamsWithName("PartialSorting", partial_sortings);
......@@ -108,7 +108,7 @@ void BlockStreamProfileInfo::calculateRowsBeforeLimit() const
}
else
{
/// Then the data about `rows_before_limit` can be in `RemoteBlockInputStream` (come from a remote server).
BlockStreamProfileInfos remotes;
collectInfosForStreamsWithName("Remote", remotes);
......
......@@ -9,7 +9,7 @@ namespace DB
CollapsingFinalBlockInputStream::~CollapsingFinalBlockInputStream()
{
/// You must cancel all `MergingBlockPtr` so that they do not try to put blocks in `output_blocks`.
previous.block.cancel();
last_positive.block.cancel();
......
......@@ -33,7 +33,7 @@ void CollapsingSortedBlockInputStream::reportIncorrectData()
/** For now we limit ourselves to just logging such situations,
* since the data is generated by external programs.
* With inconsistent data, this is an unavoidable error that cannot easily be corrected by admins. Therefore Warning.
*/
LOG_WARNING(log, s.rdbuf());
}
......@@ -111,7 +111,7 @@ Block CollapsingSortedBlockInputStream::readImpl()
if (merged_columns.empty())
return Block();
/// Additional initialization.
if (first_negative.empty())
{
first_negative.columns.resize(num_columns);
......@@ -135,7 +135,7 @@ void CollapsingSortedBlockInputStream::merge(ColumnPlainPtrs & merged_columns, s
{
size_t merged_rows = 0;
/// Take rows in correct order and put them into `merged_block` until the rows no more than `max_block_size`
for (; !queue.empty(); ++current_pos)
{
TSortCursor current = queue.top();
......@@ -168,7 +168,7 @@ void CollapsingSortedBlockInputStream::merge(ColumnPlainPtrs & merged_columns, s
if (key_differs)
{
/// We write data for the previous primary key.
insertRows(merged_columns, merged_rows);
current_key.swap(next_key);
......@@ -213,7 +213,7 @@ void CollapsingSortedBlockInputStream::merge(ColumnPlainPtrs & merged_columns, s
}
else
{
/// We take next block from the corresponding source, if there is one.
fetchNextBlock(current, queue);
}
}
......
......@@ -61,19 +61,19 @@ Block FilterBlockInputStream::readImpl()
const Block & sample_block = expression->getSampleBlock();
/// Find the current position of the filter column in the block.
/** sample_block has the result structure of evaluating the expression.
* But this structure does not necessarily match expression->execute(res) below,
* because the expression can be applied to a block that also contains additional,
* columns unnecessary for this expression, but needed later, in the next stages of the query execution pipeline.
* There will be no such columns in sample_block.
* Therefore, the position of the filter column in it can be different.
*/
ssize_t filter_column_in_sample_block = filter_column;
if (filter_column_in_sample_block == -1)
filter_column_in_sample_block = sample_block.getPositionByName(filter_column_name);
/// Let's check if the filter column is a constant containing 0 or 1.
ColumnPtr column = sample_block.safeGetByPosition(filter_column_in_sample_block).column;
if (column)
......@@ -99,7 +99,7 @@ Block FilterBlockInputStream::readImpl()
return res;
}
/// Keep reading until we encounter a block that still has rows after filtering, or until the stream ends.
while (1)
{
res = children.back()->read();
......
......@@ -232,7 +232,7 @@ void GraphiteRollupSortedBlockInputStream::startNextRow(ColumnPlainPtrs & merged
void GraphiteRollupSortedBlockInputStream::finishCurrentRow(ColumnPlainPtrs & merged_columns)
{
/// Insert calculated values of the columns `time`, `value`, `version`.
merged_columns[time_column_num]->insert(UInt64(current_time_rounded));
merged_columns[version_column_num]->insert(current_max_version);
......
......@@ -81,7 +81,7 @@ void IBlockInputStream::dumpTree(std::ostream & ostr, size_t indent, size_t mult
{
String id = (*it)->getTreeID();
size_t & subtree_multiplier = multipliers[id];
if (subtree_multiplier != 0) /// Already printed subtrees are marked with zero in the array of multipliers.
{
(*it)->dumpTree(ostr, indent, subtree_multiplier);
subtree_multiplier = 0;
......
......@@ -31,7 +31,7 @@ Block IProfilingBlockInputStream::read()
if (const IProfilingBlockInputStream * p_child = dynamic_cast<const IProfilingBlockInputStream *>(&*child))
info.nested_infos.push_back(&p_child->info);
/// Note that after this, `children` elements can not be deleted before you might need to work with `nested_info`.
info.started = true;
}
......@@ -59,11 +59,11 @@ Block IProfilingBlockInputStream::read()
}
else
{
/** If the stream has finished, then we will ask all children to abort the execution.
* This makes sense when running a query with LIMIT:
* - there is a situation when all the necessary data has already been read,
* but the `children` sources are still working,
* and they may be working in separate threads or even remotely.
*/
cancel();
}
......@@ -201,7 +201,7 @@ void IProfilingBlockInputStream::checkQuota(Block & block)
switch (limits.mode)
{
case LIMITS_TOTAL:
/// Checked in `progress` method.
break;
case LIMITS_CURRENT:
......@@ -239,8 +239,8 @@ void IProfilingBlockInputStream::progressImpl(const Progress & value)
size_t total_rows_estimate = std::max(rows_processed, process_list_elem->progress_in.total_rows.load(std::memory_order_relaxed));
/** Check the restrictions on the amount of data to read, the speed of the query, the quota on the amount of data to read.
* NOTE: Maybe it makes sense to have them checked directly in ProcessList?
*/
if (limits.mode == LIMITS_TOTAL
......@@ -260,7 +260,7 @@ void IProfilingBlockInputStream::progressImpl(const Progress & value)
}
else if (limits.read_overflow_mode == OverflowMode::BREAK)
{
/// For `break`, we will stop only if so many lines were actually read, and not just supposed to be read.
if ((limits.max_rows_to_read && rows_processed > limits.max_rows_to_read)
|| (limits.max_bytes_to_read && bytes_processed > limits.max_bytes_to_read))
{
......
......@@ -36,7 +36,7 @@ static StringRef readName(ReadBuffer & buf, String & tmp)
if (next_pos != buf.buffer().end() && *next_pos != '\\')
{
/// The most likely option is that there is no escape sequence in the key name, and the entire name is placed in the buffer.
assertChar('"', buf);
StringRef res(buf.position(), next_pos - buf.position());
buf.position() += next_pos - buf.position();
......@@ -68,7 +68,7 @@ bool JSONEachRowRowInputStream::read(Block & block)
size_t columns = block.columns();
/// Set of columns for which the values were read. The rest will be filled with default values.
/// TODO Ability to provide your DEFAULTs.
bool read_columns[columns];
memset(read_columns, 0, columns);
......@@ -127,7 +127,7 @@ bool JSONEachRowRowInputStream::read(Block & block)
if (!istr.eof() && *istr.position() == ',')
++istr.position();
/// Fill non-visited columns with the default values.
for (size_t i = 0; i < columns; ++i)
if (!read_columns[i])
block.getByPosition(i).column.get()->insertDefault();
......
......@@ -85,7 +85,7 @@ void MergingSortedBlockInputStream::init(Block & merged_block, ColumnPlainPtrs &
/// Initialize the result.
/// We clone the structure of the first non-empty source block.
{
auto it = source_blocks.cbegin();
for (; it != source_blocks.cend(); ++it)
......@@ -178,8 +178,8 @@ void MergingSortedBlockInputStream::merge(Block & merged_block, ColumnPlainPtrs
{
size_t merged_rows = 0;
/** Increase row counters.
* Return true if it's time to finish generating the current data block.
*/
auto count_row_and_check_limit = [&, this]()
{
......@@ -202,7 +202,7 @@ void MergingSortedBlockInputStream::merge(Block & merged_block, ColumnPlainPtrs
return false;
};
/// Take rows in required order and put them into `merged_block`, while the rows are no more than `max_block_size`
while (!queue.empty())
{
TSortCursor current = queue.top();
......@@ -210,8 +210,8 @@ void MergingSortedBlockInputStream::merge(Block & merged_block, ColumnPlainPtrs
while (true)
{
/** And what if the block is smaller or equal than the rest for the current cursor?
* Or is there only one data source left in the queue? Then you can take the entire block of current cursor.
*/
if (current.impl->isFirst() && (queue.empty() || current.totallyLessOrEquals(queue.top())))
{
......@@ -286,7 +286,7 @@ void MergingSortedBlockInputStream::merge(Block & merged_block, ColumnPlainPtrs
return;
}
/// Do not put the cursor back in the queue, but continue to work with the current cursor.
// std::cerr << "current is still on top, using current row\n";
continue;
}
......
......@@ -126,7 +126,7 @@ Block NativeBlockInputStream::readImpl()
{
if (use_index)
{
/// If the current position is what is required, the real seek does not occur.
istr_concrete->seek(index_column_it->location.offset_in_compressed_file, index_column_it->location.offset_in_decompressed_block);
}
......
......@@ -44,8 +44,8 @@ void NativeBlockOutputStream::flush()
void NativeBlockOutputStream::writeData(const IDataType & type, const ColumnPtr & column, WriteBuffer & ostr, size_t offset, size_t limit)
{
/** If there are columns-constants - then we materialize them.
* (Since the data type does not know how to serialize / deserialize constants.)
*/
ColumnPtr full_column;
......@@ -81,12 +81,12 @@ void NativeBlockOutputStream::writeData(const IDataType & type, const ColumnPtr
if (offset > offsets.size())
return;
/** offset - from which array to write.
* limit - how many arrays should be written, or 0, if you write everything that is.
* end - up to which array written part finishes.
*
* nested_offset - from which nested element to write.
* nested_limit - how many nested elements to write, or 0, if you write everything that is.
*/
size_t end = std::min(offset + limit, offsets.size());
......@@ -130,7 +130,7 @@ void NativeBlockOutputStream::write(const Block & block)
writeVarUInt(rows, ostr);
/** The index has the same structure as the data stream.
* But instead of column values, it contains a mark that points to the location in the data file where this part of the column is located.
*/
if (index_ostr)
{
......@@ -145,7 +145,7 @@ void NativeBlockOutputStream::write(const Block & block)
if (index_ostr)
{
ostr_concrete->next(); /// Finish compressed block.
mark.offset_in_compressed_file = initial_size_of_file + ostr_concrete->getCompressedBytes();
mark.offset_in_decompressed_block = ostr_concrete->getRemainingBytes();
}
......
......@@ -38,7 +38,7 @@ String ParallelAggregatingBlockInputStream::getID() const
for (size_t i = 0; i < children.size(); ++i)
children_ids[i] = children[i]->getID();
/// Order does not matter.
std::sort(children_ids.begin(), children_ids.end());
for (size_t i = 0; i < children_ids.size(); ++i)
......@@ -80,8 +80,8 @@ Block ParallelAggregatingBlockInputStream::readImpl()
}
else
{
/** If there are temporary files with partially-aggregated data on the disk,
* then read and merge them, spending the minimum amount of memory.
*/
ProfileEvents::increment(ProfileEvents::ExternalAggregationMerge);
......@@ -133,7 +133,7 @@ void ParallelAggregatingBlockInputStream::Handler::onFinishThread(size_t thread_
{
if (!parent.isCancelled() && parent.aggregator.hasTemporaryFiles())
{
/// Flush data in the RAM to disk. So it's easier to unite them later.
auto & data = *parent.many_data[thread_num];
if (data.isConvertibleToTwoLevel())
......@@ -149,8 +149,8 @@ void ParallelAggregatingBlockInputStream::Handler::onFinish()
{
if (!parent.isCancelled() && parent.aggregator.hasTemporaryFiles())
{
/// It may happen that some data has not yet been flushed,
/// because at the time of `onFinishThread` call, no data has been flushed to disk, and then some were.
for (auto & data : parent.many_data)
{
if (data->isConvertibleToTwoLevel())
......
......@@ -101,7 +101,7 @@ void PrettyBlockOutputStream::write(const Block & block_)
return;
}
/// We will insert here columns with the calculated values of visible lengths.
Block block = block_;
size_t rows = block.rows();
......
......@@ -15,7 +15,7 @@ void PrettySpaceBlockOutputStream::write(const Block & block_)
return;
}
/// We will insert here columns with the calculated values of visible lengths.
Block block = block_;
size_t rows = block.rows();
......
......@@ -204,7 +204,7 @@ void RemoteBlockInputStream::readSuffixImpl()
* - nothing started to do;
* - received all packets before EndOfStream;
* - received an exception from one replica;
* - received an unknown packet from one replica;
* then you do not need to read anything.
*/
......
......@@ -93,7 +93,7 @@ Block SummingSortedBlockInputStream::readImpl()
}
else
{
/// Leave only numeric types. Note that dates and datetime here are not considered such.
if (!column.type->isNumeric() ||
column.type->getName() == "Date" ||
column.type->getName() == "DateTime" ||
......
......@@ -54,7 +54,7 @@ static bool readName(ReadBuffer & buf, StringRef & ref, String & tmp)
bool have_value = *next_pos == '=';
if (tmp.empty())
{
/// No need to copy data, you can refer directly to the `buf`.
ref = StringRef(buf.position(), next_pos - buf.position());
buf.position() += next_pos + have_value - buf.position();
}
......@@ -92,7 +92,7 @@ bool TSKVRowInputStream::read(Block & block)
size_t columns = block.columns();
/// Set of columns for which the values were read. The rest will be filled with default values.
/// TODO Ability to provide your DEFAULTs.
bool read_columns[columns];
memset(read_columns, 0, columns);
......
......@@ -54,7 +54,7 @@ const Block & TotalsHavingBlockInputStream::getTotals()
{
if (!totals)
{
/** If totals_mode == AFTER_HAVING_AUTO, you need to decide whether to add aggregates to TOTALS for rows
* that did not pass max_rows_to_group_by.
*/
if (overflow_aggregates)
......
......@@ -41,9 +41,9 @@ bool ValuesRowInputStream::read(Block & block)
if (istr.eof() || *istr.position() == ';')
return false;
/** Typically, this is the usual format for streaming parsing.
* But as an exception, it also supports processing arbitrary expressions instead of values.
* This is very inefficient. But if there are no expressions, then there is no overhead.
*/
ParserExpressionWithOptionalAlias parser(false);
......@@ -75,8 +75,8 @@ bool ValuesRowInputStream::read(Block & block)
if (!interpret_expressions)
throw;
/** The normal streaming parser could not parse the value.
* Let's try to parse it with a SQL parser as a constant expression.
* This is an exceptional case.
*/
if (e.code() == ErrorCodes::CANNOT_PARSE_INPUT_ASSERTION_FAILED
......
......@@ -19,8 +19,8 @@ XMLRowOutputStream::XMLRowOutputStream(WriteBuffer & ostr_, const Block & sample
if (!sample_.getByPosition(i).type->isNumeric())
have_non_numeric_columns = true;
/// As element names, we will use the column name if it has a valid form, or "field", otherwise.
/// The condition below is more strict than the XML standard requires.
bool is_column_name_suitable = true;
const char * begin = fields[i].name.data();
const char * end = begin + fields[i].name.size();
......
......@@ -84,7 +84,7 @@ try
{"WithHash", std::make_shared<DataTypeUInt8>()},
};
/// we create a description of how to read data from the tab separated dump
Block sample;
for (const auto & name_type : names_and_types_list)
......@@ -96,7 +96,7 @@ try
sample.insert(std::move(elem));
}
/// read the data from row tsv file and simultaneously write to the block tsv file
{
ReadBufferFromIStream in_buf(std::cin);
WriteBufferFromOStream out_buf(std::cout);
......
......@@ -105,12 +105,12 @@ void DataTypeArray::serializeBinaryBulk(const IColumn & column, WriteBuffer & os
if (offset > offsets.size())
return;
/** offset - from which array to write.
* limit - how many arrays should be written, or 0, if you write everything that is.
* end - up to which array the recorded piece ends.
*
* nested_offset - from which nested element to write.
* nested_limit - how many nested elements to write, or 0, if you write everything there is.
*/
size_t end = std::min(offset + limit, offsets.size());
......@@ -131,7 +131,7 @@ void DataTypeArray::deserializeBinaryBulk(IColumn & column, ReadBuffer & istr, s
ColumnArray::Offsets_t & offsets = column_array.getOffsets();
IColumn & nested_column = column_array.getData();
/// The number of values corresponding to `offsets` must be read.
size_t last_offset = (offsets.empty() ? 0 : offsets.back());
if (last_offset < nested_column.size())
throw Exception("Nested column longer than last offset", ErrorCodes::LOGICAL_ERROR);
......@@ -390,7 +390,7 @@ ColumnPtr DataTypeArray::createColumn() const
ColumnPtr DataTypeArray::createConstColumn(size_t size, const Field & field) const
{
/// `this` can not be passed as the last argument.
return std::make_shared<ColumnConstArray>(size, get<const Array &>(field), std::make_shared<DataTypeArray>(nested));
}
......
......@@ -133,7 +133,7 @@ Field DataTypeNumberBase<T>::getDefault() const
template <typename T>
void DataTypeNumberBase<T>::serializeBinary(const Field & field, WriteBuffer & ostr) const
{
/// ColumnVector<T>::value_type is a narrower type. For example, UInt8, when the Field type is UInt64
typename ColumnVector<T>::value_type x = get<typename NearestFieldType<FieldType>::Type>(field);
writeBinary(x, ostr);
}
......
......@@ -126,7 +126,7 @@ static NO_INLINE void deserializeBinarySSE2(ColumnString::Chars_t & data, Column
if (size)
{
#if __SSE2__
/// An optimistic branch in which more efficient copying is possible.
if (offset + 16 * UNROLL_TIMES <= data.allocated_size() && istr.position() + size + 16 * UNROLL_TIMES <= istr.buffer().end())
{
const __m128i * sse_src_pos = reinterpret_cast<const __m128i *>(istr.position());
......@@ -184,8 +184,8 @@ void DataTypeString::deserializeBinaryBulk(IColumn & column, ReadBuffer & istr,
}
else
{
/** A small heuristic to detect that there are a lot of empty strings in the column.
* In this case, to save RAM, we will assume that the average size of the value is small.
*/
if (istr.position() + sizeof(UInt32) <= istr.buffer().end()
&& unalignedLoad<UInt32>(istr.position()) == 0) /// The first 4 rows are in the buffer and are empty.
......
......@@ -69,13 +69,13 @@ DataTypePtr FieldToDataType::operator() (Array & x) const
if (x.empty())
throw Exception("Cannot infer type of empty array", ErrorCodes::EMPTY_DATA_PASSED);
/** The type of the array should be determined by the type of its elements.
* If the elements are numbers, then select the smallest common type, if any,
* or throw an exception.
* The code is similar to NumberTraits::ResultOfIf, but it's hard to use this code directly.
*
* Also notice that Float32 is not output, only Float64 is used instead.
* This is done because Float32 type literals do not exist in the query.
*/
bool has_string = false;
......
......@@ -96,7 +96,7 @@ DatabaseCloud::DatabaseCloud(
void loadTables(Context & context, ThreadPool * thread_pool, bool has_force_restore_data_flag)
{
/// Do nothing - all tables are loaded lazily.
}
......@@ -148,11 +148,11 @@ String DatabaseCloud::getTableDefinitionFromHash(Hash hash) const
*/
struct TableDescription
{
/// Hash of the table structure. The structure itself is stored separately.
Hash definition_hash;
/// The name of the local table to store data. It can be empty if nothing else has been written to the table.
String local_table_name;
/// The list of hosts on which the table data is located. It can be empty if nothing else has been written to the table.
std::vector<String> hosts;
void write(WriteBuffer & buf) const
......@@ -199,7 +199,7 @@ struct TableSet
{
writeCString("Version 1\n", buf);
CompressedWriteBuffer out(buf); /// NOTE You can reduce size of allocated buffer.
for (const auto & kv : map)
{
writeBinary(kv.first, out);
......@@ -229,7 +229,7 @@ struct TableSet
*/
struct LocalTableSet
{
/// Hash of name -> hash of structure.
using Container = std::map<Hash, Hash>;
Container map;
......@@ -253,7 +253,7 @@ struct LocalTableSet
{
writeCString("Version 1\n", buf);
CompressedWriteBuffer out(buf); /// NOTE You can reduce size of allocated buffer.
for (const auto & kv : map)
{
writePODBinary(kv.first, out);
......@@ -300,7 +300,7 @@ static void modifyTableSet(zkutil::ZooKeeperPtr & zookeeper, const String & path
if (code == ZOK)
break;
else if (code == ZBADVERSION)
continue; /// Node was changed meanwhile - we'll try again.
else
throw zkutil::KeeperException(code, path);
}
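modifyTableSet above is a classic optimistic read-modify-write against ZooKeeper: read the node together with its version, apply the change, write back conditionally, and retry from the top when ZBADVERSION reports a concurrent modification. A generic sketch of the same retry pattern over a hypothetical versioned store (VersionedStore is invented for illustration and is not the zkutil API):

#include <functional>
#include <iostream>
#include <mutex>
#include <string>
#include <utility>

/// A toy versioned store: setIfVersion() succeeds only if the caller passes the current version.
struct VersionedStore
{
    std::mutex m;
    std::string value;
    int version = 0;

    std::pair<std::string, int> get() { std::lock_guard<std::mutex> l(m); return {value, version}; }

    bool setIfVersion(const std::string & new_value, int expected_version)
    {
        std::lock_guard<std::mutex> l(m);
        if (version != expected_version)
            return false;               /// Analogue of ZBADVERSION.
        value = new_value;
        ++version;
        return true;
    }
};

/// Read-modify-write with retries, the same shape as modifyTableSet.
void modifyWithRetries(VersionedStore & store, const std::function<std::string(std::string)> & transform)
{
    while (true)
    {
        auto [old_value, old_version] = store.get();
        if (store.setIfVersion(transform(old_value), old_version))
            break;
        /// Otherwise the value was changed in the meantime - re-read and try again.
    }
}

int main()
{
    VersionedStore store;
    modifyWithRetries(store, [](std::string v) { return v + "x"; });
    std::cout << store.get().first << '\n';   /// "x"
}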
......@@ -343,7 +343,7 @@ static void modifyTwoTableSets(zkutil::ZooKeeperPtr & zookeeper, const String &
if (code == ZOK)
break;
else if (code == ZBADVERSION)
continue; /// Node was changed meanwhile - we'll try again.
else
throw zkutil::KeeperException(code, path1 + ", " + path2);
}
......@@ -352,8 +352,8 @@ static void modifyTwoTableSets(zkutil::ZooKeeperPtr & zookeeper, const String &
bool DatabaseCloud::isTableExist(const String & table_name) const
{
/// We look for a local table in the local table cache or in the file system under `path`.
/// If it is not found, we look for the cloud table in ZooKeeper.
{
std::lock_guard<std::mutex> lock(local_tables_mutex);
......@@ -378,8 +378,8 @@ bool DatabaseCloud::isTableExist(const String & table_name) const
StoragePtr DatabaseCloud::tryGetTable(const String & table_name)
{
/// We look for a local table first.
/// If it is not found, we look for the cloud table in ZooKeeper.
{
std::lock_guard<std::mutex> lock(local_tables_mutex);
......@@ -426,7 +426,7 @@ StoragePtr DatabaseCloud::tryGetTable(const String & table_name)
const TableDescription & description = tables_info.at(table_name);
String definition = getTableDefinitionFromHash(description.definition_hash);
/// TODO Initialization of `StorageCloud` object
return {};
}
}
......@@ -504,7 +504,7 @@ public:
{
String definition = parent().getTableDefinitionFromHash(table_set_iterator->second.definition_hash);
/// TODO Initialization of `StorageCloud` object
return {};
}
};
......@@ -610,10 +610,10 @@ void DatabaseCloud::createTable(
}
else
{
/// A rarer branch, since there are few unique table definitions.
/// There is a race condition in which the node already exists, but a check for a logical error (see above) will not be performed.
/// It does not matter.
/// By the way, nodes in `table_definitions` are never deleted.
zookeeper->tryCreate(zookeeper_definition_path, definition, zkutil::CreateMode::Persistent);
}
......@@ -675,7 +675,7 @@ void DatabaseCloud::removeTable(const String & table_name)
{
Hash table_hash = getTableHash(table_name);
/// Delete information about the local table from ZK.
modifyTableSet<LocalTableSet>(
zookeeper,
zookeeper_path + "/local_tables/" + name + "/" + getNameOfNodeWithTables(table_name),
......@@ -683,7 +683,7 @@ void DatabaseCloud::removeTable(const String & table_name)
{
auto it = set.map.find(table_hash);
if (it == set.map.end())
return false; /// The table has already been deleted.
set.map.erase(it);
return true;
......@@ -707,7 +707,7 @@ void DatabaseCloud::removeTable(const String & table_name)
{
auto it = set.map.find(table_name);
if (it == set.map.end())
return false; /// The table has already been deleted.
description = it->second;
set.map.erase(it);
......@@ -716,7 +716,7 @@ void DatabaseCloud::removeTable(const String & table_name)
if (!description.local_table_name.empty() && !description.hosts.empty())
{
/// Delete the local tables. TODO: decide whether to do it right here or in a separate background thread.
}
}
}
......@@ -725,8 +725,8 @@ void DatabaseCloud::removeTable(const String & table_name)
void DatabaseCloud::renameTable(
const Context & context, const String & table_name, IDatabase & to_database, const String & to_table_name, const Settings & settings)
{
/// Only cloud tables can be renamed.
/// Moving tables between databases is not supported.
if (&to_database != this)
throw Exception("Moving of tables in Cloud database between databases is not supported", ErrorCodes::NOT_IMPLEMENTED);
......
......@@ -100,7 +100,7 @@ ASTPtr DatabaseMemory::getCreateQuery(const String & table_name) const
void DatabaseMemory::shutdown()
{
/// You cannot hold a lock during shutdown.
/// Because inside the `shutdown` function tables can work with the database, and the mutex is not recursive.
for (auto iterator = getIterator(); iterator->isValid(); iterator->next())
iterator->table()->shutdown();
......
......@@ -62,8 +62,8 @@ static void loadTable(
readStringUntilEOF(s, in);
}
/** Empty metadata files can be left behind after a hard restart of the server.
* Remove them to slightly reduce the amount of work for the admins on startup.
*/
if (s.empty())
{
......@@ -110,11 +110,11 @@ void DatabaseOrdinary::loadTables(Context & context, ThreadPool * thread_pool, b
if (dir_it.name().at(0) == '.')
continue;
/// There may be leftover .sql.bak files - skip them.
if (endsWith(dir_it.name(), ".sql.bak"))
continue;
/// There may be leftover .sql.tmp files - delete them.
if (endsWith(dir_it.name(), ".sql.tmp"))
{
LOG_INFO(log, "Removing file " << dir_it->path());
......@@ -122,7 +122,7 @@ void DatabaseOrdinary::loadTables(Context & context, ThreadPool * thread_pool, b
continue;
}
/// The required files have names like `table_name.sql`
if (endsWith(dir_it.name(), ".sql"))
file_names.push_back(dir_it.name());
else
......@@ -130,9 +130,9 @@ void DatabaseOrdinary::loadTables(Context & context, ThreadPool * thread_pool, b
ErrorCodes::INCORRECT_FILE_NAME);
}
/** Tables load faster if they are loaded in sorted (by name) order.
* Otherwise (for the ext4 file system), `DirectoryIterator` iterates through them in some order
* that corresponds neither to the order of table creation nor to their location on disk.
*/
std::sort(file_names.begin(), file_names.end());
......@@ -150,7 +150,7 @@ void DatabaseOrdinary::loadTables(Context & context, ThreadPool * thread_pool, b
{
const String & table = *it;
/// Messages, so that it's not boring to wait for the server to load for a long time.
if ((++tables_processed) % PRINT_MESSAGE_EACH_N_TABLES == 0
|| watch.lockTestAndRestart(PRINT_MESSAGE_EACH_N_SECONDS))
{
......@@ -162,8 +162,8 @@ void DatabaseOrdinary::loadTables(Context & context, ThreadPool * thread_pool, b
}
};
/** `packaged_task` is used so that exceptions are automatically propagated to the main thread.
* Disadvantage: exceptions reach the main thread only after all tasks have finished.
*/
const size_t bunch_size = TABLES_PARALLEL_LOAD_BUNCH_SIZE;
......@@ -192,17 +192,17 @@ void DatabaseOrdinary::loadTables(Context & context, ThreadPool * thread_pool, b
void DatabaseOrdinary::createTable(
const String & table_name, const StoragePtr & table, const ASTPtr & query, const String & engine, const Settings & settings)
{
/// Create a file with metadata if necessary - if the query is not ATTACH.
/// Write an `ATTACH table` query into it.
/** The code is based on the assumption that all threads share the same order of operations:
* - creating the .sql.tmp file;
* - adding the table to `tables`;
* - renaming .sql.tmp to .sql.
*/
/// A race condition would be possible if a table with the same name were created simultaneously with CREATE and with ATTACH.
/// But there is protection against it - see the use of DDLGuard in InterpreterCreateQuery.
{
std::lock_guard<std::mutex> lock(mutex);
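The ordering spelled out in the comments (write the metadata into a .sql.tmp file, register the table, then rename .sql.tmp to .sql) is the usual write-temp-then-rename trick: the rename is atomic, so a crash never leaves a half-written .sql file, only at worst a stale .sql.tmp that the loader deletes on startup. A small standalone sketch of that pattern with the standard filesystem API; the directory, table name, and definition text are illustrative:

#include <filesystem>
#include <fstream>
#include <iostream>
#include <string>

namespace fs = std::filesystem;

/// Write `definition` to `<table>.sql` atomically: a crash can leave at worst a stale .sql.tmp,
/// which a loader like DatabaseOrdinary::loadTables simply removes on startup.
void writeMetadataAtomically(const fs::path & metadata_dir, const std::string & table, const std::string & definition)
{
    fs::path tmp = metadata_dir / (table + ".sql.tmp");
    fs::path final_path = metadata_dir / (table + ".sql");

    {
        std::ofstream out(tmp, std::ios::trunc);
        out << definition;
        out.flush();                    /// Real code would also fsync here.
    }

    fs::rename(tmp, final_path);        /// Atomic replacement of the old file, if any.
}

int main()
{
    fs::path dir = fs::temp_directory_path() / "metadata_demo";
    fs::create_directories(dir);
    writeMetadataAtomically(dir, "hits", "ATTACH TABLE hits (...) ENGINE = Log");
    std::cout << fs::exists(dir / "hits.sql") << '\n';   /// 1
}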
......@@ -293,7 +293,7 @@ void DatabaseOrdinary::renameTable(
if (!table)
throw Exception("Table " + name + "." + table_name + " doesn't exist.", ErrorCodes::TABLE_ALREADY_EXISTS);
/// Notify the table that it is being renamed. If the table does not support renaming, an exception is thrown.
try
{
table->rename(context.getPath() + "/data/" + escapeForFileName(to_database_concrete->name) + "/",
......@@ -346,8 +346,8 @@ ASTPtr DatabaseOrdinary::getCreateQuery(const String & table_name) const
void DatabaseOrdinary::shutdown()
{
/// You cannot hold a lock during shutdown.
/// Because inside the `shutdown` function the tables can work with the database, and the mutex is not recursive.
for (auto iterator = getIterator(); iterator->isValid(); iterator->next())
iterator->table()->shutdown();
......@@ -359,7 +359,7 @@ void DatabaseOrdinary::shutdown()
void DatabaseOrdinary::drop()
{
/// No additional removal actions are required.
}
......@@ -413,7 +413,7 @@ void DatabaseOrdinary::alterTable(
try
{
/// rename atomically replaces the old file with the new one.
Poco::File(table_metadata_tmp_path).renameTo(table_metadata_path);
}
catch (...)
......
......@@ -16,7 +16,7 @@ String getTableDefinitionFromCreateQuery(const ASTPtr & query)
ASTPtr query_clone = query->clone();
ASTCreateQuery & create = typeid_cast<ASTCreateQuery &>(*query_clone.get());
/// We remove everything that is not needed for ATTACH from the query.
create.attach = true;
create.database.clear();
create.as_database.clear();
......@@ -26,7 +26,7 @@ String getTableDefinitionFromCreateQuery(const ASTPtr & query)
String engine = typeid_cast<ASTFunction &>(*create.storage).name;
/// For the View engines it is necessary to keep the SELECT query itself; for the other engines it must be removed.
if (engine != "View" && engine != "MaterializedView")
create.select = nullptr;
......@@ -52,9 +52,9 @@ std::pair<String, StoragePtr> createTableFromDefinition(
ast_create_query.attach = true;
ast_create_query.database = database_name;
/// We do not use `InterpreterCreateQuery::execute` directly, because
/// - the database has not been created yet;
/// - the code is simpler, since the query has already been brought to a suitable form.
InterpreterCreateQuery::ColumnsInfo columns_info = InterpreterCreateQuery::getColumnsInfo(ast_create_query.columns, context);
......
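getTableDefinitionFromCreateQuery strips a CREATE query down to what ATTACH needs: set the attach flag, clear the database-related names, and keep the SELECT only for the View engines. A toy sketch of that transformation over a hypothetical, heavily simplified CreateQuery struct (not the real AST classes):

#include <iostream>
#include <memory>
#include <string>

/// Hypothetical, heavily simplified stand-in for the CREATE query AST.
struct CreateQuery
{
    bool attach = false;
    std::string database;
    std::string as_database;
    std::string as_table;
    std::string engine;
    std::shared_ptr<std::string> select;   /// SELECT part, present for (Materialized)View.
};

/// Keep only what ATTACH needs, mirroring the idea of getTableDefinitionFromCreateQuery.
CreateQuery toAttachDefinition(CreateQuery create)
{
    create.attach = true;
    create.database.clear();
    create.as_database.clear();
    create.as_table.clear();

    /// For the View engines the SELECT query itself must be kept; for other engines it is dropped.
    if (create.engine != "View" && create.engine != "MaterializedView")
        create.select = nullptr;

    return create;
}

int main()
{
    CreateQuery q;
    q.database = "default";
    q.engine = "Log";
    q.select = std::make_shared<std::string>("SELECT 1");
    CreateQuery a = toAttachDefinition(q);
    std::cout << a.attach << ' ' << (a.select == nullptr) << '\n';   /// 1 1
}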
......@@ -19,9 +19,9 @@ bool isAttributeTypeConvertibleTo(AttributeUnderlyingType from, AttributeUnderly
if (from == to)
return true;
/** This enum may be somewhat incomplete, and its meaning may not coincide with NumberTraits.h
* (for example, because integers cannot be converted to floats).
* This is acceptable for a limited usage scope.
*/
if ( (from == AttributeUnderlyingType::UInt8 && to == AttributeUnderlyingType::UInt16)
|| (from == AttributeUnderlyingType::UInt8 && to == AttributeUnderlyingType::UInt32)
......
......@@ -69,8 +69,8 @@ void RegionsHierarchy::reload()
DB::assertChar('\t', in);
DB::readIntText(read_type, in);
/** Then there can be a newline (old version)
* or tab, the region's population, line feed (new version).
*/
RegionPopulation population = 0;
if (!in.eof() && *in.position() == '\t')
......
......@@ -61,7 +61,7 @@ void RegionsNames::reload(const std::string & directory)
Chars new_chars;
StringRefs new_names_refs(initial_size, StringRef("", 0));
/// Allocate a contiguous slice of memory large enough to store all the names.
new_chars.reserve(Poco::File(path).getSize());
while (!in.eof())
......
......@@ -52,7 +52,7 @@ BlockInputStreamPtr MySQLDictionarySource::loadAll()
BlockInputStreamPtr MySQLDictionarySource::loadIds(const std::vector<UInt64> & ids)
{
/// We do not log the query here and do not update the modification time, since the query can be large and is issued often.
const auto query = query_builder.composeLoadIdsQuery(ids);
return std::make_shared<MySQLBlockInputStream>(pool.Get(), query, sample_block, max_block_size);
......@@ -61,7 +61,7 @@ BlockInputStreamPtr MySQLDictionarySource::loadIds(const std::vector<UInt64> & i
BlockInputStreamPtr MySQLDictionarySource::loadKeys(
const ConstColumnPlainPtrs & key_columns, const std::vector<std::size_t> & requested_rows)
{
/// We do not log the query here and do not update the modification time, since the query can be large and is issued often.
const auto query = query_builder.composeLoadKeysQuery(key_columns, requested_rows, ExternalQueryBuilder::AND_OR_CHAIN);
return std::make_shared<MySQLBlockInputStream>(pool.Get(), query, sample_block, max_block_size);
......
......@@ -2737,7 +2737,7 @@ void FunctionArrayReduce::executeImpl(Block & block, const ColumnNumbers & argum
size_t rows = block.rows();
/// Aggregate functions do not support constant columns. Therefore, we materialize them.
std::vector<ColumnPtr> materialized_columns;
std::vector<const IColumn *> aggregate_arguments_vec(arguments.size() - 1);
......
......@@ -286,7 +286,7 @@ public:
return std::make_shared<DataTypeUInt64>();
}
/// apply function to the block.
void executeImpl(Block & block, const ColumnNumbers & arguments, size_t result) override
{
size_t size = block.rows();
......@@ -320,7 +320,7 @@ public:
return false;
}
/// Get the result type by argument type. If the function does not apply to these arguments, throw an exception.
DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override
{
return std::make_shared<DataTypeUInt64>();
......@@ -477,7 +477,7 @@ public:
return std::make_shared<DataTypeUInt8>();
}
/// apply function to the block.
void executeImpl(Block & block, const ColumnNumbers & arguments, size_t result) override
{
IColumn * col = block.safeGetByPosition(arguments[0]).column.get();
......@@ -880,7 +880,7 @@ public:
return 1;
}
/// Get the result type by argument type. If the function does not apply to these arguments, throw an exception.
DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override
{
return arguments.front()->clone();
......@@ -1024,7 +1024,7 @@ public:
/// apply function to the block.
void executeImpl(Block & block, const ColumnNumbers & arguments, size_t result) override
{
Int64 min = extractConstant<Int64>(block, arguments, 1, "Second"); /// The level at which the bar has zero length.
Int64 max = extractConstant<Int64>(block, arguments, 2, "Third"); /// The level at which the bar has the maximum length.
/// The maximum width of the bar in characters, by default.
......
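With min as the level at which the bar has zero length and max as the level of maximum length, the width of the bar is a linear rescaling of the value, clamped to the allowed range. A minimal sketch of that arithmetic; the default maximum width of 80 characters is an illustrative assumption:

#include <algorithm>
#include <iostream>

/// Width in characters of a bar for value `x`, where `min` maps to 0 and `max` maps to `max_width`.
double barWidth(double x, double min, double max, double max_width = 80)
{
    if (max <= min)
        return 0;
    double w = (x - min) / (max - min) * max_width;
    return std::clamp(w, 0.0, max_width);
}

int main()
{
    std::cout << barWidth(50, 0, 100) << '\n';   /// 40
}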
......@@ -414,7 +414,7 @@ void LowerUpperUTF8Impl<not_case_lower_bound, not_case_upper_bound, to_case, cyr
else if (src + 1 < src_end && src[0] == 0xC2u)
{
/// Punctuation U+0080 - U+00BF, UTF-8: C2 80 - C2 BF
*dst++ = *src++;
*dst++ = *src++;
}
else if (src + 2 < src_end && src[0] == 0xE2u)
......
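In the fragment above a lead byte 0xC2 marks a two-byte UTF-8 sequence in the U+0080..U+00BF range, which has no case mapping, so both bytes are copied through unchanged. A standalone sketch of that byte-level handling (not the real LowerUpperUTF8Impl, which also handles Cyrillic and other ranges):

#include <iostream>
#include <string>

/// Copy the input, passing two-byte sequences with lead byte 0xC2 through untouched
/// and lowercasing plain ASCII letters; everything else is copied as-is.
std::string asciiLowerKeepC2(const std::string & in)
{
    std::string out;
    out.reserve(in.size());
    for (size_t i = 0; i < in.size(); )
    {
        unsigned char c = in[i];
        if (c == 0xC2u && i + 1 < in.size())
        {
            /// Punctuation U+0080 - U+00BF, UTF-8: C2 80 - C2 BF: copy both bytes.
            out += in[i];
            out += in[i + 1];
            i += 2;
        }
        else
        {
            out += (c >= 'A' && c <= 'Z') ? char(c - 'A' + 'a') : in[i];
            ++i;
        }
    }
    return out;
}

int main()
{
    std::cout << asciiLowerKeepC2("ABC\xC2\xA0xyz") << '\n';   /// "abc", a non-breaking space, "xyz"
}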
......@@ -390,13 +390,13 @@ struct MatchImpl
std::string required_substring;
bool is_trivial;
bool required_substring_is_prefix; /// for `anchored` execution of the regexp.
regexp->getAnalyzeResult(required_substring, is_trivial, required_substring_is_prefix);
if (required_substring.empty())
{
if (!regexp->getRE2()) /// An empty regexp. Always matches.
{
memset(&res[0], 1, size * sizeof(res[0]));
}
......
......@@ -48,7 +48,7 @@ bool CachedCompressedReadBuffer::nextImpl()
owned_cell->data.resize(size_decompressed);
decompress(owned_cell->data.m_data, size_decompressed, size_compressed_without_checksum);
/// Put data into cache.
cache->set(key, owned_cell);
}
}
......
......@@ -24,11 +24,11 @@ size_t CompressedReadBuffer::readBig(char * to, size_t n)
{
size_t bytes_read = 0;
/// If there are unread bytes in the buffer, copy as much of them as needed to `to`.
if (pos < working_buffer.end())
bytes_read += read(to, std::min(static_cast<size_t>(working_buffer.end() - pos), n));
/// If more needs to be read, decompress, whenever possible, directly into `to`.
while (bytes_read < n)
{
size_t size_decompressed;
......
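readBig first drains whatever is still unread in the internal buffer and only then, for the remainder, pulls data straight into the caller's memory, bypassing the internal buffer. A generic sketch of that drain-then-bypass shape over a plain byte source; the fetch_block callback is an invented stand-in for decompressing a block directly into the destination:

#include <algorithm>
#include <cstring>
#include <functional>
#include <iostream>
#include <string>
#include <vector>

/// Reads up to `n` bytes: first from `buffered` starting at `pos`,
/// then directly from `fetch_block` into `to`, bypassing the internal buffer.
size_t readBig(char * to, size_t n,
               const std::vector<char> & buffered, size_t & pos,
               const std::function<size_t(char *, size_t)> & fetch_block)
{
    size_t bytes_read = 0;

    /// If there are unread bytes in the buffer, copy as much of them as needed to `to`.
    if (pos < buffered.size())
    {
        size_t count = std::min(buffered.size() - pos, n);
        std::memcpy(to, buffered.data() + pos, count);
        pos += count;
        bytes_read += count;
    }

    /// If more is needed, read it directly into the destination.
    while (bytes_read < n)
    {
        size_t got = fetch_block(to + bytes_read, n - bytes_read);
        if (got == 0)
            break;                      /// Source exhausted.
        bytes_read += got;
    }
    return bytes_read;
}

int main()
{
    std::vector<char> buffered{'a', 'b', 'c'};
    size_t pos = 1;
    std::string rest = "XYZ";
    size_t rest_pos = 0;
    auto fetch = [&](char * dst, size_t len) {
        size_t cnt = std::min(len, rest.size() - rest_pos);
        std::memcpy(dst, rest.data() + rest_pos, cnt);
        rest_pos += cnt;
        return cnt;
    };

    char out[8] = {};
    size_t got = readBig(out, 5, buffered, pos, fetch);
    std::cout << std::string(out, got) << '\n';   /// "bcXYZ"
}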
......@@ -33,7 +33,7 @@ void CompressedWriteBuffer::nextImpl()
size_t compressed_size = 0;
char * compressed_buffer_ptr = nullptr;
/** The format of compressed block - see CompressedStream.h
*/
switch (method)
......
......@@ -12,7 +12,7 @@ void IHashingBuffer<Buffer>::calculateHash(DB::BufferBase::Position data, size_t
{
if (len)
{
/// If the data is smaller than `block_size`, put it into the buffer and calculate the hash later.
if (block_pos + len < block_size)
{
memcpy(&BufferWithOwnMemory<Buffer>::memory[block_pos], data, len);
......@@ -20,7 +20,7 @@ void IHashingBuffer<Buffer>::calculateHash(DB::BufferBase::Position data, size_t
}
else
{
/// If something has already been written to the buffer, fill it up to a complete block first.
if (block_pos)
{
size_t n = block_size - block_pos;
......@@ -38,7 +38,7 @@ void IHashingBuffer<Buffer>::calculateHash(DB::BufferBase::Position data, size_t
data += block_size;
}
/// write the remainder to its buffer
if (len)
{
memcpy(&BufferWithOwnMemory<Buffer>::memory[0], data, len);
......
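The hashing buffer accumulates data in fixed-size blocks: a partial block is kept in memory, full blocks are hashed immediately, and the tail is stored for the next call. A compact sketch of that bookkeeping, with a trivial placeholder combiner in place of the real block hash:

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <iostream>
#include <string>
#include <vector>

struct BlockHasher
{
    static constexpr size_t block_size = 8;

    std::vector<char> block = std::vector<char>(block_size);
    size_t block_pos = 0;       /// How many bytes of the current block are filled.
    uint64_t state = 0;

    /// Placeholder combiner standing in for the real block hash.
    void hashBlock(const char * data, size_t len)
    {
        for (size_t i = 0; i < len; ++i)
            state = state * 1099511628211ull + static_cast<unsigned char>(data[i]);
    }

    void append(const char * data, size_t len)
    {
        /// If the data is smaller than the free space in the block, just store it for later.
        if (block_pos + len < block_size)
        {
            std::memcpy(block.data() + block_pos, data, len);
            block_pos += len;
            return;
        }

        /// If something has already been written, fill the block up and hash it.
        if (block_pos)
        {
            size_t n = block_size - block_pos;
            std::memcpy(block.data() + block_pos, data, n);
            hashBlock(block.data(), block_size);
            block_pos = 0;
            data += n;
            len -= n;
        }

        /// Hash whole blocks directly from the input.
        while (len >= block_size)
        {
            hashBlock(data, block_size);
            data += block_size;
            len -= block_size;
        }

        /// Write the remainder into the block buffer.
        if (len)
        {
            std::memcpy(block.data(), data, len);
            block_pos = len;
        }
    }
};

int main()
{
    BlockHasher h;
    std::string s = "hello world, hello hashing";
    h.append(s.data(), s.size());
    std::cout << h.state << '\n';
}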
......@@ -83,7 +83,7 @@ void InterserverWriteBuffer::nextImpl()
if (!offset() || finalized)
return;
/// Needed for correct operation with AsynchronousWriteBuffer, which swaps buffers.
impl->set(buffer().begin(), buffer().size());
impl->position() = pos;
......
......@@ -45,7 +45,7 @@ WriteBufferAIO::WriteBufferAIO(const std::string & filename_, size_t buffer_size
flush_buffer(BufferWithOwnMemory<WriteBuffer>(this->memory.size(), nullptr, DEFAULT_AIO_FILE_BLOCK_SIZE)),
filename(filename_)
{
/// Correct the buffer size information so that additional pages do not touch the base class `BufferBase`.
this->buffer().resize(this->buffer().size() - DEFAULT_AIO_FILE_BLOCK_SIZE);
this->internalBuffer().resize(this->internalBuffer().size() - DEFAULT_AIO_FILE_BLOCK_SIZE);
flush_buffer.buffer().resize(this->buffer().size() - DEFAULT_AIO_FILE_BLOCK_SIZE);
......@@ -92,7 +92,7 @@ void WriteBufferAIO::sync()
{
flush();
/// Ask OS to flush data to disk.
int res = ::fsync(fd);
if (res == -1)
throwFromErrno("Cannot fsync " + getFileName(), ErrorCodes::CANNOT_FSYNC);
......@@ -208,9 +208,9 @@ void WriteBufferAIO::prepare()
truncation_count = 0;
/*
A page on disk or in memory
The start address (the starting position in the case of a disk) is a multiple of DEFAULT_AIO_FILE_BLOCK_SIZE
:
+---------------+
......@@ -229,7 +229,7 @@ void WriteBufferAIO::prepare()
*/
/*
Representation of data on a disk
XXX : the data you want to write
ZZZ : data that is already on disk or zeros, if there is no data
......@@ -283,9 +283,9 @@ void WriteBufferAIO::prepare()
bytes_to_write = region_aligned_size;
/*
Representing data in the buffer before processing
XXX : the data you want to write
buffer_begin buffer_end
: :
......@@ -305,20 +305,20 @@ void WriteBufferAIO::prepare()
buffer_size
*/
/// The buffer of data that we want to write to the disk.
buffer_begin = flush_buffer.buffer().begin();
Position buffer_end = buffer_begin + region_size;
size_t buffer_size = buffer_end - buffer_begin;
/// Process the buffer so that it reflects the structure of the disk region.
/*
Representation of data in the buffer after processing
XXX : the data you want to write
ZZZ : data from disk or zeros, if there is no data
`buffer_begin` `buffer_end` extra page
: : :
+---:-----------+---------------+---------------+---------------+--:------------+
......
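The diagrams above describe the core constraint of this kind of direct I/O: both the starting offset and the size of every write must be multiples of DEFAULT_AIO_FILE_BLOCK_SIZE, so the requested region is expanded to aligned boundaries and the margins are pre-filled with data already on disk (or zeros). The arithmetic for the aligned region is plain round-down / round-up to the block size; a minimal sketch, with the block size as an illustrative constant:

#include <cstddef>
#include <iostream>

constexpr size_t BLOCK = 4096;      /// Stand-in for DEFAULT_AIO_FILE_BLOCK_SIZE.

constexpr size_t alignDown(size_t x) { return x / BLOCK * BLOCK; }
constexpr size_t alignUp(size_t x)   { return (x + BLOCK - 1) / BLOCK * BLOCK; }

int main()
{
    size_t region_begin = 10000;    /// Where we want to start writing.
    size_t region_size = 9000;      /// How much we want to write.

    size_t aligned_begin = alignDown(region_begin);
    size_t aligned_end = alignUp(region_begin + region_size);

    /// The buffer actually submitted to the kernel covers [aligned_begin, aligned_end);
    /// the left and right margins must be pre-filled with existing data or zeros.
    std::cout << aligned_begin << ' ' << aligned_end - aligned_begin << '\n';   /// 8192 12288
}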
......@@ -27,7 +27,7 @@ void WriteBufferFromPocoSocket::nextImpl()
{
ssize_t res = 0;
/// Add more details to exceptions.
try
{
res = socket.impl()->sendBytes(working_buffer.begin() + bytes_written, offset() - bytes_written);
......
......@@ -12,13 +12,13 @@ namespace
void copyDataImpl(ReadBuffer & from, WriteBuffer & to, bool check_bytes, size_t bytes, std::atomic<bool> * is_cancelled)
{
/// If read to the end of the buffer, eof() either fills the buffer with new data and moves the cursor to the beginning, or returns false.
while (bytes > 0 && !from.eof())
{
if (is_cancelled && *is_cancelled)
return;
/// buffer() - a piece of data available for reading; position() - the cursor of the place to which you have already read.
size_t count = std::min(bytes, static_cast<size_t>(from.buffer().end() - from.position()));
to.write(from.position(), count);
from.position() += count;
......@@ -31,13 +31,13 @@ void copyDataImpl(ReadBuffer & from, WriteBuffer & to, bool check_bytes, size_t
void copyDataImpl(ReadBuffer & from, WriteBuffer & to, bool check_bytes, size_t bytes, std::function<void()> cancellation_hook)
{
/// If read to the end of the buffer, eof() either fills the buffer with new data and moves the cursor to the beginning, or returns false.
while (bytes > 0 && !from.eof())
{
if (cancellation_hook)
cancellation_hook();
/// buffer() - a piece of data available for reading; position() - the cursor of the place to which you have already read.
size_t count = std::min(bytes, static_cast<size_t>(from.buffer().end() - from.position()));
to.write(from.position(), count);
from.position() += count;
......
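Both overloads above run the same loop: while input remains and the byte limit has not been reached, check the cancellation condition, then copy the chunk that is currently available and advance the cursors. A standalone sketch of that loop over standard streams, with an atomic flag for cancellation; the stream types are a simplification of ReadBuffer/WriteBuffer:

#include <algorithm>
#include <atomic>
#include <iostream>
#include <sstream>

/// Copy at most `bytes` bytes from `from` to `to`, checking `is_cancelled` between chunks.
void copyData(std::istream & from, std::ostream & to, size_t bytes, const std::atomic<bool> & is_cancelled)
{
    char chunk[4096];
    while (bytes > 0 && from)
    {
        if (is_cancelled)
            return;

        from.read(chunk, std::min(bytes, sizeof(chunk)));
        std::streamsize count = from.gcount();
        if (count == 0)
            break;

        to.write(chunk, count);
        bytes -= static_cast<size_t>(count);
    }
}

int main()
{
    std::istringstream in("some data to copy");
    std::ostringstream out;
    std::atomic<bool> cancelled{false};
    copyData(in, out, 9, cancelled);
    std::cout << out.str() << '\n';   /// "some data"
}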
......@@ -23,7 +23,7 @@ int main(int argc, char ** argv)
String test = "Hello, world! " + toString(rand);
/// Write to file as usual, read with O_DIRECT.
{
WriteBufferFromFile wb("test1", BUF_SIZE);
......@@ -41,7 +41,7 @@ int main(int argc, char ** argv)
std::cerr << "test: " << test << ", res: " << res << ", bytes: " << rb.count() << std::endl;
}
/// Write to file with O_DIRECT, read as usual.
{
WriteBufferFromFile wb("test2", BUF_SIZE, O_WRONLY | O_CREAT | O_TRUNC | O_DIRECT, 0666, nullptr, 4096);
......