diff --git a/src/Client/ConnectionPoolWithFailover.cpp b/src/Client/ConnectionPoolWithFailover.cpp
index fc47138e4a330888b5ef84aaa786a44aa5f98b16..e59fab2a1623fa3cc53466e054a2d7b6219f818f 100644
--- a/src/Client/ConnectionPoolWithFailover.cpp
+++ b/src/Client/ConnectionPoolWithFailover.cpp
@@ -248,7 +248,7 @@ ConnectionPoolWithFailover::tryGetEntry(
                     result.is_up_to_date = false;
                     result.staleness = delay;
 
-                    LOG_TRACE(log, "Server " << result.entry->getDescription() << " has unacceptable replica delay " << "for table " << table_to_check->database << "." << table_to_check->table << ": " << delay);
+                    LOG_TRACE(log, "Server " << result.entry->getDescription() << " has unacceptable replica delay for table " << table_to_check->database << "." << table_to_check->table << ": " << delay);
                     ProfileEvents::increment(ProfileEvents::DistributedConnectionStaleReplica);
                 }
             }
diff --git a/src/Common/tests/cow_compositions.cpp b/src/Common/tests/cow_compositions.cpp
index 74369e8630011952f05aa63fbcc0008a31ad83b6..427fe9130e91bfd300ae271d6b4fd5a167e11086 100644
--- a/src/Common/tests/cow_compositions.cpp
+++ b/src/Common/tests/cow_compositions.cpp
@@ -95,7 +95,7 @@ int main(int, char **)
         mut->set(3);
 
         std::cerr << "refcounts: " << x->use_count() << ", " << mut->use_count() << "\n";
-        std::cerr << "addresses: " << x.get() << ", " << ", " << mut.get() << "\n";
+        std::cerr << "addresses: " << x.get() << ", " << mut.get() << "\n";
         y = std::move(mut);
     }
diff --git a/src/Common/tests/gtest_shell_command.cpp b/src/Common/tests/gtest_shell_command.cpp
index 79b8309d0fafad2d946da9a5aad8bf119d8d7c9b..057a4d22648df4b881a0409dd14c25908b34c17d 100644
--- a/src/Common/tests/gtest_shell_command.cpp
+++ b/src/Common/tests/gtest_shell_command.cpp
@@ -62,6 +62,6 @@ TEST(ShellCommand, AutoWait)
         //command->wait(); // now automatic
     }
 
-    // std::cerr << "inspect me: ps auxwwf" << "\n";
+    // std::cerr << "inspect me: ps auxwwf\n";
     // std::this_thread::sleep_for(std::chrono::seconds(100));
 }
diff --git a/src/Compression/tests/gtest_compressionCodec.cpp b/src/Compression/tests/gtest_compressionCodec.cpp
index eca1c3c0a8f20c851534e3cc2f1f0965ac6804c0..36fc7eba8f81affb03b8a0d9ac28bbbe9fdfe4a9 100644
--- a/src/Compression/tests/gtest_compressionCodec.cpp
+++ b/src/Compression/tests/gtest_compressionCodec.cpp
@@ -220,7 +220,7 @@ template
     if (l_size != r_size)
     {
-        result = ::testing::AssertionFailure() << "size mismatch" << " expected: " << l_size << " got:" << r_size;
+        result = ::testing::AssertionFailure() << "size mismatch expected: " << l_size << " got: " << r_size;
     }
     if (l_size == 0 || r_size == 0)
     {
diff --git a/src/DataStreams/ParallelAggregatingBlockInputStream.cpp b/src/DataStreams/ParallelAggregatingBlockInputStream.cpp
index 5fd89bbc3dc469e9449550f6cde4c52106d4899e..eb8cf912eecd56047dbb25ec0adbada175c726a2 100644
--- a/src/DataStreams/ParallelAggregatingBlockInputStream.cpp
+++ b/src/DataStreams/ParallelAggregatingBlockInputStream.cpp
@@ -177,12 +177,12 @@ void ParallelAggregatingBlockInputStream::execute()
     for (size_t i = 0; i < max_threads; ++i)
     {
         size_t rows = many_data[i]->size();
-        LOG_TRACE(log, "Aggregated. " << threads_data[i].src_rows << " to " << rows << " rows" << " (from " << threads_data[i].src_bytes / 1048576.0 << " MiB)" << " in " << elapsed_seconds << " sec." << " (" << threads_data[i].src_rows / elapsed_seconds << " rows/sec., " << formatReadableSizeWithBinarySuffix(threads_data[i].src_bytes / elapsed_seconds) << "/sec.)");
+        LOG_TRACE(log, "Aggregated. " << threads_data[i].src_rows << " to " << rows << " rows (from " << threads_data[i].src_bytes / 1048576.0 << " MiB) in " << elapsed_seconds << " sec. (" << threads_data[i].src_rows / elapsed_seconds << " rows/sec., " << formatReadableSizeWithBinarySuffix(threads_data[i].src_bytes / elapsed_seconds) << "/sec.)");
 
         total_src_rows += threads_data[i].src_rows;
         total_src_bytes += threads_data[i].src_bytes;
     }
-    LOG_TRACE(log, "Total aggregated. " << total_src_rows << " rows (from " << total_src_bytes / 1048576.0 << " MiB)" << " in " << elapsed_seconds << " sec." << " (" << total_src_rows / elapsed_seconds << " rows/sec., " << formatReadableSizeWithBinarySuffix(total_src_bytes / elapsed_seconds) << "/sec.)");
+    LOG_TRACE(log, "Total aggregated. " << total_src_rows << " rows (from " << total_src_bytes / 1048576.0 << " MiB) in " << elapsed_seconds << " sec. (" << total_src_rows / elapsed_seconds << " rows/sec., " << formatReadableSizeWithBinarySuffix(total_src_bytes / elapsed_seconds) << "/sec.)");
 
     /// If there was no data, and we aggregate without keys, we must return single row with the result of empty aggregation.
     /// To do this, we pass a block with zero rows to aggregate.
diff --git a/src/IO/WriteBufferFromS3.cpp b/src/IO/WriteBufferFromS3.cpp
index 21db726d0744bf372af369e21bd5b652f030a92c..70cf8503254f1b90c2ff73ea893aea052663374f 100644
--- a/src/IO/WriteBufferFromS3.cpp
+++ b/src/IO/WriteBufferFromS3.cpp
@@ -135,7 +135,7 @@ void WriteBufferFromS3::writePart(const String & data)
     {
         auto etag = outcome.GetResult().GetETag();
         part_tags.push_back(etag);
-        LOG_DEBUG(log, "Writing part finished. " << "Total parts: " << part_tags.size() << ", Upload_id: " << upload_id << ", Etag: " << etag);
+        LOG_DEBUG_FORMATTED(log, "Writing part finished. Total parts: {}, Upload_id: {}, Etag: {}", part_tags.size(), upload_id, etag);
     }
     else
         throw Exception(outcome.GetError().GetMessage(), ErrorCodes::S3_ERROR);
diff --git a/src/Interpreters/Aggregator.cpp b/src/Interpreters/Aggregator.cpp
index cfcdf009d29355243f4f685cf2cf3b98b8032fcc..72543f5a4cbbaa339f38d120b21a7564ee66feef 100644
--- a/src/Interpreters/Aggregator.cpp
+++ b/src/Interpreters/Aggregator.cpp
@@ -761,7 +761,7 @@ void Aggregator::writeToTemporaryFile(AggregatedDataVariants & data_variants, co
     ProfileEvents::increment(ProfileEvents::ExternalAggregationCompressedBytes, compressed_bytes);
     ProfileEvents::increment(ProfileEvents::ExternalAggregationUncompressedBytes, uncompressed_bytes);
 
-    LOG_TRACE(log, "Written part in " << elapsed_seconds << " sec., " << rows << " rows, " << formatReadableSizeWithBinarySuffix(uncompressed_bytes) << " uncompressed, " << formatReadableSizeWithBinarySuffix(compressed_bytes) << " compressed, " << (uncompressed_bytes / rows) << " uncompressed bytes per row, " << (compressed_bytes / rows) << " compressed bytes per row, " << "compression rate: " << (uncompressed_bytes / compressed_bytes) << " (" << (rows / elapsed_seconds) << " rows/sec., " << formatReadableSizeWithBinarySuffix(uncompressed_bytes / elapsed_seconds) << "/sec. uncompressed, " << formatReadableSizeWithBinarySuffix(compressed_bytes / elapsed_seconds) << "/sec. compressed)");
+    LOG_TRACE(log, "Written part in " << elapsed_seconds << " sec., " << rows << " rows, " << formatReadableSizeWithBinarySuffix(uncompressed_bytes) << " uncompressed, " << formatReadableSizeWithBinarySuffix(compressed_bytes) << " compressed, " << (uncompressed_bytes / rows) << " uncompressed bytes per row, " << (compressed_bytes / rows) << " compressed bytes per row, compression rate: " << (uncompressed_bytes / compressed_bytes) << " (" << (rows / elapsed_seconds) << " rows/sec., " << formatReadableSizeWithBinarySuffix(uncompressed_bytes / elapsed_seconds) << "/sec. uncompressed, " << formatReadableSizeWithBinarySuffix(compressed_bytes / elapsed_seconds) << "/sec. compressed)");
 }
 
 void Aggregator::writeToTemporaryFile(AggregatedDataVariants & data_variants)
 {
@@ -927,7 +927,7 @@ void Aggregator::execute(const BlockInputStreamPtr & stream, AggregatedDataVaria
     double elapsed_seconds = watch.elapsedSeconds();
     size_t rows = result.sizeWithoutOverflowRow();
 
-    LOG_TRACE(log, "Aggregated. " << src_rows << " to " << rows << " rows (from " << formatReadableSizeWithBinarySuffix(src_bytes) << ")" << " in " << elapsed_seconds << " sec." << " (" << src_rows / elapsed_seconds << " rows/sec., " << formatReadableSizeWithBinarySuffix(src_bytes / elapsed_seconds) << "/sec.)");
+    LOG_TRACE(log, "Aggregated. " << src_rows << " to " << rows << " rows (from " << formatReadableSizeWithBinarySuffix(src_bytes) << ") in " << elapsed_seconds << " sec. (" << src_rows / elapsed_seconds << " rows/sec., " << formatReadableSizeWithBinarySuffix(src_bytes / elapsed_seconds) << "/sec.)");
 }
@@ -1293,7 +1293,7 @@ BlocksList Aggregator::convertToBlocks(AggregatedDataVariants & data_variants, b
     }
 
     double elapsed_seconds = watch.elapsedSeconds();
-    LOG_TRACE(log, "Converted aggregated data to blocks. " << rows << " rows, " << bytes / 1048576.0 << " MiB" << " in " << elapsed_seconds << " sec." << " (" << rows / elapsed_seconds << " rows/sec., " << formatReadableSizeWithBinarySuffix(bytes / elapsed_seconds) << "/sec.)");
+    LOG_TRACE(log, "Converted aggregated data to blocks. " << rows << " rows, " << bytes / 1048576.0 << " MiB in " << elapsed_seconds << " sec. (" << rows / elapsed_seconds << " rows/sec., " << formatReadableSizeWithBinarySuffix(bytes / elapsed_seconds) << "/sec.)");
 
     return blocks;
 }
@@ -2155,7 +2155,7 @@ Block Aggregator::mergeBlocks(BlocksList & blocks, bool final)
     size_t rows = block.rows();
     size_t bytes = block.bytes();
     double elapsed_seconds = watch.elapsedSeconds();
-    LOG_TRACE(log, "Merged partially aggregated blocks. " << rows << " rows, " << bytes / 1048576.0 << " MiB." << " in " << elapsed_seconds << " sec." << " (" << rows / elapsed_seconds << " rows/sec., " << formatReadableSizeWithBinarySuffix(bytes / elapsed_seconds) << "/sec.)");
+    LOG_TRACE(log, "Merged partially aggregated blocks. " << rows << " rows, " << bytes / 1048576.0 << " MiB in " << elapsed_seconds << " sec. (" << rows / elapsed_seconds << " rows/sec., " << formatReadableSizeWithBinarySuffix(bytes / elapsed_seconds) << "/sec.)");
 
     if (isCancelled())
         return {};
diff --git a/src/Interpreters/DDLWorker.cpp b/src/Interpreters/DDLWorker.cpp
index 0ba1c184545e11d71f7c336679da1a4e9d6df8df..f065f381b38226d54e9b958a45adab008c8b698a 100644
--- a/src/Interpreters/DDLWorker.cpp
+++ b/src/Interpreters/DDLWorker.cpp
@@ -238,7 +238,7 @@ DDLWorker::DDLWorker(const std::string & zk_root_dir, Context & context_, const
     if (context.getSettingsRef().readonly)
    {
-        LOG_WARNING(log, "Distributed DDL worker is run with readonly settings, it will not be able to execute DDL queries" << " Set appropriate system_profile or distributed_ddl.profile to fix this.");
+        LOG_WARNING(log, "Distributed DDL worker is run with readonly settings, it will not be able to execute DDL queries. Set appropriate system_profile or distributed_ddl.profile to fix this.");
     }
 
     host_fqdn = getFQDNOrHostName();
@@ -940,7 +940,7 @@ String DDLWorker::enqueueQuery(DDLLogEntry & entry)
     }
     catch (...)
     {
-        LOG_INFO(log, "An error occurred while creating auxiliary ZooKeeper directories in " << node_path << " . They will be created later" << ". Error : " << getCurrentExceptionMessage(true));
+        LOG_INFO_FORMATTED(log, "An error occurred while creating auxiliary ZooKeeper directories in {}. They will be created later. Error: {}", node_path, getCurrentExceptionMessage(true));
     }
 
     return node_path;
@@ -1217,7 +1217,7 @@ private:
             if (!ignoring_hosts.count(host))
             {
                 ignoring_hosts.emplace(host);
-                LOG_INFO(log, "Unexpected host " << host << " appeared " << " in task " << node_path);
+                LOG_INFO_FORMATTED(log, "Unexpected host {} appeared in task {}", host, node_path);
             }
             continue;
         }
diff --git a/src/Interpreters/SystemLog.h b/src/Interpreters/SystemLog.h
index a7c488fab86ac8af872198b0bf28f7b14607b9f4..033a46d39600f1d09f0f2ffe4cb9d6997f89badb 100644
--- a/src/Interpreters/SystemLog.h
+++ b/src/Interpreters/SystemLog.h
@@ -256,7 +256,7 @@ void SystemLog::add(const LogElement & element)
         // TextLog sets its logger level to 0, so this log is a noop and
         // there is no recursive logging.
-        LOG_ERROR(log, "Queue is full for system log '" << demangle(typeid(*this).name()) << "'" << " at " << queue_front_index);
+        LOG_ERROR_FORMATTED(log, "Queue is full for system log '{}' at {}", demangle(typeid(*this).name()), queue_front_index);
     }
 
     return;
diff --git a/src/Interpreters/executeQuery.cpp b/src/Interpreters/executeQuery.cpp
index 4a629babb6099119c7fba2ec16c25f4071da7c4e..1a10edf3ab300ae284f74061e80a156c9c84f200 100644
--- a/src/Interpreters/executeQuery.cpp
+++ b/src/Interpreters/executeQuery.cpp
@@ -140,7 +140,7 @@ static void setExceptionStackTrace(QueryLogElement & elem)
 
 /// Log exception (with query info) into text log (not into system table).
 static void logException(Context & context, QueryLogElement & elem)
 {
-    LOG_ERROR(&Logger::get("executeQuery"), elem.exception << " (from " << context.getClientInfo().current_address.toString() << ")" << " (in query: " << joinLines(elem.query) << ")" << (!elem.stack_trace.empty() ? ", Stack trace (when copying this message, always include the lines below):\n\n" + elem.stack_trace : ""));
+    LOG_ERROR(&Logger::get("executeQuery"), elem.exception << " (from " << context.getClientInfo().current_address.toString() << ") (in query: " << joinLines(elem.query) << ")" << (!elem.stack_trace.empty() ? ", Stack trace (when copying this message, always include the lines below):\n\n" + elem.stack_trace : ""));
 }
diff --git a/src/Parsers/ASTWatchQuery.h b/src/Parsers/ASTWatchQuery.h
index c4046a8771f1570585c78fac05cbff2ae3f17a0c..c6e6908cf97d4a12d02fe67ee35bf404d6f49d48 100644
--- a/src/Parsers/ASTWatchQuery.h
+++ b/src/Parsers/ASTWatchQuery.h
@@ -41,7 +41,7 @@ protected:
     {
         std::string indent_str = s.one_line ? "" : std::string(4 * frame.indent, ' ');
 
-        s.ostr << (s.hilite ? hilite_keyword : "") << "WATCH" << " " << (s.hilite ? hilite_none : "")
+        s.ostr << (s.hilite ? hilite_keyword : "") << "WATCH " << (s.hilite ? hilite_none : "")
             << (!database.empty() ? backQuoteIfNeed(database) + "." : "") << backQuoteIfNeed(table);
 
         if (is_watch_events)
diff --git a/src/Processors/Executors/PipelineExecutor.cpp b/src/Processors/Executors/PipelineExecutor.cpp
index d190f441b65efe2429a6a0a92fc0f0f59746a33e..ba7883f01d67ac47670ec6129361abc2bf39d3c6 100644
--- a/src/Processors/Executors/PipelineExecutor.cpp
+++ b/src/Processors/Executors/PipelineExecutor.cpp
@@ -547,7 +547,7 @@ void PipelineExecutor::executeSingleThread(size_t thread_num, size_t num_threads
 #ifndef NDEBUG
     auto & context = executor_contexts[thread_num];
 
-    LOG_TRACE(log, "Thread finished." << " Total time: " << (context->total_time_ns / 1e9) << " sec." << " Execution time: " << (context->execution_time_ns / 1e9) << " sec." << " Processing time: " << (context->processing_time_ns / 1e9) << " sec." << " Wait time: " << (context->wait_time_ns / 1e9) << " sec.");
+    LOG_TRACE(log, "Thread finished. Total time: " << (context->total_time_ns / 1e9) << " sec. Execution time: " << (context->execution_time_ns / 1e9) << " sec. Processing time: " << (context->processing_time_ns / 1e9) << " sec. Wait time: " << (context->wait_time_ns / 1e9) << " sec.");
 #endif
 }
diff --git a/src/Processors/Transforms/AggregatingTransform.cpp b/src/Processors/Transforms/AggregatingTransform.cpp
index 71063050a40908eff72248459ab80977322cf8cd..bea1ca0be76127049406ddc86fdd398987003613 100644
--- a/src/Processors/Transforms/AggregatingTransform.cpp
+++ b/src/Processors/Transforms/AggregatingTransform.cpp
@@ -540,7 +540,7 @@ void AggregatingTransform::initGenerate()
     double elapsed_seconds = watch.elapsedSeconds();
     size_t rows = variants.sizeWithoutOverflowRow();
-    LOG_TRACE(log, "Aggregated. " << src_rows << " to " << rows << " rows (from " << src_bytes / 1048576.0 << " MiB)" << " in " << elapsed_seconds << " sec." << " (" << src_rows / elapsed_seconds << " rows/sec., " << formatReadableSizeWithBinarySuffix(src_bytes / elapsed_seconds) << "/sec.)");
+    LOG_TRACE(log, "Aggregated. " << src_rows << " to " << rows << " rows (from " << src_bytes / 1048576.0 << " MiB) in " << elapsed_seconds << " sec. (" << src_rows / elapsed_seconds << " rows/sec., " << formatReadableSizeWithBinarySuffix(src_bytes / elapsed_seconds) << "/sec.)");
 
     if (params->aggregator.hasTemporaryFiles())
     {
diff --git a/src/Processors/printPipeline.h b/src/Processors/printPipeline.h
index f7e350de3dc78812e0907d8860704069c20f937c..ce7306ec4cf95b4bba219a9b717f6e470eadc24d 100644
--- a/src/Processors/printPipeline.h
+++ b/src/Processors/printPipeline.h
@@ -48,7 +48,7 @@ void printPipeline(const Processors & processors, const Statuses & statuses, Wri
             const IProcessor & curr = *processor;
             const IProcessor & next = port.getInputPort().getProcessor();
 
-            out << "n" << get_proc_id(curr) << " -> " << "n" << get_proc_id(next) << ";\n";
+            out << "n" << get_proc_id(curr) << " -> n" << get_proc_id(next) << ";\n";
         }
     }
     out << "}\n";
diff --git a/src/Storages/Distributed/DirectoryMonitor.cpp b/src/Storages/Distributed/DirectoryMonitor.cpp
index 30879433024161da8e4120aec7a15adcfb353375..c699fde552e486e4a9803c3518da171549698ad5 100644
--- a/src/Storages/Distributed/DirectoryMonitor.cpp
+++ b/src/Storages/Distributed/DirectoryMonitor.cpp
@@ -317,7 +317,7 @@ void StorageDistributedDirectoryMonitor::readHeader(
         readVarUInt(initiator_revision, header_buf);
         if (ClickHouseRevision::get() < initiator_revision)
         {
-            LOG_WARNING(log, "ClickHouse shard version is older than ClickHouse initiator version. " << "It may lack support for new features.");
+            LOG_WARNING(log, "ClickHouse shard version is older than ClickHouse initiator version. It may lack support for new features.");
         }
 
         readStringBinary(insert_query, header_buf);
diff --git a/src/Storages/Distributed/DistributedBlockOutputStream.cpp b/src/Storages/Distributed/DistributedBlockOutputStream.cpp
index 569452f3ba8ef53ffed802eb83f273cb8669179d..a5811b3986516a735cf7e1f5137823b2c762cbe4 100644
--- a/src/Storages/Distributed/DistributedBlockOutputStream.cpp
+++ b/src/Storages/Distributed/DistributedBlockOutputStream.cpp
@@ -114,7 +114,7 @@ void DistributedBlockOutputStream::write(const Block & block)
         if (ordinary_block.has(col.name))
         {
             ordinary_block.erase(col.name);
-            LOG_DEBUG(log, storage.getStorageID().getNameForLogs() << ": column " + col.name + " will be removed, " << "because it is MATERIALIZED");
+            LOG_DEBUG(log, storage.getStorageID().getNameForLogs() << ": column " + col.name + " will be removed, because it is MATERIALIZED");
         }
     }
 
@@ -411,7 +411,7 @@ void DistributedBlockOutputStream::writeSuffix()
     auto log_performance = [this] ()
     {
         double elapsed = watch.elapsedSeconds();
-        LOG_DEBUG(log, "It took " << std::fixed << std::setprecision(1) << elapsed << " sec. to insert " << inserted_blocks << " blocks" << ", " << std::fixed << std::setprecision(1) << inserted_rows / elapsed << " rows per second" << ". " << getCurrentStateDescription());
+        LOG_DEBUG(log, "It took " << std::fixed << std::setprecision(1) << elapsed << " sec. to insert " << inserted_blocks << " blocks, " << std::fixed << std::setprecision(1) << inserted_rows / elapsed << " rows per second. " << getCurrentStateDescription());
     };
 
     if (insert_sync && pool)
diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.cpp b/src/Storages/MergeTree/IMergeTreeDataPart.cpp
index 494e5809683356b3f992b4cdb1086c7581578cf0..d2b637943d1f70deb97691fae75d9b80bd556228 100644
--- a/src/Storages/MergeTree/IMergeTreeDataPart.cpp
+++ b/src/Storages/MergeTree/IMergeTreeDataPart.cpp
@@ -668,7 +668,7 @@ void IMergeTreeDataPart::renameTo(const String & new_relative_path, bool remove_
             Names files;
             volume->getDisk()->listFiles(to, files);
 
-            LOG_WARNING(storage.log, "Part directory " << fullPath(volume->getDisk(), to) << " already exists" << " and contains " << files.size() << " files. Removing it.");
+            LOG_WARNING(storage.log, "Part directory " << fullPath(volume->getDisk(), to) << " already exists and contains " << files.size() << " files. Removing it.");
 
             volume->getDisk()->removeRecursive(to);
         }
diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp
index c35cb29ca622e0d0aa802a09430e51add7bb81ed..dd5319962bf76735c09a1b354742112d42f6c9d2 100644
--- a/src/Storages/MergeTree/MergeTreeData.cpp
+++ b/src/Storages/MergeTree/MergeTreeData.cpp
@@ -2088,7 +2088,7 @@ restore_covered)
         if (error)
         {
-            LOG_ERROR(log, "The set of parts restored in place of " << part->name << " looks incomplete." << " There might or might not be a data loss." << (error_parts.empty() ? "" : " Suspicious parts: " + error_parts));
+            LOG_ERROR_FORMATTED(log, "The set of parts restored in place of {} looks incomplete. There might or might not be a data loss.{}", part->name, (error_parts.empty() ? "" : " Suspicious parts: " + error_parts));
         }
     }
 }
diff --git a/src/Storages/MergeTree/MergeTreePartsMover.cpp b/src/Storages/MergeTree/MergeTreePartsMover.cpp
index 60883429b329294edaf50af352826d857a1024de..e18bbe0fdce8168224ad91dccef6fbb2d24f9c9a 100644
--- a/src/Storages/MergeTree/MergeTreePartsMover.cpp
+++ b/src/Storages/MergeTree/MergeTreePartsMover.cpp
@@ -215,7 +215,7 @@ void MergeTreePartsMover::swapClonedPart(const MergeTreeData::DataPartPtr & clon
     /// It's ok, because we don't block moving parts for merges or mutations
     if (!active_part || active_part->name != cloned_part->name)
     {
-        LOG_INFO(log, "Failed to swap " << cloned_part->name << ". Active part doesn't exist." << " Possible it was merged or mutated. Will remove copy on path '" << cloned_part->getFullPath() << "'.");
+        LOG_INFO_FORMATTED(log, "Failed to swap {}. Active part doesn't exist. Possibly it was merged or mutated. Will remove copy on path '{}'.", cloned_part->name, cloned_part->getFullPath());
         return;
     }
diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp
index 9ee4f7d3f9a6dc53d411985063ed544b6c2d01b9..b99df9a335247da02931f5e6b5b5c50429baf574 100644
--- a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp
+++ b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp
@@ -320,7 +320,7 @@ void ReplicatedMergeTreeQueue::updateTimesInZooKeeper(
         auto code = zookeeper->tryMulti(ops, responses);
 
         if (code)
-            LOG_ERROR(log, "Couldn't set value of nodes for insert times (" << replica_path << "/min_unprocessed_insert_time, max_processed_insert_time)" << ": " << zkutil::ZooKeeper::error2string(code) + ". This shouldn't happen often.");
+            LOG_ERROR_FORMATTED(log, "Couldn't set value of nodes for insert times ({}/min_unprocessed_insert_time, max_processed_insert_time): {}. This shouldn't happen often.", replica_path, zkutil::ZooKeeper::error2string(code));
     }
 }
@@ -1885,7 +1885,7 @@ bool ReplicatedMergeTreeMergePredicate::isMutationFinished(const ReplicatedMerge
             partition_it->second.begin(), partition_it->second.lower_bound(block_num));
         if (blocks_count)
         {
-            LOG_TRACE(queue.log, "Mutation " << mutation.znode_name << " is not done yet because " << "in partition ID " << partition_id << " there are still " << blocks_count << " uncommitted blocks.");
+            LOG_TRACE(queue.log, "Mutation " << mutation.znode_name << " is not done yet because in partition ID " << partition_id << " there are still " << blocks_count << " uncommitted blocks.");
             return false;
         }
     }
diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeTableMetadata.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeTableMetadata.cpp
index fe4ea79d10665eda0f54dfa8b809d0c6321e4916..ba9bbd1da61d87ca260e40f4482aab657b30e2b5 100644
--- a/src/Storages/MergeTree/ReplicatedMergeTreeTableMetadata.cpp
+++ b/src/Storages/MergeTree/ReplicatedMergeTreeTableMetadata.cpp
@@ -66,7 +66,7 @@ ReplicatedMergeTreeTableMetadata::ReplicatedMergeTreeTableMetadata(const MergeTr
 
 void ReplicatedMergeTreeTableMetadata::write(WriteBuffer & out) const
 {
-    out << "metadata format version: 1" << "\n"
+    out << "metadata format version: 1\n"
         << "date column: " << date_column << "\n"
         << "sampling expression: " << sampling_expression << "\n"
         << "index granularity: " << index_granularity << "\n"
diff --git a/src/Storages/StorageBuffer.cpp b/src/Storages/StorageBuffer.cpp
index 1bb30201e82f753bc282ccc7b6c43cb11df7b61a..5708e5f2b05e4912f713b27a63f76ec1a635484b 100644
--- a/src/Storages/StorageBuffer.cpp
+++ b/src/Storages/StorageBuffer.cpp
@@ -447,7 +447,7 @@ void StorageBuffer::startup()
 {
     if (global_context.getSettingsRef().readonly)
     {
-        LOG_WARNING(log, "Storage " << getName() << " is run with readonly settings, it will not be able to insert data." << " Set appropriate system_profile to fix this.");
+        LOG_WARNING(log, "Storage " << getName() << " is run with readonly settings, it will not be able to insert data. Set appropriate system_profile to fix this.");
     }
 
diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp
index 6adadc6ac7272a1edb8bd6244036b095ac4e2cab..094569e91ddddfdcd462f7cb0e51e688f690d7a2 100644
--- a/src/Storages/StorageReplicatedMergeTree.cpp
+++ b/src/Storages/StorageReplicatedMergeTree.cpp
@@ -830,7 +830,7 @@ void StorageReplicatedMergeTree::checkPartChecksumsAndAddCommitOps(const zkutil:
         }
         else
         {
-            LOG_WARNING(log, "checkPartAndAddToZooKeeper: node " << replica_path + "/parts/" + part_name << " already exists." << " Will not commit any nodes.");
+            LOG_WARNING(log, "checkPartAndAddToZooKeeper: node " << replica_path + "/parts/" + part_name << " already exists. Will not commit any nodes.");
         }
     }
@@ -1370,7 +1370,7 @@ bool StorageReplicatedMergeTree::executeFetch(LogEntry & entry)
                 }
                 else
                 {
-                    LOG_WARNING(log, "No active replica has part " << entry.new_part_name << ", but that part needs quorum and /quorum/status contains entry about another part " << quorum_entry.part_name << ". It means that part was successfully written to " << entry.quorum << " replicas, but then all of them goes offline." << " Or it is a bug.");
+                    LOG_WARNING(log, "No active replica has part " << entry.new_part_name << ", but that part needs quorum and /quorum/status contains entry about another part " << quorum_entry.part_name << ". It means that part was successfully written to " << entry.quorum << " replicas, but then all of them went offline, or it is a bug.");
                 }
             }
         }
@@ -1580,7 +1580,7 @@ bool StorageReplicatedMergeTree::executeReplaceRange(const LogEntry & entry)
         }
         catch (Exception &)
        {
-            LOG_INFO(log, "Can't use " << source_table_id.getNameForLogs() << " as source table for REPLACE PARTITION command. Will fetch all parts." << " Reason: " << getCurrentExceptionMessage(false));
+            LOG_INFO_FORMATTED(log, "Can't use {} as source table for REPLACE PARTITION command. Will fetch all parts. Reason: {}", source_table_id.getNameForLogs(), getCurrentExceptionMessage(false));
             return 0;
         }
 
@@ -2297,7 +2297,7 @@ bool StorageReplicatedMergeTree::createLogEntryToMergeParts(
         const auto & part = parts[i];
         if (part->modification_time + MAX_AGE_OF_LOCAL_PART_THAT_WASNT_ADDED_TO_ZOOKEEPER < time(nullptr))
         {
-            LOG_WARNING(log, "Part " << part->name << " (that was selected for merge)" << " with age " << (time(nullptr) - part->modification_time) << " seconds exists locally but not in ZooKeeper." << " Won't do merge with that part and will check it.");
+            LOG_WARNING(log, "Part " << part->name << " (that was selected for merge) with age " << (time(nullptr) - part->modification_time) << " seconds exists locally but not in ZooKeeper. Won't do merge with that part and will check it.");
             enqueuePartForCheck(part->name);
         }
     }
@@ -2339,7 +2339,7 @@ bool StorageReplicatedMergeTree::createLogEntryToMutatePart(const IMergeTreeData
     {
         if (part.modification_time + MAX_AGE_OF_LOCAL_PART_THAT_WASNT_ADDED_TO_ZOOKEEPER < time(nullptr))
         {
-            LOG_WARNING(log, "Part " << part.name << " (that was selected for mutation)" << " with age " << (time(nullptr) - part.modification_time) << " seconds exists locally but not in ZooKeeper." << " Won't mutate that part and will check it.");
+            LOG_WARNING(log, "Part " << part.name << " (that was selected for mutation) with age " << (time(nullptr) - part.modification_time) << " seconds exists locally but not in ZooKeeper. Won't mutate that part and will check it.");
             enqueuePartForCheck(part.name);
         }
 
@@ -4305,7 +4305,7 @@ void StorageReplicatedMergeTree::fetchPartition(const ASTPtr & partition, const
     if (best_replica.empty())
         throw Exception("Logical error: cannot choose best replica.", ErrorCodes::LOGICAL_ERROR);
 
-    LOG_INFO(log, "Found " << replicas.size() << " replicas, " << active_replicas.size() << " of them are active." << " Selected " << best_replica << " to fetch from.");
+    LOG_INFO(log, "Found " << replicas.size() << " replicas, " << active_replicas.size() << " of them are active. Selected " << best_replica << " to fetch from.");
 
     String best_replica_path = from + "/replicas/" + best_replica;