diff --git a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp
index 98a08abab654a0556b1abc31b45dd8d19585d2a9..58193ec040fde844125ba2da991f69cd0f1491ba 100644
--- a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp
+++ b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp
@@ -833,15 +833,14 @@ QueryPlanPtr MergeTreeDataSelectExecutor::readFromParts(
 namespace
 {
 
+/// Marks are placed whenever the threshold on rows or bytes is met.
+/// So we have to return the number of marks for whichever estimate is higher - by rows or by bytes.
 size_t roundRowsOrBytesToMarks(
     size_t rows_setting,
     size_t bytes_setting,
     size_t rows_granularity,
     size_t bytes_granularity)
 {
-    /// Marks are placed whenever threshold on rows or bytes is met.
-    /// So we have to return the number of marks on whatever estimate is higher - by rows or by bytes.
-
     size_t res = (rows_setting + rows_granularity - 1) / rows_granularity;
 
     if (bytes_granularity == 0)
@@ -849,6 +848,34 @@ size_t roundRowsOrBytesToMarks(
     else
         return std::max(res, (bytes_setting + bytes_granularity - 1) / bytes_granularity);
 }
+
+/// Same as roundRowsOrBytesToMarks(), but does not return more than max_marks.
+size_t minMarksForConcurrentRead(
+    size_t rows_setting,
+    size_t bytes_setting,
+    size_t rows_granularity,
+    size_t bytes_granularity,
+    size_t max_marks)
+{
+    size_t marks = 1;
+
+    if (rows_setting + rows_granularity <= rows_setting) /// overflow
+        marks = max_marks;
+    else if (rows_setting)
+        marks = (rows_setting + rows_granularity - 1) / rows_granularity;
+
+    if (bytes_granularity == 0)
+        return marks;
+    else
+    {
+        if (bytes_setting + bytes_granularity <= bytes_setting) /// overflow
+            return max_marks;
+        if (bytes_setting)
+            return std::max(marks, (bytes_setting + bytes_granularity - 1) / bytes_granularity);
+        else
+            return marks;
+    }
+}
 
 }
 
@@ -904,11 +931,12 @@ QueryPlanPtr MergeTreeDataSelectExecutor::spreadMarkRangesAmongStreams(
         data_settings->index_granularity,
         index_granularity_bytes);
 
-    const size_t min_marks_for_concurrent_read = roundRowsOrBytesToMarks(
+    const size_t min_marks_for_concurrent_read = minMarksForConcurrentRead(
         settings.merge_tree_min_rows_for_concurrent_read,
         settings.merge_tree_min_bytes_for_concurrent_read,
         data_settings->index_granularity,
-        index_granularity_bytes);
+        index_granularity_bytes,
+        sum_marks);
 
     if (sum_marks > max_marks_to_use_cache)
         use_uncompressed_cache = false;
@@ -1035,11 +1063,12 @@ QueryPlanPtr MergeTreeDataSelectExecutor::spreadMarkRangesAmongStreamsWithOrder(
         data_settings->index_granularity,
         index_granularity_bytes);
 
-    const size_t min_marks_for_concurrent_read = roundRowsOrBytesToMarks(
+    const size_t min_marks_for_concurrent_read = minMarksForConcurrentRead(
         settings.merge_tree_min_rows_for_concurrent_read,
         settings.merge_tree_min_bytes_for_concurrent_read,
         data_settings->index_granularity,
-        index_granularity_bytes);
+        index_granularity_bytes,
+        sum_marks);
 
     if (sum_marks > max_marks_to_use_cache)
         use_uncompressed_cache = false;
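Reviewer note: a minimal standalone sketch of the arithmetic this patch fixes, not ClickHouse code. The function names and sample values are hypothetical; 8192 is the default MergeTree index_granularity. With a setting of UINT64_MAX the old ceil division wraps around, and with a setting of 0 it truncates, so both extremes collapse to 0 marks; the guarded version clamps instead:

#include <cstddef>
#include <iostream>
#include <limits>

/// Old behavior: plain ceil division. If rows_setting is near SIZE_MAX,
/// rows_setting + rows_granularity - 1 wraps around and the result collapses to 0.
size_t roundRowsToMarksUnsafe(size_t rows_setting, size_t rows_granularity)
{
    return (rows_setting + rows_granularity - 1) / rows_granularity;
}

/// New behavior: detect the wrap-around and clamp to the marks actually available.
size_t roundRowsToMarksClamped(size_t rows_setting, size_t rows_granularity, size_t max_marks)
{
    if (rows_setting + rows_granularity <= rows_setting) /// overflow
        return max_marks;
    if (rows_setting == 0)
        return 1; /// never degenerate to "read 0 marks at a time"
    return (rows_setting + rows_granularity - 1) / rows_granularity;
}

int main()
{
    constexpr size_t granularity = 8192; /// default MergeTree index_granularity
    constexpr size_t sum_marks = 2;      /// pretend the part has 2 marks
    constexpr size_t huge = std::numeric_limits<size_t>::max();

    std::cout << roundRowsToMarksUnsafe(huge, granularity) << '\n';             /// 0 - wrapped around
    std::cout << roundRowsToMarksClamped(huge, granularity, sum_marks) << '\n'; /// 2 - clamped
    std::cout << roundRowsToMarksUnsafe(0, granularity) << '\n';                /// 0 - also broken
    std::cout << roundRowsToMarksClamped(0, granularity, sum_marks) << '\n';    /// 1
}

The value + granularity <= value comparison is the usual portable test for unsigned wrap-around: unsigned overflow is well-defined in C++, so the sum is guaranteed to wrap rather than trigger undefined behavior.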
diff --git a/tests/queries/0_stateless/01665_merge_tree_min_for_concurrent_read.reference b/tests/queries/0_stateless/01665_merge_tree_min_for_concurrent_read.reference
new file mode 100644
index 0000000000000000000000000000000000000000..6ed281c757a969ffe22f3dcfa5830c532479c726
--- /dev/null
+++ b/tests/queries/0_stateless/01665_merge_tree_min_for_concurrent_read.reference
@@ -0,0 +1,2 @@
+1
+1
diff --git a/tests/queries/0_stateless/01665_merge_tree_min_for_concurrent_read.sql b/tests/queries/0_stateless/01665_merge_tree_min_for_concurrent_read.sql
new file mode 100644
index 0000000000000000000000000000000000000000..ca324acdce353dff23ef67eab57d122d454fda9f
--- /dev/null
+++ b/tests/queries/0_stateless/01665_merge_tree_min_for_concurrent_read.sql
@@ -0,0 +1,6 @@
+DROP TABLE IF EXISTS data_01655;
+CREATE TABLE data_01655 (key Int) Engine=MergeTree() ORDER BY key;
+INSERT INTO data_01655 VALUES (1);
+SELECT * FROM data_01655 SETTINGS merge_tree_min_rows_for_concurrent_read=0, merge_tree_min_bytes_for_concurrent_read=0;
+-- UINT64_MAX
+SELECT * FROM data_01655 SETTINGS merge_tree_min_rows_for_concurrent_read=18446744073709551615, merge_tree_min_bytes_for_concurrent_read=18446744073709551615;
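Reviewer note: the two SELECTs drive both boundary settings (0 and UINT64_MAX) through the new helper, and each should return the single inserted row, matching the two 1s in the reference file. As a condensed self-check of the same logic (the granularities and mark count here are made-up; 8192 rows / 10 MiB mirror the MergeTree defaults):

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <limits>

/// Condensed copy of the patched helper, restructured with early returns.
size_t minMarksForConcurrentRead(
    size_t rows_setting, size_t bytes_setting,
    size_t rows_granularity, size_t bytes_granularity, size_t max_marks)
{
    size_t marks = 1;
    if (rows_setting + rows_granularity <= rows_setting) /// overflow
        marks = max_marks;
    else if (rows_setting)
        marks = (rows_setting + rows_granularity - 1) / rows_granularity;

    if (bytes_granularity == 0)
        return marks;
    if (bytes_setting + bytes_granularity <= bytes_setting) /// overflow
        return max_marks;
    if (bytes_setting)
        return std::max(marks, (bytes_setting + bytes_granularity - 1) / bytes_granularity);
    return marks;
}

int main()
{
    constexpr size_t max = std::numeric_limits<size_t>::max();
    constexpr size_t rows_gran = 8192, bytes_gran = 10 * 1024 * 1024, sum_marks = 2;

    /// settings = 0: the old rounding returned 0 marks here; the new helper returns 1
    assert(minMarksForConcurrentRead(0, 0, rows_gran, bytes_gran, sum_marks) == 1);
    /// settings = UINT64_MAX: wrap-around is caught and the result is clamped to sum_marks
    assert(minMarksForConcurrentRead(max, max, rows_gran, bytes_gran, sum_marks) == sum_marks);
}

With the old roundRowsOrBytesToMarks() both boundary cases evaluated to 0, so min_marks_for_concurrent_read could end up as 0; the new helper returns at least 1 for these inputs and never more than sum_marks.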