diff --git a/dbms/include/DB/Client/Connection.h b/dbms/include/DB/Client/Connection.h
index 9f81b5a7feb414e8691746eb1b469111ca8b956a..2d40c2e6141f3c2e380e0010bb6520e39e210252 100644
--- a/dbms/include/DB/Client/Connection.h
+++ b/dbms/include/DB/Client/Connection.h
@@ -12,8 +12,6 @@
 #include
 #include
 
-#include <DB/DataTypes/DataTypeFactory.h>
-
 #include
 #include
 #include
@@ -50,7 +48,6 @@ class Connection : private boost::noncopyable
 public:
     Connection(const String & host_, UInt16 port_, const String & default_database_,
         const String & user_, const String & password_,
-        const DataTypeFactory & data_type_factory_,
         const String & client_name_ = "client",
         Protocol::Compression::Enum compression_ = Protocol::Compression::Enable,
         Poco::Timespan connect_timeout_ = Poco::Timespan(DBMS_DEFAULT_CONNECT_TIMEOUT_SEC, 0),
@@ -61,7 +58,7 @@
         host(host_), port(port_), default_database(default_database_),
         user(user_), password(password_),
         client_name(client_name_),
-        compression(compression_), data_type_factory(data_type_factory_),
+        compression(compression_),
         connect_timeout(connect_timeout_), receive_timeout(receive_timeout_),
         send_timeout(send_timeout_), ping_timeout(ping_timeout_),
         log_wrapper(host, port)
@@ -172,8 +169,6 @@ private:
     /// which compression algorithm to use for INSERT data and external table data
     CompressionMethod network_compression_method = CompressionMethod::LZ4;
 
-    const DataTypeFactory & data_type_factory;
-
     /** If not nullptr, used to limit network traffic.
       * Only traffic for transferring blocks is counted. Other packets are not counted.
       */
diff --git a/dbms/include/DB/Client/ConnectionPool.h b/dbms/include/DB/Client/ConnectionPool.h
index 11a6eb14afabc6068ca91b1246d3a7eed190ed34..aa779815bf74975065f3db8204e3d3ef69539b4e 100644
--- a/dbms/include/DB/Client/ConnectionPool.h
+++ b/dbms/include/DB/Client/ConnectionPool.h
@@ -56,7 +56,6 @@ public:
     ConnectionPool(unsigned max_connections_,
         const String & host_, UInt16 port_, const String & default_database_,
         const String & user_, const String & password_,
-        const DataTypeFactory & data_type_factory_,
         const String & client_name_ = "client",
         Protocol::Compression::Enum compression_ = Protocol::Compression::Enable,
         Poco::Timespan connect_timeout_ = Poco::Timespan(DBMS_DEFAULT_CONNECT_TIMEOUT_SEC, 0),
@@ -65,7 +64,7 @@ public:
         : Base(max_connections_, &Logger::get("ConnectionPool (" + Poco::Net::SocketAddress(host_, port_).toString() + ")")),
         host(host_), port(port_), default_database(default_database_), user(user_), password(password_),
-        client_name(client_name_), compression(compression_), data_type_factory(data_type_factory_),
+        client_name(client_name_), compression(compression_),
         connect_timeout(connect_timeout_), receive_timeout(receive_timeout_), send_timeout(send_timeout_)
     {
     }
@@ -91,7 +90,7 @@ protected:
     {
         return new Connection(
             host, port, default_database, user, password,
-            data_type_factory, client_name, compression,
+            client_name, compression,
             connect_timeout, receive_timeout, send_timeout);
     }
@@ -105,8 +104,6 @@ private:
     String client_name;
     Protocol::Compression::Enum compression;    /// Whether to compress data when interacting with the server.
 
-    const DataTypeFactory & data_type_factory;
-
     Poco::Timespan connect_timeout;
     Poco::Timespan receive_timeout;
     Poco::Timespan send_timeout;
diff --git a/dbms/include/DB/Common/ExternalTable.h b/dbms/include/DB/Common/ExternalTable.h
index 4c6ff8c010a7b6c4a66578aecf978e6079744236..bd984397211e99d95d1f881eee5a876ddf192584 100644
--- a/dbms/include/DB/Common/ExternalTable.h
+++ b/dbms/include/DB/Common/ExternalTable.h
@@ -42,11 +42,13 @@ public:
     /// Initialize sample_block according to the table structure saved in `structure`
     virtual void initSampleBlock(const Context & context)
     {
+        const DataTypeFactory & data_type_factory = DataTypeFactory::instance();
+
         for (size_t i = 0; i < structure.size(); ++i)
         {
             ColumnWithNameAndType column;
             column.name = structure[i].first;
-            column.type = context.getDataTypeFactory().get(structure[i].second);
+            column.type = data_type_factory.get(structure[i].second);
             column.column = column.type->createColumn();
             sample_block.insert(column);
         }
@@ -58,7 +60,7 @@ public:
         initReadBuffer();
         initSampleBlock(context);
         ExternalTableData res = std::make_pair(new AsynchronousBlockInputStream(context.getFormatFactory().getInput(
-            format, *read_buffer, sample_block, DEFAULT_BLOCK_SIZE, context.getDataTypeFactory())), name);
+            format, *read_buffer, sample_block, DEFAULT_BLOCK_SIZE)), name);
         return res;
     }
diff --git a/dbms/include/DB/Core/NamesAndTypes.h b/dbms/include/DB/Core/NamesAndTypes.h
index ad3c60defa7de7705a7c97220f2035342c911c0d..3669164b1d3a5a771b6a5b06a8f4bfea23db2719 100644
--- a/dbms/include/DB/Core/NamesAndTypes.h
+++ b/dbms/include/DB/Core/NamesAndTypes.h
@@ -9,7 +9,6 @@
 #include
 #include
 
-#include <DB/DataTypes/DataTypeFactory.h>
 #include
 
 #include "Names.h"
@@ -45,11 +44,11 @@ class NamesAndTypesList : public std::list<NameAndTypePair>
 public:
     using std::list<NameAndTypePair>::list;
 
-    void readText(ReadBuffer & buf, const DataTypeFactory & data_type_factory);
+    void readText(ReadBuffer & buf);
     void writeText(WriteBuffer & buf) const;
 
     String toString() const;
-    static NamesAndTypesList parse(const String & s, const DataTypeFactory & data_type_factory);
+    static NamesAndTypesList parse(const String & s);
 
     /// All elements of rhs must be distinct.
     bool isSubsetOf(const NamesAndTypesList & rhs) const;
diff --git a/dbms/include/DB/DataStreams/FormatFactory.h b/dbms/include/DB/DataStreams/FormatFactory.h
index 1b51b8af153bab3189547a589bb96f5c6b0810de..d8e224e1cb0941ab4314e7fa59412fba64e0c912 100644
--- a/dbms/include/DB/DataStreams/FormatFactory.h
+++ b/dbms/include/DB/DataStreams/FormatFactory.h
@@ -1,7 +1,5 @@
 #pragma once
 
-#include <DB/DataTypes/DataTypeFactory.h>
-
 #include
 #include
@@ -16,8 +14,8 @@ class FormatFactory
 {
 public:
     BlockInputStreamPtr getInput(const String & name, ReadBuffer & buf,
-        Block & sample, size_t max_block_size, const DataTypeFactory & data_type_factory) const;
-
+        Block & sample, size_t max_block_size) const;
+
     BlockOutputStreamPtr getOutput(const String & name, WriteBuffer & buf, Block & sample) const;
 };
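Note: throughout this patch, call sites stop receiving an injected `const DataTypeFactory &` and instead reach a process-wide singleton via `DataTypeFactory::instance()`. The patch does not touch DataTypeFactory.h itself, so the shape of `instance()` is not shown; a minimal sketch of the function-local-static singleton it presumes (everything except the `instance()` name is illustrative, not taken from the patch) could look like this:

#include <string>

namespace DB
{

class DataTypeFactory
{
public:
    /// Since C++11, initialization of a function-local static is thread-safe,
    /// so concurrent first callers all observe one fully constructed object.
    static const DataTypeFactory & instance()
    {
        static const DataTypeFactory factory;
        return factory;
    }

    /// Parses a type name such as "UInt64" or "Array(String)" (sketch only):
    /// DataTypePtr get(const String & name) const;

private:
    DataTypeFactory() = default;                         /// Construction only through instance().
    DataTypeFactory(const DataTypeFactory &) = delete;
    DataTypeFactory & operator=(const DataTypeFactory &) = delete;
};

}

Returning a const reference keeps the singleton read-only at call sites, which matches how every caller below uses it.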
diff --git a/dbms/include/DB/DataStreams/MergeSortingBlockInputStream.h b/dbms/include/DB/DataStreams/MergeSortingBlockInputStream.h
index a87d81a01f35bded9fb0c558831f9db0f847b824..7a51097af10fcb0f1dbfd493b4c8ddf715ca8a78 100644
--- a/dbms/include/DB/DataStreams/MergeSortingBlockInputStream.h
+++ b/dbms/include/DB/DataStreams/MergeSortingBlockInputStream.h
@@ -66,9 +66,9 @@ public:
     /// limit - if nonzero, only the first `limit` rows may be returned in sorted order.
     MergeSortingBlockInputStream(BlockInputStreamPtr input_, SortDescription & description_,
         size_t max_merged_block_size_, size_t limit_,
-        size_t max_bytes_before_external_sort_, const std::string & tmp_path_, const DataTypeFactory & data_type_factory_)
+        size_t max_bytes_before_external_sort_, const std::string & tmp_path_)
         : description(description_), max_merged_block_size(max_merged_block_size_), limit(limit_),
-        max_bytes_before_external_sort(max_bytes_before_external_sort_), tmp_path(tmp_path_), data_type_factory(data_type_factory_)
+        max_bytes_before_external_sort(max_bytes_before_external_sort_), tmp_path(tmp_path_)
     {
         children.push_back(input_);
     }
@@ -97,7 +96,6 @@ private:
     size_t max_bytes_before_external_sort;
     const std::string tmp_path;
-    const DataTypeFactory & data_type_factory;
 
     Logger * log = &Logger::get("MergeSortingBlockInputStream");
@@ -115,8 +114,8 @@ private:
         CompressedReadBuffer compressed_in;
         BlockInputStreamPtr block_in;
 
-        TemporaryFileStream(const std::string & path, const DataTypeFactory & data_type_factory)
-            : file_in(path), compressed_in(file_in), block_in(new NativeBlockInputStream(compressed_in, data_type_factory)) {}
+        TemporaryFileStream(const std::string & path)
+            : file_in(path), compressed_in(file_in), block_in(new NativeBlockInputStream(compressed_in)) {}
     };
 
     std::vector<std::unique_ptr<TemporaryFileStream>> temporary_inputs;
diff --git a/dbms/include/DB/DataStreams/NativeBlockInputStream.h b/dbms/include/DB/DataStreams/NativeBlockInputStream.h
index ee387680627a36782f642d0dfbc44a7e39deefcf..cb277a57939a47f1d91cc61fedd9147054370222 100644
--- a/dbms/include/DB/DataStreams/NativeBlockInputStream.h
+++ b/dbms/include/DB/DataStreams/NativeBlockInputStream.h
@@ -1,6 +1,5 @@
 #pragma once
 
-#include <DB/DataTypes/DataTypeFactory.h>
 #include
 
@@ -16,8 +15,8 @@ public:
     /** If a nonzero server_revision is specified, additional block information may be expected and read,
       * depending on what is supported for that revision.
       */
-    NativeBlockInputStream(ReadBuffer & istr_, const DataTypeFactory & data_type_factory_, UInt64 server_revision_ = 0)
-        : istr(istr_), data_type_factory(data_type_factory_), server_revision(server_revision_) {}
+    NativeBlockInputStream(ReadBuffer & istr_, UInt64 server_revision_ = 0)
+        : istr(istr_), server_revision(server_revision_) {}
 
     String getName() const override { return "NativeBlockInputStream"; }
@@ -35,7 +34,6 @@ protected:
 private:
     ReadBuffer & istr;
-    const DataTypeFactory & data_type_factory;
     UInt64 server_revision;
 };
diff --git a/dbms/include/DB/Dictionaries/ClickHouseDictionarySource.h b/dbms/include/DB/Dictionaries/ClickHouseDictionarySource.h
index e1c47462d95cf5d9ca971871b3c6820312a368f0..e57248de428b63c10f062575d6595347cef693e1 100644
--- a/dbms/include/DB/Dictionaries/ClickHouseDictionarySource.h
+++ b/dbms/include/DB/Dictionaries/ClickHouseDictionarySource.h
@@ -35,7 +35,7 @@ public:
         sample_block{sample_block}, context(context),
         is_local{isLocalAddress({ host, port })},
         pool{is_local ? nullptr : std::make_unique<ConnectionPool>(
-            max_connections, host, port, db, user, password, context.getDataTypeFactory(),
+            max_connections, host, port, db, user, password,
             "ClickHouseDictionarySource")
         },
         load_all_query{composeLoadAllQuery()}
@@ -50,7 +50,7 @@ public:
         sample_block{other.sample_block}, context(other.context),
         is_local{other.is_local},
         pool{is_local ? nullptr : std::make_unique<ConnectionPool>(
-            max_connections, host, port, db, user, password, context.getDataTypeFactory(),
+            max_connections, host, port, db, user, password,
             "ClickHouseDictionarySource")},
         load_all_query{other.load_all_query}
     {}
diff --git a/dbms/include/DB/Dictionaries/FileDictionarySource.h b/dbms/include/DB/Dictionaries/FileDictionarySource.h
index cadb3fb085c8a9bef1656ca07f3e6d05d8036555..41e55f64de56940473354c643f630f0ccf70973c 100644
--- a/dbms/include/DB/Dictionaries/FileDictionarySource.h
+++ b/dbms/include/DB/Dictionaries/FileDictionarySource.h
@@ -34,7 +34,7 @@ public:
     {
         auto in_ptr = std::make_unique<ReadBufferFromFile>(filename);
         auto stream = context.getFormatFactory().getInput(
-            format, *in_ptr, sample_block, max_block_size, context.getDataTypeFactory());
+            format, *in_ptr, sample_block, max_block_size);
         last_modification = getLastModification();
 
         return new OwningBufferBlockInputStream{stream, std::move(in_ptr)};
diff --git a/dbms/include/DB/Interpreters/Cluster.h b/dbms/include/DB/Interpreters/Cluster.h
index 1bb84dabb9a1fb8e5774516ceb038f003b843c5f..0bd307812694e5b4aa1ff581b73427f096afc08d 100644
--- a/dbms/include/DB/Interpreters/Cluster.h
+++ b/dbms/include/DB/Interpreters/Cluster.h
@@ -2,7 +2,6 @@
 #include
 #include
-#include <DB/DataTypes/DataTypeFactory.h>
 #include
 #include
 #include
@@ -16,10 +15,10 @@ namespace DB
 class Cluster : private boost::noncopyable
 {
 public:
-    Cluster(const Settings & settings, const DataTypeFactory & data_type_factory, const String & cluster_name);
+    Cluster(const Settings & settings, const String & cluster_name);
 
     /// Build a cluster from the names of shards and replicas. Local ones are handled just like remote ones.
-    Cluster(const Settings & settings, const DataTypeFactory & data_type_factory, std::vector<std::vector<String>> names,
+    Cluster(const Settings & settings, std::vector<std::vector<String>> names,
         const String & username, const String & password);
 
     /// the number of clickhouse server nodes located locally
@@ -98,8 +97,7 @@ struct Clusters
     typedef std::map<String, Cluster> Impl;
     Impl impl;
 
-    Clusters(const Settings & settings, const DataTypeFactory & data_type_factory,
-        const String & config_name = "remote_servers");
+    Clusters(const Settings & settings, const String & config_name = "remote_servers");
 };
 
}
diff --git a/dbms/include/DB/Interpreters/Context.h b/dbms/include/DB/Interpreters/Context.h
index 2fb66570878b2be4d32db70dfa75c668e1160749..f646c627ecbb0c78316380f3d31a897001527e3e 100644
--- a/dbms/include/DB/Interpreters/Context.h
+++ b/dbms/include/DB/Interpreters/Context.h
@@ -160,7 +160,6 @@ public:
     const TableFunctionFactory & getTableFunctionFactory() const;
     const AggregateFunctionFactory & getAggregateFunctionFactory() const;
-    const DataTypeFactory & getDataTypeFactory() const;
     const FormatFactory & getFormatFactory() const;
     const Dictionaries & getDictionaries() const;
     const ExternalDictionaries & getExternalDictionaries() const;
diff --git a/dbms/include/DB/Interpreters/InterpreterAlterQuery.h b/dbms/include/DB/Interpreters/InterpreterAlterQuery.h
index d6a4ec264f9f7fd52e4d5b6ec901f4b6ce72733f..4ead0b62955aaba0accdda81a6110cc8b399875d 100644
--- a/dbms/include/DB/Interpreters/InterpreterAlterQuery.h
+++ b/dbms/include/DB/Interpreters/InterpreterAlterQuery.h
@@ -77,7 +77,7 @@ private:
     Context context;
 
-    static void parseAlter(const ASTAlterQuery::ParameterContainer & params, const DataTypeFactory & data_type_factory,
+    static void parseAlter(const ASTAlterQuery::ParameterContainer & params,
         AlterCommands & out_alter_commands, PartitionCommands & out_partition_commands);
 };
diff --git a/dbms/include/DB/Storages/ColumnsDescription.h b/dbms/include/DB/Storages/ColumnsDescription.h
index 5816a8bebe6c8dd73d2835cf5119b8c1b3a32706..aa7b08fa394d235925bc714c21d608492e153320 100644
--- a/dbms/include/DB/Storages/ColumnsDescription.h
+++ b/dbms/include/DB/Storages/ColumnsDescription.h
@@ -21,7 +21,7 @@ struct ColumnsDescription
     String toString() const;
 
-    static ColumnsDescription parse(const String & str, const DataTypeFactory & data_type_factory);
+    static ColumnsDescription parse(const String & str);
 };
diff --git a/dbms/include/DB/Storages/Distributed/DirectoryMonitor.h b/dbms/include/DB/Storages/Distributed/DirectoryMonitor.h
index a7b6f7cf74c2827b8c1e0972ce1e3c386e72a529..52cbfebe9364ddcf5e75fdfb664c04ff2ddd7f30 100644
--- a/dbms/include/DB/Storages/Distributed/DirectoryMonitor.h
+++ b/dbms/include/DB/Storages/Distributed/DirectoryMonitor.h
@@ -126,7 +126,7 @@ private:
         const std::string & user, const std::string & password)
     {
         return new ConnectionPool{
             1, host, port, "",
-            user, password, storage.context.getDataTypeFactory(),
+            user, password,
             storage.getName() + '_' + name};
     };
diff --git a/dbms/include/DB/Storages/MergeTree/MergeTreeData.h b/dbms/include/DB/Storages/MergeTree/MergeTreeData.h
index 8605f10a42ea632fe8f51b0cfaee4357d23bd093..8497279cf7d30f7b8faaeced76c144a2ca3b2088 100644
--- a/dbms/include/DB/Storages/MergeTree/MergeTreeData.h
+++ b/dbms/include/DB/Storages/MergeTree/MergeTreeData.h
@@ -477,7 +477,7 @@ public:
             }
 
             ReadBufferFromFile file(path, std::min(static_cast<size_t>(DBMS_DEFAULT_BUFFER_SIZE), Poco::File(path).getSize()));
-            columns.readText(file, storage.context.getDataTypeFactory());
+            columns.readText(file);
         }
 
         void checkNotBroken(bool require_part_metadata)
diff --git a/dbms/include/DB/Storages/MergeTree/MergeTreePartChecker.h b/dbms/include/DB/Storages/MergeTree/MergeTreePartChecker.h
index bf653f2a7bb874e6dfc9dd5290f2a6ed3550eacd..8561f62627d596e27714b613923025bac6a2db36 100644
--- a/dbms/include/DB/Storages/MergeTree/MergeTreePartChecker.h
+++ b/dbms/include/DB/Storages/MergeTree/MergeTreePartChecker.h
@@ -28,8 +28,7 @@ public:
       * - Checks that the marks are correct.
       * Throws an exception if the part is corrupted or if the check failed (TODO: these cases could be separated).
       */
-    static void checkDataPart(String path, const Settings & settings, const DataTypeFactory & data_type_factory,
-        MergeTreeData::DataPart::Checksums * out_checksums = nullptr);
+    static void checkDataPart(String path, const Settings & settings, MergeTreeData::DataPart::Checksums * out_checksums = nullptr);
 };
 
}
diff --git a/dbms/include/DB/Storages/StorageSet.h b/dbms/include/DB/Storages/StorageSet.h
index 7b1ade807e5e649968aa15ad3f5221e02f129db4..fa3f87a6a6a9ba5125d6cb5d4c7b1644fb5e55b6 100644
--- a/dbms/include/DB/Storages/StorageSet.h
+++ b/dbms/include/DB/Storages/StorageSet.h
@@ -45,7 +45,7 @@ protected:
     void restore();
 
 private:
-    void restoreFromFile(const String & file_path, const DataTypeFactory & data_type_factory);
+    void restoreFromFile(const String & file_path);
 
     /// Insert a block into the state.
     virtual void insertBlock(const Block & block) = 0;
diff --git a/dbms/include/DB/TableFunctions/TableFunctionRemote.h b/dbms/include/DB/TableFunctions/TableFunctionRemote.h
index 0939186cf60bf41aeb8aeb78e5af7069ee2598f4..f407d70027fd1fe3f5b4e5c2044312caef43fd95 100644
--- a/dbms/include/DB/TableFunctions/TableFunctionRemote.h
+++ b/dbms/include/DB/TableFunctions/TableFunctionRemote.h
@@ -3,6 +3,7 @@
 #include
 #include
 #include
+#include <DB/DataTypes/DataTypeFactory.h>
 #include
 #include
 #include
@@ -117,7 +118,7 @@ public:
         if (names.empty())
             throw Exception("Shard list is empty after parsing first argument", ErrorCodes::BAD_ARGUMENTS);
 
-        SharedPtr<Cluster> cluster = new Cluster(context.getSettings(), context.getDataTypeFactory(), names, username, password);
+        SharedPtr<Cluster> cluster = new Cluster(context.getSettings(), names, username, password);
 
         return StorageDistributed::create(getName(), chooseColumns(*cluster, remote_database, remote_table, context),
             remote_database, remote_table, cluster, context);
@@ -140,6 +141,8 @@ private:
         };
 
         input->readPrefix();
+        const DataTypeFactory & data_type_factory = DataTypeFactory::instance();
+
         while (true)
         {
             Block current = input->read();
@@ -153,7 +156,7 @@ private:
                 String column_name = (*name)[i].get<const String &>();
                 String data_type_name = (*type)[i].get<const String &>();
 
-                res.emplace_back(column_name, context.getDataTypeFactory().get(data_type_name));
+                res.emplace_back(column_name, data_type_factory.get(data_type_name));
             }
         }
diff --git a/dbms/src/Client/Benchmark.cpp b/dbms/src/Client/Benchmark.cpp
index 5cfb0c4086a6de7e072deb2786210f9a0b2dcdc9..4c88eebae410952d64c9e5b03793c83ede8c1db9 100644
--- a/dbms/src/Client/Benchmark.cpp
+++ b/dbms/src/Client/Benchmark.cpp
@@ -52,7 +52,7 @@ public:
         const String & host_, UInt16 port_, const String & default_database_,
         const String & user_, const String & password_, const Settings & settings_)
         : concurrency(concurrency_), delay(delay_), queue(concurrency),
-        connections(concurrency, host_, port_, default_database_, user_, password_, data_type_factory),
+        connections(concurrency, host_, port_, default_database_, user_, password_),
         settings(settings_), pool(concurrency)
     {
         std::cerr << std::fixed << std::setprecision(3);
@@ -73,7 +73,6 @@ private:
     typedef ConcurrentBoundedQueue<Query> Queue;
     Queue queue;
 
-    DataTypeFactory data_type_factory;
     ConnectionPool connections;
     Settings settings;
diff --git a/dbms/src/Client/Client.cpp b/dbms/src/Client/Client.cpp
index f3bcedf88df9fde224174bd04e5162ccf222d8be..6039609e75d35a82ce4c11eb91fe30f2eb200d64 100644
--- a/dbms/src/Client/Client.cpp
+++ b/dbms/src/Client/Client.cpp
@@ -336,7 +336,7 @@ private:
             << (!user.empty() ? " as user " + user : "")
             << "." << std::endl;
 
-        connection = new Connection(host, port, default_database, user, password, context.getDataTypeFactory(), "client", compression,
+        connection = new Connection(host, port, default_database, user, password, "client", compression,
             Poco::Timespan(config().getInt("connect_timeout", DBMS_DEFAULT_CONNECT_TIMEOUT_SEC), 0),
             Poco::Timespan(config().getInt("receive_timeout", DBMS_DEFAULT_RECEIVE_TIMEOUT_SEC), 0),
             Poco::Timespan(config().getInt("send_timeout", DBMS_DEFAULT_SEND_TIMEOUT_SEC), 0));
@@ -698,7 +698,7 @@ private:
             current_format = insert->format;
 
         BlockInputStreamPtr block_input = context.getFormatFactory().getInput(
-            current_format, buf, sample, insert_format_max_block_size, context.getDataTypeFactory());
+            current_format, buf, sample, insert_format_max_block_size);
 
         BlockInputStreamPtr async_block_input = new AsynchronousBlockInputStream(block_input);
diff --git a/dbms/src/Client/Connection.cpp b/dbms/src/Client/Connection.cpp
index 9bf7de70ebf371903735c52eb866274970a3a5b3..9e22eda4225fc7c903aaf622899aa4bee7884690 100644
--- a/dbms/src/Client/Connection.cpp
+++ b/dbms/src/Client/Connection.cpp
@@ -166,30 +166,30 @@ void Connection::forceConnected()
 
 struct PingTimeoutSetter
 {
-    PingTimeoutSetter(Poco::Net::StreamSocket & socket_, const Poco::Timespan & ping_timeout_) 
+    PingTimeoutSetter(Poco::Net::StreamSocket & socket_, const Poco::Timespan & ping_timeout_)
     : socket(socket_), ping_timeout(ping_timeout_)
     {
         old_send_timeout = socket.getSendTimeout();
         old_receive_timeout = socket.getReceiveTimeout();
-        
+
         if (old_send_timeout > ping_timeout)
             socket.setSendTimeout(ping_timeout);
         if (old_receive_timeout > ping_timeout)
            socket.setReceiveTimeout(ping_timeout);
     }
-    
+
     ~PingTimeoutSetter()
     {
         socket.setSendTimeout(old_send_timeout);
         socket.setReceiveTimeout(old_receive_timeout);
    }
-    
+
     Poco::Net::StreamSocket & socket;
     Poco::Timespan ping_timeout;
     Poco::Timespan old_send_timeout;
     Poco::Timespan old_receive_timeout;
 };
-
+
 bool Connection::ping()
 {
     //LOG_TRACE(log_wrapper.get(), "Ping (" << getServerAddress() << ")");
@@ -237,7 +237,7 @@ bool Connection::ping()
 
 void Connection::sendQuery(const String & query, const String & query_id_, UInt64 stage, const Settings * settings, bool with_pending_data)
 {
     network_compression_method = settings ? settings->network_compression_method.value : CompressionMethod::LZ4;
-    
+
     forceConnected();
     query_id = query_id_;
@@ -494,7 +494,7 @@ void Connection::initBlockInput()
         else
             maybe_compressed_in = in;
 
-        block_in = new NativeBlockInputStream(*maybe_compressed_in, data_type_factory, server_revision);
+        block_in = new NativeBlockInputStream(*maybe_compressed_in, server_revision);
     }
 }
diff --git a/dbms/src/Core/NamesAndTypes.cpp b/dbms/src/Core/NamesAndTypes.cpp
index d70e6a9932a7fea7ba2c3c8b1e805d226afcc2f0..57dfe42dd0bd556690cd0ea2ae29c8f31d01c9c4 100644
--- a/dbms/src/Core/NamesAndTypes.cpp
+++ b/dbms/src/Core/NamesAndTypes.cpp
@@ -1,10 +1,14 @@
 #include
 
+#include <DB/DataTypes/DataTypeFactory.h>
+
 namespace DB
 {
 
-void NamesAndTypesList::readText(ReadBuffer & buf, const DataTypeFactory & data_type_factory)
+void NamesAndTypesList::readText(ReadBuffer & buf)
 {
+    const DataTypeFactory & data_type_factory = DataTypeFactory::instance();
+
     DB::assertString("columns format version: 1\n", buf);
     size_t count;
     DB::readText(count, buf);
@@ -45,11 +49,11 @@ String NamesAndTypesList::toString() const
     return s;
 }
 
-NamesAndTypesList NamesAndTypesList::parse(const String & s, const DataTypeFactory & data_type_factory)
+NamesAndTypesList NamesAndTypesList::parse(const String & s)
 {
     ReadBufferFromString in(s);
     NamesAndTypesList res;
-    res.readText(in, data_type_factory);
+    res.readText(in);
     assertEOF(in);
     return res;
 }
diff --git a/dbms/src/DataStreams/FormatFactory.cpp b/dbms/src/DataStreams/FormatFactory.cpp
index 8f265dc273700c7f34dac7dbc7f9dc230404718f..6db0e39373188039fe0d6cd91bd21b63ebee9355 100644
--- a/dbms/src/DataStreams/FormatFactory.cpp
+++ b/dbms/src/DataStreams/FormatFactory.cpp
@@ -25,10 +25,10 @@ namespace DB
 {
 
 BlockInputStreamPtr FormatFactory::getInput(const String & name, ReadBuffer & buf,
-    Block & sample, size_t max_block_size, const DataTypeFactory & data_type_factory) const
+    Block & sample, size_t max_block_size) const
 {
     if (name == "Native")
-        return new NativeBlockInputStream(buf, data_type_factory);
+        return new NativeBlockInputStream(buf);
     else if (name == "TabSeparated")
         return new BlockInputStreamFromRowInputStream(new TabSeparatedRowInputStream(buf, sample), sample, max_block_size);
     else if (name == "RowBinary")
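The call-site rewrite above is mechanical: the factory argument disappears and the stream (or parser) reaches the singleton internally. A hedged before/after sketch of the pattern applied throughout — the wrapper function is hypothetical and the header paths are the ones this patch itself uses:

#include <DB/DataStreams/NativeBlockInputStream.h>
#include <DB/DataTypes/DataTypeFactory.h>

using namespace DB;

BlockInputStreamPtr openNative(ReadBuffer & buf, UInt64 server_revision)
{
    /// Before: new NativeBlockInputStream(buf, context.getDataTypeFactory(), server_revision)
    /// After: no factory parameter; the stream resolves type names itself.
    return new NativeBlockInputStream(buf, server_revision);
}

/// Parsing a type name directly now goes through the same singleton:
/// DataTypePtr type = DataTypeFactory::instance().get("Array(UInt64)");

This is also why Context::getDataTypeFactory() and the ContextShared member can be deleted outright: no caller needs a Context just to parse a type name anymore.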
diff --git a/dbms/src/DataStreams/MergeSortingBlockInputStream.cpp b/dbms/src/DataStreams/MergeSortingBlockInputStream.cpp
index 1ed7eb40c03f33409d58a1ef09c5eece752764b7..b56832894d8e54adc17da61c4e5c71cefab583d0 100644
--- a/dbms/src/DataStreams/MergeSortingBlockInputStream.cpp
+++ b/dbms/src/DataStreams/MergeSortingBlockInputStream.cpp
@@ -65,7 +65,7 @@ Block MergeSortingBlockInputStream::readImpl()
         /// Create the sorted streams for merging.
         for (const auto & file : temporary_files)
         {
-            temporary_inputs.emplace_back(new TemporaryFileStream(file->path(), data_type_factory));
+            temporary_inputs.emplace_back(new TemporaryFileStream(file->path()));
             inputs_to_merge.emplace_back(temporary_inputs.back()->block_in);
         }
diff --git a/dbms/src/DataStreams/NativeBlockInputStream.cpp b/dbms/src/DataStreams/NativeBlockInputStream.cpp
index 078e7a2e1e70b0d8ed45881d1a61ad11f8bbe66d..b9fa55aa449113d54d59280b92c2c4b9bb14df88 100644
--- a/dbms/src/DataStreams/NativeBlockInputStream.cpp
+++ b/dbms/src/DataStreams/NativeBlockInputStream.cpp
@@ -5,6 +5,7 @@
 #include
 #include
+#include <DB/DataTypes/DataTypeFactory.h>
 
 #include
@@ -44,6 +45,8 @@ Block NativeBlockInputStream::readImpl()
 {
     Block res;
 
+    const DataTypeFactory & data_type_factory = DataTypeFactory::instance();
+
     if (istr.eof())
         return res;
diff --git a/dbms/src/DataStreams/tests/native_streams.cpp b/dbms/src/DataStreams/tests/native_streams.cpp
index 5a2709d4eb476d1bd6f2a761600d235fc9780c98..d23796c54c9b1629c16a2159ba8abcc6bf2f0d06 100644
--- a/dbms/src/DataStreams/tests/native_streams.cpp
+++ b/dbms/src/DataStreams/tests/native_streams.cpp
@@ -15,7 +15,6 @@
 #include
 #include
 #include
-#include <DB/DataTypes/DataTypeFactory.h>
 #include
 #include
@@ -117,8 +116,6 @@ int main(int argc, char ** argv)
         /// read data from a native file and simultaneously write it to the table
         if (argc == 2 && 0 == strcmp(argv[1], "write"))
         {
-            DataTypeFactory factory;
-
             ReadBufferFromFileDescriptor in1(STDIN_FILENO);
             CompressedReadBuffer in2(in1);
-            NativeBlockInputStream in3(in2, factory, Revision::get());
+            NativeBlockInputStream in3(in2, Revision::get());
diff --git a/dbms/src/DataStreams/tests/sorting_stream.cpp b/dbms/src/DataStreams/tests/sorting_stream.cpp
index d19931f98f31cc05678800c213e1961f7e6c86d1..b503ae6ec7d2334060676d130fb72b36e40cdcc1 100644
--- a/dbms/src/DataStreams/tests/sorting_stream.cpp
+++ b/dbms/src/DataStreams/tests/sorting_stream.cpp
@@ -148,11 +148,10 @@ int main(int argc, char ** argv)
         sort_columns.push_back(SortColumnDescription(3, 1));
 
         QueryProcessingStage::Enum stage;
-        DataTypeFactory data_type_factory;
 
         Poco::SharedPtr<IBlockInputStream> in = table->read(column_names, 0, Context{}, Settings(), stage, argc == 2 ? atoi(argv[1]) : 1048576)[0];
         in = new PartialSortingBlockInputStream(in, sort_columns);
-        in = new MergeSortingBlockInputStream(in, sort_columns, DEFAULT_BLOCK_SIZE, 0, 0, "", data_type_factory);
+        in = new MergeSortingBlockInputStream(in, sort_columns, DEFAULT_BLOCK_SIZE, 0, 0, "");
         //in = new LimitBlockInputStream(in, 10);
 
         WriteBufferFromOStream ob(std::cout);
diff --git a/dbms/src/Interpreters/Cluster.cpp b/dbms/src/Interpreters/Cluster.cpp
index 51e0a1309473c0b44dbf6cc73f6dcae7035856bd..9b71d0a7cdfbc81180bffb25ac071c9113837dd6 100644
--- a/dbms/src/Interpreters/Cluster.cpp
+++ b/dbms/src/Interpreters/Cluster.cpp
@@ -47,7 +47,7 @@ namespace
 }
 
-Clusters::Clusters(const Settings & settings, const DataTypeFactory & data_type_factory, const String & config_name)
+Clusters::Clusters(const Settings & settings, const String & config_name)
 {
     Poco::Util::AbstractConfiguration & config = Poco::Util::Application::instance().config();
     Poco::Util::AbstractConfiguration::Keys config_keys;
@@ -56,11 +56,11 @@ Clusters::Clusters(const Settings & settings, const DataTypeFactory & data_type_
     for (Poco::Util::AbstractConfiguration::Keys::const_iterator it = config_keys.begin(); it != config_keys.end(); ++it)
         impl.emplace(std::piecewise_construct,
             std::forward_as_tuple(*it),
-            std::forward_as_tuple(settings, data_type_factory, config_name + "." + *it));
+            std::forward_as_tuple(settings, config_name + "." + *it));
 }
 
-Cluster::Cluster(const Settings & settings, const DataTypeFactory & data_type_factory, const String & cluster_name)
+Cluster::Cluster(const Settings & settings, const String & cluster_name)
 {
     Poco::Util::AbstractConfiguration & config = Poco::Util::Application::instance().config();
     Poco::Util::AbstractConfiguration::Keys config_keys;
@@ -179,7 +179,7 @@ Cluster::Cluster(const Settings & settings, const DataTypeFa
             replicas.emplace_back(new ConnectionPool(
                 settings.distributed_connections_pool_size,
                 replica.host_port.host().toString(), replica.host_port.port(), "", replica.user, replica.password,
-                data_type_factory, "server", Protocol::Compression::Enable,
+                "server", Protocol::Compression::Enable,
                 saturate(settings.connect_timeout_with_failover_ms, settings.limits.max_execution_time),
                 saturate(settings.receive_timeout, settings.limits.max_execution_time),
                 saturate(settings.send_timeout, settings.limits.max_execution_time)));
@@ -205,7 +205,7 @@ Cluster::Cluster(const Settings & settings, const DataTypeFa
             pools.emplace_back(new ConnectionPool(
                 settings.distributed_connections_pool_size,
                 address.host_port.host().toString(), address.host_port.port(), "", address.user, address.password,
-                data_type_factory, "server", Protocol::Compression::Enable,
+                "server", Protocol::Compression::Enable,
                 saturate(settings.connect_timeout, settings.limits.max_execution_time),
                 saturate(settings.receive_timeout, settings.limits.max_execution_time),
                 saturate(settings.send_timeout, settings.limits.max_execution_time)));
@@ -217,7 +217,7 @@
 }
 
-Cluster::Cluster(const Settings & settings, const DataTypeFactory & data_type_factory, std::vector<std::vector<String>> names,
+Cluster::Cluster(const Settings & settings, std::vector<std::vector<String>> names,
     const String & username, const String & password)
 {
     for (const auto & shard : names)
@@ -238,7 +238,7 @@ Cluster::Cluster(const Settings & settings, const DataTypeFa
             replicas.emplace_back(new ConnectionPool(
                 settings.distributed_connections_pool_size,
                 replica.host_port.host().toString(), replica.host_port.port(), "", replica.user, replica.password,
-                data_type_factory, "server", Protocol::Compression::Enable,
+                "server", Protocol::Compression::Enable,
                 saturate(settings.connect_timeout_with_failover_ms, settings.limits.max_execution_time),
                 saturate(settings.receive_timeout, settings.limits.max_execution_time),
                 saturate(settings.send_timeout, settings.limits.max_execution_time)));
diff --git a/dbms/src/Interpreters/Context.cpp b/dbms/src/Interpreters/Context.cpp
index 47a346ba4f71cac896462a7df9a4dd824074d9a9..c67c36e2cd05c12ab5d90eb08d58709d2d0b7d86 100644
--- a/dbms/src/Interpreters/Context.cpp
+++ b/dbms/src/Interpreters/Context.cpp
@@ -13,7 +13,6 @@
 #include
 #include
 #include
-#include <DB/DataTypes/DataTypeFactory.h>
 #include
 #include
 #include
@@ -72,7 +71,6 @@ struct ContextShared
     Databases databases;                                    /// List of databases and the tables in them.
     TableFunctionFactory table_function_factory;            /// Table functions.
     AggregateFunctionFactory aggregate_function_factory;    /// Aggregate functions.
-    DataTypeFactory data_type_factory;                      /// Data types.
     FormatFactory format_factory;                           /// Formats.
     mutable SharedPtr<Dictionaries> dictionaries;           /// Metrica dictionaries. Initialized lazily.
     mutable SharedPtr<ExternalDictionaries> external_dictionaries;
@@ -155,7 +153,6 @@ Context::~Context() = default;
 
 const TableFunctionFactory & Context::getTableFunctionFactory() const          { return shared->table_function_factory; }
 const AggregateFunctionFactory & Context::getAggregateFunctionFactory() const  { return shared->aggregate_function_factory; }
-const DataTypeFactory & Context::getDataTypeFactory() const                    { return shared->data_type_factory; }
 const FormatFactory & Context::getFormatFactory() const                        { return shared->format_factory; }
 InterserverIOHandler & Context::getInterserverIOHandler()                      { return shared->interserver_io_handler; }
 Poco::Mutex & Context::getMutex() const                                        { return shared->mutex; }
@@ -800,7 +797,7 @@ void Context::initClusters()
 {
     Poco::ScopedLock<Poco::Mutex> lock(shared->mutex);
     if (!shared->clusters)
-        shared->clusters = new Clusters(settings, shared->data_type_factory);
+        shared->clusters = new Clusters(settings);
 }
 
 Cluster & Context::getCluster(const std::string & cluster_name)
diff --git a/dbms/src/Interpreters/InterpreterAlterQuery.cpp b/dbms/src/Interpreters/InterpreterAlterQuery.cpp
index adfaf7573ef3b92a813ed05a3dd659a363e12494..c5e140abe0bff2b066e967af7c2cf1f9bd2ca1bf 100644
--- a/dbms/src/Interpreters/InterpreterAlterQuery.cpp
+++ b/dbms/src/Interpreters/InterpreterAlterQuery.cpp
@@ -11,6 +11,7 @@
 #include
 #include
 #include
+#include <DB/DataTypes/DataTypeFactory.h>
 #include
 #include
@@ -35,7 +36,7 @@ void InterpreterAlterQuery::execute()
     AlterCommands alter_commands;
     PartitionCommands partition_commands;
-    parseAlter(alter.parameters, context.getDataTypeFactory(), alter_commands, partition_commands);
+    parseAlter(alter.parameters, alter_commands, partition_commands);
 
     for (const PartitionCommand & command : partition_commands)
     {
@@ -71,9 +72,11 @@ void InterpreterAlterQuery::execute()
 }
 
 void InterpreterAlterQuery::parseAlter(
-    const ASTAlterQuery::ParameterContainer & params_container, const DataTypeFactory & data_type_factory,
+    const ASTAlterQuery::ParameterContainer & params_container,
     AlterCommands & out_alter_commands, PartitionCommands & out_partition_commands)
 {
+    const DataTypeFactory & data_type_factory = DataTypeFactory::instance();
+
     for (const auto & params : params_container)
     {
         if (params.type == ASTAlterQuery::ADD_COLUMN)
diff --git a/dbms/src/Interpreters/InterpreterCreateQuery.cpp b/dbms/src/Interpreters/InterpreterCreateQuery.cpp
index ae74e1f1807cde29c49a631f0f7676c036ac38f4..0ba822511b24cd7ea52ef9cd93f8bd50e7403a05 100644
--- a/dbms/src/Interpreters/InterpreterCreateQuery.cpp
+++ b/dbms/src/Interpreters/InterpreterCreateQuery.cpp
@@ -25,9 +25,11 @@
 #include
 #include
 #include
+
 #include
 #include
 #include
+#include <DB/DataTypes/DataTypeFactory.h>
 
 namespace DB
@@ -272,6 +274,8 @@ InterpreterCreateQuery::ColumnsAndDefaults InterpreterCreateQuery::parseColumns(
     ASTPtr default_expr_list{new ASTExpressionList};
     default_expr_list->children.reserve(column_list_ast.children.size());
 
+    const DataTypeFactory & data_type_factory = DataTypeFactory::instance();
+
     for (auto & ast : column_list_ast.children)
     {
         auto & col_decl = typeid_cast<ASTColumnDeclaration &>(*ast);
@@ -280,7 +284,7 @@ InterpreterCreateQuery::ColumnsAndDefaults InterpreterCreateQuery::parseColumns(
         {
             const auto & type_range = col_decl.type->range;
             columns.emplace_back(col_decl.name,
-                context.getDataTypeFactory().get({ type_range.first, type_range.second }));
+                data_type_factory.get({ type_range.first, type_range.second }));
         }
         else
             /// we're creating dummy DataTypeUInt8 in order to prevent the NullPointerException in ExpressionActions
diff --git a/dbms/src/Interpreters/InterpreterInsertQuery.cpp b/dbms/src/Interpreters/InterpreterInsertQuery.cpp
index 12a216e0764ed3a92e65b70989bc3e03a074d7f6..04ab185b338ac4f94e4095696f944d3ec72b09fd 100644
--- a/dbms/src/Interpreters/InterpreterInsertQuery.cpp
+++ b/dbms/src/Interpreters/InterpreterInsertQuery.cpp
@@ -114,8 +114,7 @@ void InterpreterInsertQuery::execute(ReadBuffer * remaining_data_istr)
 
         BlockInputStreamPtr in{
             context.getFormatFactory().getInput(
-                format, istr, sample, context.getSettings().max_insert_block_size,
-                context.getDataTypeFactory())};
+                format, istr, sample, context.getSettings().max_insert_block_size)};
 
         copyData(*in, *out);
     }
diff --git a/dbms/src/Interpreters/InterpreterSelectQuery.cpp b/dbms/src/Interpreters/InterpreterSelectQuery.cpp
index 00430dd88fa23f599dfa6ab99d0ad6176031d8ac..63de3f21ba02a7987f24eb43e5fe76dd979c9d62 100644
--- a/dbms/src/Interpreters/InterpreterSelectQuery.cpp
+++ b/dbms/src/Interpreters/InterpreterSelectQuery.cpp
@@ -887,7 +887,7 @@ void InterpreterSelectQuery::executeOrder(BlockInputStreams & streams)
         /// Merge the sorted blocks.
         stream = new MergeSortingBlockInputStream(
             stream, order_descr, settings.max_block_size, limit,
-            settings.limits.max_bytes_before_external_sort, context.getTemporaryPath(), context.getDataTypeFactory());
+            settings.limits.max_bytes_before_external_sort, context.getTemporaryPath());
     }
diff --git a/dbms/src/Interpreters/tests/expression_analyzer.cpp b/dbms/src/Interpreters/tests/expression_analyzer.cpp
index e63b073e9428d99cd1786be70f436ae963008c84..48a11137fd404c9e1da0c5ccba4c7842b2e75b69 100644
--- a/dbms/src/Interpreters/tests/expression_analyzer.cpp
+++ b/dbms/src/Interpreters/tests/expression_analyzer.cpp
@@ -3,6 +3,7 @@
 #include
 #include
 #include
+#include <DB/DataTypes/DataTypeFactory.h>
 
 int main(int argc, char ** argv)
@@ -25,7 +26,7 @@ int main(int argc, char ** argv)
     {
         NameAndTypePair col;
         col.name = argv[i];
-        col.type = context.getDataTypeFactory().get(argv[i + 1]);
+        col.type = DataTypeFactory::instance().get(argv[i + 1]);
         columns.push_back(col);
     }
diff --git a/dbms/src/Server/TCPHandler.cpp b/dbms/src/Server/TCPHandler.cpp
index 1961e0c0b9b7467549e090ea5031f627bd3cf5ee..1b784cd3505a7c71c9716a61efe166e131e2da17 100644
--- a/dbms/src/Server/TCPHandler.cpp
+++ b/dbms/src/Server/TCPHandler.cpp
@@ -606,7 +606,6 @@ void TCPHandler::initBlockInput()
 
         state.block_in = new NativeBlockInputStream(
             *state.maybe_compressed_in,
-            query_context.getDataTypeFactory(),
             client_revision);
     }
 }
diff --git a/dbms/src/Storages/ColumnsDescription.cpp b/dbms/src/Storages/ColumnsDescription.cpp
index 977cc74d31271fbf94966b81cfdafe854cbd8482..49a50d3c2b2463d23813059bfb6179fdffa60e96 100644
--- a/dbms/src/Storages/ColumnsDescription.cpp
+++ b/dbms/src/Storages/ColumnsDescription.cpp
@@ -1,6 +1,7 @@
 #include
 #include
 #include
+#include <DB/DataTypes/DataTypeFactory.h>
 
 namespace DB
@@ -50,7 +51,7 @@ String ColumnsDescription::toString() const
 
 template <>
-ColumnsDescription<true> ColumnsDescription<true>::parse(const String & str, const DataTypeFactory & data_type_factory)
+ColumnsDescription<true> ColumnsDescription<true>::parse(const String & str)
 {
     ReadBufferFromString buf{str};
 
@@ -60,6 +61,7 @@ ColumnsDescription<true> ColumnsDescription<true>::parse(const String & str)
     assertString(" columns:\n", buf);
 
     ParserTernaryOperatorExpression expr_parser;
+    const DataTypeFactory & data_type_factory = DataTypeFactory::instance();
 
     ColumnsDescription result{};
     for (size_t i = 0; i < count; ++i)
diff --git a/dbms/src/Storages/MergeTree/MergeTreeData.cpp b/dbms/src/Storages/MergeTree/MergeTreeData.cpp
index 7086c6ee88aa83a3c34bbe5bed92ed99e35d43e0..abac777d7a0bc7e492e5f21c928ff97584bf6119 100644
--- a/dbms/src/Storages/MergeTree/MergeTreeData.cpp
+++ b/dbms/src/Storages/MergeTree/MergeTreeData.cpp
@@ -1037,7 +1037,7 @@ MergeTreeData::MutableDataPartPtr MergeTreeData::loadPartAndFixMetadata(const St
     MergeTreePartChecker::Settings settings;
     settings.setIndexGranularity(index_granularity);
     settings.setRequireColumnFiles(true);
-    MergeTreePartChecker::checkDataPart(full_path + relative_path, settings, context.getDataTypeFactory(), &part->checksums);
+    MergeTreePartChecker::checkDataPart(full_path + relative_path, settings, &part->checksums);
 
     {
         WriteBufferFromFile out(full_path + relative_path + "/checksums.txt.tmp", 4096);
diff --git a/dbms/src/Storages/MergeTree/MergeTreePartChecker.cpp b/dbms/src/Storages/MergeTree/MergeTreePartChecker.cpp
index 5138b9bf457b9130016f7627f8d362aeb5837d40..83300b4a6ac65e3e980fbea46dd5fe9f26c480f6 100644
--- a/dbms/src/Storages/MergeTree/MergeTreePartChecker.cpp
+++ b/dbms/src/Storages/MergeTree/MergeTreePartChecker.cpp
@@ -249,8 +249,7 @@ static size_t checkColumn(const String & path, const String & name, DataTypePtr
     }
 }
 
-void MergeTreePartChecker::checkDataPart(String path, const Settings & settings, const DataTypeFactory & data_type_factory,
-    MergeTreeData::DataPart::Checksums * out_checksums)
+void MergeTreePartChecker::checkDataPart(String path, const Settings & settings, MergeTreeData::DataPart::Checksums * out_checksums)
 {
     if (!path.empty() && path.back() != '/')
         path += "/";
@@ -262,7 +261,7 @@ void MergeTreePartChecker::checkDataPart(String path, const Settings & settings,
 
     {
         ReadBufferFromFile buf(path + "columns.txt");
-        columns.readText(buf, data_type_factory);
+        columns.readText(buf);
         assertEOF(buf);
     }
@@ -275,12 +274,11 @@ void MergeTreePartChecker::checkDataPart(String path, const Settings & settings,
     /// Actual checksums computed from the data. A mismatch with checksums_txt indicates corrupted data.
     MergeTreeData::DataPart::Checksums checksums_data;
 
-    size_t primary_idx_size;
     {
         ReadBufferFromFile file_buf(path + "primary.idx");
         HashingReadBuffer hashing_buf(file_buf);
-        primary_idx_size = hashing_buf.tryIgnore(std::numeric_limits<size_t>::max());
+        size_t primary_idx_size = hashing_buf.tryIgnore(std::numeric_limits<size_t>::max());
         checksums_data.files["primary.idx"] = MergeTreeData::DataPart::Checksums::Checksum(primary_idx_size, hashing_buf.getHash());
     }
@@ -345,9 +343,9 @@ void MergeTreePartChecker::checkDataPart(String path, const Settings & settings,
     if (rows == Stream::UNKNOWN)
         throw Exception("No columns", ErrorCodes::EMPTY_LIST_OF_COLUMNS_PASSED);
 
-    if (primary_idx_size % ((rows - 1) / settings.index_granularity + 1))
+/*    if (primary_idx_size % ((rows - 1) / settings.index_granularity + 1))
         throw Exception("primary.idx size (" + toString(primary_idx_size) + ") not divisible by number of marks ("
-            + toString(rows) + "/" + toString(settings.index_granularity) + " rounded up)", ErrorCodes::CORRUPTED_DATA);
+            + toString(rows) + "/" + toString(settings.index_granularity) + " rounded up)", ErrorCodes::CORRUPTED_DATA);*/
 
     if (settings.require_checksums || !checksums_txt.files.empty())
         checksums_txt.checkEqual(checksums_data, true);
diff --git a/dbms/src/Storages/StorageReplicatedMergeTree.cpp b/dbms/src/Storages/StorageReplicatedMergeTree.cpp
index 294b61651fc7635585b9704e82d5f4b5d0af94f6..c03eefa60cbc2cd955a05a742ed337bed88d7e33 100644
--- a/dbms/src/Storages/StorageReplicatedMergeTree.cpp
+++ b/dbms/src/Storages/StorageReplicatedMergeTree.cpp
@@ -268,8 +268,7 @@ void StorageReplicatedMergeTree::checkTableStructure(bool skip_sanity_checks, bo
     assertEOF(buf);
 
     zkutil::Stat stat;
-    auto columns_desc = ColumnsDescription<true>::parse(
-        zookeeper->get(zookeeper_path + "/columns", &stat), context.getDataTypeFactory());
+    auto columns_desc = ColumnsDescription<true>::parse(zookeeper->get(zookeeper_path + "/columns", &stat));
 
     auto & columns = columns_desc.columns;
     auto & materialized_columns = columns_desc.materialized;
@@ -1459,7 +1458,7 @@ void StorageReplicatedMergeTree::alterThread()
             zkutil::Stat stat;
             const String columns_str = zookeeper->get(zookeeper_path + "/columns", &stat, alter_thread_event);
-            auto columns_desc = ColumnsDescription<true>::parse(columns_str, context.getDataTypeFactory());
+            auto columns_desc = ColumnsDescription<true>::parse(columns_str);
 
             auto & columns = columns_desc.columns;
             auto & materialized_columns = columns_desc.materialized;
@@ -1809,7 +1808,7 @@ void StorageReplicatedMergeTree::partCheckThread()
                     zk_checksums.checkEqual(part->checksums, true);
 
                     auto zk_columns = NamesAndTypesList::parse(
-                        zookeeper->get(replica_path + "/parts/" + part_name + "/columns"), context.getDataTypeFactory());
+                        zookeeper->get(replica_path + "/parts/" + part_name + "/columns"));
                     if (part->columns != zk_columns)
                         throw Exception("Columns of local part " + part_name + " are different from ZooKeeper");
@@ -1818,7 +1817,7 @@ void StorageReplicatedMergeTree::partCheckThread()
                     settings.setRequireChecksums(true);
                     settings.setRequireColumnFiles(true);
                     MergeTreePartChecker::checkDataPart(
-                        data.getFullPath() + part_name, settings, context.getDataTypeFactory());
+                        data.getFullPath() + part_name, settings);
 
                     LOG_INFO(log, "Part " << part_name << " looks good.");
diff --git a/dbms/src/Storages/StorageSet.cpp b/dbms/src/Storages/StorageSet.cpp
index f92e13e0fdb77088253b060556805ee746c88033..8923c40dcd400ed45918e2e568370ee4f41f4221 100644
--- a/dbms/src/Storages/StorageSet.cpp
+++ b/dbms/src/Storages/StorageSet.cpp
@@ -87,8 +86,6 @@ void StorageSetOrJoinBase::restore()
     constexpr auto file_suffix = ".bin";
     constexpr auto file_suffix_size = strlen(file_suffix);
 
-    DataTypeFactory data_type_factory;
-
     Poco::DirectoryIterator dir_end;
     for (Poco::DirectoryIterator dir_it(path); dir_end != dir_it; ++dir_it)
     {
@@ -104,17 +102,17 @@ void StorageSetOrJoinBase::restore()
             if (file_num > increment)
                 increment = file_num;
 
-            restoreFromFile(dir_it->path(), data_type_factory);
+            restoreFromFile(dir_it->path());
         }
     }
 }
 
 
-void StorageSetOrJoinBase::restoreFromFile(const String & file_path, const DataTypeFactory & data_type_factory)
+void StorageSetOrJoinBase::restoreFromFile(const String & file_path)
 {
     ReadBufferFromFile backup_buf(file_path);
     CompressedReadBuffer compressed_backup_buf(backup_buf);
-    NativeBlockInputStream backup_stream(compressed_backup_buf, data_type_factory);
+    NativeBlockInputStream backup_stream(compressed_backup_buf);
 
     backup_stream.readPrefix();
     while (Block block = backup_stream.read())
diff --git a/dbms/src/Storages/tests/part_checker.cpp b/dbms/src/Storages/tests/part_checker.cpp
index 2d58812030ccb96b817590bf16dee673c4b0aeb8..57a66f3dbd587245ce5e2ed2eee2efffe4a6502c 100644
--- a/dbms/src/Storages/tests/part_checker.cpp
+++ b/dbms/src/Storages/tests/part_checker.cpp
@@ -23,7 +23,7 @@ int main(int argc, char ** argv)
     settings.setRequireColumnFiles(argv[2][0] == '1');
     settings.setVerbose(true);
 
-    DB::MergeTreePartChecker::checkDataPart(argv[1], settings, DB::DataTypeFactory());
+    DB::MergeTreePartChecker::checkDataPart(argv[1], settings);
 }
 catch (...)
 {
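A property worth noting about the function-local-static pattern assumed above: since C++11 its initialization is guaranteed to be thread-safe ("magic statics"), so concurrent callers — for example several TCPHandler threads hitting DataTypeFactory::instance() at once — need no extra locking as long as the factory is immutable after construction. A minimal, self-contained demonstration of the guarantee (all names here are hypothetical, not part of the patch):

#include <cassert>
#include <thread>
#include <vector>

/// Hypothetical stand-in for a factory exposing a function-local static.
struct Factory
{
    static const Factory & instance()
    {
        static const Factory f;    /// initialized exactly once, even under contention
        return f;
    }
};

int main()
{
    const Factory * seen[8] = {};
    std::vector<std::thread> threads;
    for (int i = 0; i < 8; ++i)
        threads.emplace_back([i, &seen] { seen[i] = &Factory::instance(); });
    for (auto & t : threads)
        t.join();
    for (int i = 1; i < 8; ++i)
        assert(seen[i] == seen[0]);    /// every thread observed the same object
}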