diff --git a/dbms/src/Databases/DatabaseDictionary.cpp b/dbms/src/Databases/DatabaseDictionary.cpp
index 2979c6f83e7808165cfe641f6a236a3f88cde3dc..db15e700093315da5a052e1f910e1d648aab24ea 100644
--- a/dbms/src/Databases/DatabaseDictionary.cpp
+++ b/dbms/src/Databases/DatabaseDictionary.cpp
@@ -171,4 +171,9 @@ void DatabaseDictionary::drop()
     /// Additional actions to delete database are not required.
 }
 
+String DatabaseDictionary::getDataPath(const Context & context) const
+{
+    return {};
+}
+
 }
diff --git a/dbms/src/Databases/DatabaseDictionary.h b/dbms/src/Databases/DatabaseDictionary.h
index 0aeba8db629ec8c8bbebf97c5d5f6ff85b7cc385..d6a8944fa0dd696c6e5a017018ef7a06880db271 100644
--- a/dbms/src/Databases/DatabaseDictionary.h
+++ b/dbms/src/Databases/DatabaseDictionary.h
@@ -93,6 +93,8 @@ public:
         const Context & context,
         const String & table_name) const override;
 
+    String getDataPath(const Context & context) const override;
+
     void shutdown() override;
     void drop() override;
 };
diff --git a/dbms/src/Databases/DatabaseFactory.cpp b/dbms/src/Databases/DatabaseFactory.cpp
index c9259642e03e660077274d948dc780c91d26248d..f9976de9029ebe7edd77aaaf955a171b7f39ab36 100644
--- a/dbms/src/Databases/DatabaseFactory.cpp
+++ b/dbms/src/Databases/DatabaseFactory.cpp
@@ -15,11 +15,11 @@ namespace ErrorCodes
 DatabasePtr DatabaseFactory::get(
     const String & engine_name,
     const String & database_name,
-    const String & path,
+    const String & metadata_path,
     Context & context)
 {
     if (engine_name == "Ordinary")
-        return std::make_shared<DatabaseOrdinary>(database_name, path);
+        return std::make_shared<DatabaseOrdinary>(database_name, metadata_path, context);
     else if (engine_name == "Memory")
         return std::make_shared<DatabaseMemory>(database_name);
     else if (engine_name == "Dictionary")
diff --git a/dbms/src/Databases/DatabaseFactory.h b/dbms/src/Databases/DatabaseFactory.h
index 5e8d02ed13805c3eea1650fc0a18fbe28312b6af..00265a2454b20d60f33deeded8904baae50d57ee 100644
--- a/dbms/src/Databases/DatabaseFactory.h
+++ b/dbms/src/Databases/DatabaseFactory.h
@@ -13,7 +13,7 @@ public:
     static DatabasePtr get(
         const String & engine_name,
         const String & database_name,
-        const String & path,
+        const String & metadata_path,
         Context & context);
 };
 
diff --git a/dbms/src/Databases/DatabaseMemory.cpp b/dbms/src/Databases/DatabaseMemory.cpp
index 5c2adeddbb753e298b278a34556c1fd4e9b988a0..2528aba32ca61920f5c5d9faac7d25088e7cbacd 100644
--- a/dbms/src/Databases/DatabaseMemory.cpp
+++ b/dbms/src/Databases/DatabaseMemory.cpp
@@ -152,4 +152,9 @@ void DatabaseMemory::drop()
     /// Additional actions to delete database are not required.
 }
 
+String DatabaseMemory::getDataPath(const Context & context) const
+{
+    return {};
+}
+
 }
diff --git a/dbms/src/Databases/DatabaseMemory.h b/dbms/src/Databases/DatabaseMemory.h
index 662d8d0b6ae0e28c3e5cdf9e51e10bb5413fd3e9..ef8207c86bf0e7bea139d651c13f41d919aca5a2 100644
--- a/dbms/src/Databases/DatabaseMemory.h
+++ b/dbms/src/Databases/DatabaseMemory.h
@@ -84,6 +84,8 @@ public:
         const Context & context,
         const String & table_name) const override;
 
+    String getDataPath(const Context & context) const override;
+
     void shutdown() override;
     void drop() override;
 };
diff --git a/dbms/src/Databases/DatabaseOrdinary.cpp b/dbms/src/Databases/DatabaseOrdinary.cpp
index cdca40cc7871d6c6ef64959ebbbc08c81c60f879..6c038259d45c8b9131e5440697196110f4f8f03a 100644
--- a/dbms/src/Databases/DatabaseOrdinary.cpp
+++ b/dbms/src/Databases/DatabaseOrdinary.cpp
@@ -90,10 +90,11 @@ static void loadTable(
 }
 
 
-DatabaseOrdinary::DatabaseOrdinary(
-    const String & name_, const String & path_)
-    : DatabaseMemory(name_), path(path_)
+DatabaseOrdinary::DatabaseOrdinary(const String & name_, const String & metadata_path, const Context & context)
+    : DatabaseMemory(name_), metadata_path(metadata_path)
 {
+    data_path = context.getPath() + "data/" + escapeForFileName(name) + "/";
+    Poco::File(data_path).createDirectory();
 }
 
 
@@ -108,7 +109,7 @@ void DatabaseOrdinary::loadTables(
     FileNames file_names;
 
     Poco::DirectoryIterator dir_end;
-    for (Poco::DirectoryIterator dir_it(path); dir_it != dir_end; ++dir_it)
+    for (Poco::DirectoryIterator dir_it(metadata_path); dir_it != dir_end; ++dir_it)
     {
         /// For '.svn', '.gitignore' directory and similar.
         if (dir_it.name().at(0) == '.')
@@ -130,7 +131,7 @@
         if (endsWith(dir_it.name(), ".sql"))
             file_names.push_back(dir_it.name());
         else
-            throw Exception("Incorrect file extension: " + dir_it.name() + " in metadata directory " + path,
+            throw Exception("Incorrect file extension: " + dir_it.name() + " in metadata directory " + metadata_path,
                 ErrorCodes::INCORRECT_FILE_NAME);
     }
 
@@ -162,7 +163,7 @@
             watch.restart();
         }
 
-        loadTable(context, path, *this, name, data_path, table, has_force_restore_data_flag);
+        loadTable(context, metadata_path, *this, name, data_path, table, has_force_restore_data_flag);
     }
 };
@@ -269,7 +270,7 @@ void DatabaseOrdinary::createTable(
             throw Exception("Table " + name + "." + table_name + " already exists.",
                 ErrorCodes::TABLE_ALREADY_EXISTS);
     }
 
-    String table_metadata_path = getTableMetadataPath(path, table_name);
+    String table_metadata_path = getTableMetadataPath(metadata_path, table_name);
     String table_metadata_tmp_path = table_metadata_path + ".tmp";
     String statement;
@@ -312,7 +313,7 @@
 {
     StoragePtr res = detachTable(table_name);
 
-    String table_metadata_path = getTableMetadataPath(path, table_name);
+    String table_metadata_path = getTableMetadataPath(metadata_path, table_name);
 
     try
     {
@@ -374,7 +375,7 @@
         throw Exception{e};
     }
 
-    ASTPtr ast = getCreateQueryImpl(path, table_name);
+    ASTPtr ast = getCreateQueryImpl(metadata_path, table_name);
     ASTCreateQuery & ast_create_query = typeid_cast<ASTCreateQuery &>(*ast);
     ast_create_query.table = to_table_name;
@@ -388,7 +389,7 @@ time_t DatabaseOrdinary::getTableMetadataModificationTime(
     const Context & context,
     const String & table_name)
 {
-    String table_metadata_path = getTableMetadataPath(path, table_name);
+    String table_metadata_path = getTableMetadataPath(metadata_path, table_name);
     Poco::File meta_file(table_metadata_path);
 
     if (meta_file.exists())
@@ -406,7 +407,7 @@ ASTPtr DatabaseOrdinary::getCreateQuery(
     const Context & context,
     const String & table_name) const
 {
-    ASTPtr ast = getCreateQueryImpl(path, table_name);
+    ASTPtr ast = getCreateQueryImpl(metadata_path, table_name);
 
     ASTCreateQuery & ast_create_query = typeid_cast<ASTCreateQuery &>(*ast);
     ast_create_query.attach = false;
@@ -454,8 +455,8 @@ void DatabaseOrdinary::alterTable(
     /// Read the definition of the table and replace the necessary parts with new ones.
 
     String table_name_escaped = escapeForFileName(name);
-    String table_metadata_tmp_path = path + "/" + table_name_escaped + ".sql.tmp";
-    String table_metadata_path = path + "/" + table_name_escaped + ".sql";
+    String table_metadata_tmp_path = metadata_path + "/" + table_name_escaped + ".sql.tmp";
+    String table_metadata_path = metadata_path + "/" + table_name_escaped + ".sql";
     String statement;
 
     {
@@ -498,4 +499,9 @@
     }
 }
 
+String DatabaseOrdinary::getDataPath(const Context & context) const
+{
+    return data_path;
+}
+
 }
diff --git a/dbms/src/Databases/DatabaseOrdinary.h b/dbms/src/Databases/DatabaseOrdinary.h
index 323e012b2691c14e1f3f484cb74bf94334044573..983725813734c9476edd6096681cbe9822f4c3d1 100644
--- a/dbms/src/Databases/DatabaseOrdinary.h
+++ b/dbms/src/Databases/DatabaseOrdinary.h
@@ -13,10 +13,11 @@ namespace DB
 class DatabaseOrdinary : public DatabaseMemory
 {
 protected:
-    const String path;
+    const String metadata_path;
+    String data_path;
 
 public:
-    DatabaseOrdinary(const String & name_, const String & path_);
+    DatabaseOrdinary(const String & name_, const String & metadata_path, const Context & context);
 
     String getEngineName() const override { return "Ordinary"; }
@@ -58,6 +59,8 @@ public:
         const Context & context,
         const String & table_name) const override;
 
+    String getDataPath(const Context & context) const override;
+
     void shutdown() override;
     void drop() override;
diff --git a/dbms/src/Databases/IDatabase.h b/dbms/src/Databases/IDatabase.h
index f1c3b90f6aa2a27b094b7be7dc45187fc1ac9aeb..3f30e83dfcbcd199ef918041888368f2c415da20 100644
--- a/dbms/src/Databases/IDatabase.h
+++ b/dbms/src/Databases/IDatabase.h
@@ -129,6 +129,9 @@ public:
         const Context & context,
         const String & name) const = 0;
 
+    /// Returns path for persistent data storage if the database supports it, empty string otherwise
+    virtual String getDataPath(const Context & context) const = 0;
+
     /// Ask all tables to complete the background threads they are using and delete all table objects.
     virtual void shutdown() = 0;
diff --git a/dbms/src/Interpreters/Cluster.cpp b/dbms/src/Interpreters/Cluster.cpp
index 4d3b28a8caabbdef725c34be2bf55b773dbb2384..18126cb7bebf6e9552ca23512152983a0ded38e2 100644
--- a/dbms/src/Interpreters/Cluster.cpp
+++ b/dbms/src/Interpreters/Cluster.cpp
@@ -151,6 +151,13 @@ ClusterPtr Clusters::getCluster(const std::string & cluster_name) const
 }
 
 
+void Clusters::setCluster(const String & cluster_name, const std::shared_ptr<Cluster> & cluster)
+{
+    std::lock_guard<std::mutex> lock(mutex);
+    impl[cluster_name] = cluster;
+}
+
+
 void Clusters::updateClusters(Poco::Util::AbstractConfiguration & config, const Settings & settings, const String & config_name)
 {
     Poco::Util::AbstractConfiguration::Keys config_keys;
@@ -180,6 +187,7 @@ Clusters::Impl Clusters::getContainer() const
     return impl;
 }
 
+
 /// Implementation of `Cluster` class
 
 Cluster::Cluster(Poco::Util::AbstractConfiguration & config, const Settings & settings, const String & cluster_name)
diff --git a/dbms/src/Interpreters/Cluster.h b/dbms/src/Interpreters/Cluster.h
index fd18d52303319015196346b50a51746bbd69f4f4..7d28ad48e8f389e47f0de252c3330679da3d6c07 100644
--- a/dbms/src/Interpreters/Cluster.h
+++ b/dbms/src/Interpreters/Cluster.h
@@ -172,6 +172,7 @@ public:
     Clusters & operator=(const Clusters &) = delete;
 
     ClusterPtr getCluster(const std::string & cluster_name) const;
+    void setCluster(const String & cluster_name, const ClusterPtr & cluster);
 
     void updateClusters(Poco::Util::AbstractConfiguration & config, const Settings & settings, const String & config_name);
diff --git a/dbms/src/Interpreters/Context.cpp b/dbms/src/Interpreters/Context.cpp
index 548ea313e9fc0faeb99f294d67744008323a2f56..cd429dba59b4181e7bb84bc280c0223958ad079b 100644
--- a/dbms/src/Interpreters/Context.cpp
+++ b/dbms/src/Interpreters/Context.cpp
@@ -878,7 +878,7 @@ ASTPtr Context::getCreateQuery(const String & database_name, const String & table_name) const
     auto lock = getLock();
 
     String db = resolveDatabase(database_name, current_database);
-    assertTableExists(db, table_name);
+    assertDatabaseExists(db);
 
     return shared->databases[db]->getCreateQuery(*this, table_name);
 }
@@ -1359,6 +1359,17 @@ void Context::setClustersConfig(const ConfigurationPtr & config, const String & config_name)
 }
 
 
+void Context::setCluster(const String & cluster_name, const std::shared_ptr<Cluster> & cluster)
+{
+    std::lock_guard<std::mutex> lock(shared->clusters_mutex);
+
+    if (!shared->clusters)
+        throw Exception("Clusters are not set", ErrorCodes::LOGICAL_ERROR);
+
+    shared->clusters->setCluster(cluster_name, cluster);
+}
+
+
 Compiler & Context::getCompiler()
 {
     auto lock = getLock();
diff --git a/dbms/src/Interpreters/Context.h b/dbms/src/Interpreters/Context.h
index 683cd9d03d5a985dd41f2e7c22402c247c7686b3..bebf0a5bd0cc80f20f61274dbd4e9d3bca7bd7e5 100644
--- a/dbms/src/Interpreters/Context.h
+++ b/dbms/src/Interpreters/Context.h
@@ -319,6 +319,8 @@ public:
     std::shared_ptr<Cluster> getCluster(const std::string & cluster_name) const;
     std::shared_ptr<Cluster> tryGetCluster(const std::string & cluster_name) const;
     void setClustersConfig(const ConfigurationPtr & config, const String & config_name = "remote_servers");
+    /// Sets custom cluster, but doesn't update configuration
+    void setCluster(const String & cluster_name, const std::shared_ptr<Cluster> & cluster);
 
     Compiler & getCompiler();
     QueryLog & getQueryLog();
diff --git a/dbms/src/Interpreters/InterpreterCreateQuery.cpp b/dbms/src/Interpreters/InterpreterCreateQuery.cpp
index 4cbf6b0864c9839fe50c2248be892c40d76efbfb..ede0e5d28f856ca33ecb4cf00a141d2967401c81 100644
--- a/dbms/src/Interpreters/InterpreterCreateQuery.cpp
+++ b/dbms/src/Interpreters/InterpreterCreateQuery.cpp
@@ -103,13 +103,10 @@ BlockIO InterpreterCreateQuery::createDatabase(ASTCreateQuery & create)
 
     String database_name_escaped = escapeForFileName(database_name);
 
-    /// Create directories for tables data and metadata.
+    /// Create directories for tables metadata.
     String path = context.getPath();
-    String data_path = path + "data/" + database_name_escaped + "/";
     String metadata_path = path + "metadata/" + database_name_escaped + "/";
-
     Poco::File(metadata_path).createDirectory();
-    Poco::File(data_path).createDirectory();
 
     DatabasePtr database = DatabaseFactory::get(database_engine_name, database_name, metadata_path, context);
 
@@ -463,13 +460,9 @@ BlockIO InterpreterCreateQuery::createTable(ASTCreateQuery & create)
     String current_database = context.getCurrentDatabase();
 
     String database_name = create.database.empty() ? current_database : create.database;
-    String database_name_escaped = escapeForFileName(database_name);
     String table_name = create.table;
     String table_name_escaped = escapeForFileName(table_name);
 
-    String data_path = path + "data/" + database_name_escaped + "/";
-    String metadata_path = path + "metadata/" + database_name_escaped + "/" + table_name_escaped + ".sql";
-
     // If this is a stub ATTACH query, read the query definition from the database
     if (create.attach && !create.storage && !create.columns)
     {
@@ -515,9 +508,13 @@ BlockIO InterpreterCreateQuery::createTable(ASTCreateQuery & create)
     {
         std::unique_ptr<DDLGuard> guard;
 
+        String data_path;
+        DatabasePtr database;
+
         if (!create.is_temporary)
        {
-            context.assertDatabaseExists(database_name);
+            database = context.getDatabase(database_name);
+            data_path = database->getDataPath(context);
 
             /** If the table already exists, and the request specifies IF NOT EXISTS,
               * then we allow concurrent CREATE queries (which do nothing).
@@ -544,7 +541,7 @@ BlockIO InterpreterCreateQuery::createTable(ASTCreateQuery & create)
         if (create.is_temporary)
             context.getSessionContext().addExternalTable(table_name, res);
         else
-            context.getDatabase(database_name)->createTable(context, table_name, res, query_ptr);
+            database->createTable(context, table_name, res, query_ptr);
     }
 
     res->startup();
diff --git a/dbms/src/Interpreters/loadMetadata.cpp b/dbms/src/Interpreters/loadMetadata.cpp
index 80b670f4c3f0f8292335e68349232449611acaf2..7042b7eae874e5a2097b4c884ab1c01885727a9a 100644
--- a/dbms/src/Interpreters/loadMetadata.cpp
+++ b/dbms/src/Interpreters/loadMetadata.cpp
@@ -135,7 +135,7 @@ void loadMetadataSystem(Context & context)
         Poco::File(global_path + "data/" SYSTEM_DATABASE).createDirectories();
         Poco::File(global_path + "metadata/" SYSTEM_DATABASE).createDirectories();
 
-        auto system_database = std::make_shared<DatabaseOrdinary>(SYSTEM_DATABASE, global_path + "metadata/" SYSTEM_DATABASE);
+        auto system_database = std::make_shared<DatabaseOrdinary>(SYSTEM_DATABASE, global_path + "metadata/" SYSTEM_DATABASE, context);
         context.addDatabase(SYSTEM_DATABASE, system_database);
     }
 
diff --git a/dbms/src/Storages/StorageDistributed.cpp b/dbms/src/Storages/StorageDistributed.cpp
index 995a70e1b7da8ad7391c87c26ca60041784a2edc..feb539972ee09c164b750bf4cb594c9440c16c92 100644
--- a/dbms/src/Storages/StorageDistributed.cpp
+++ b/dbms/src/Storages/StorageDistributed.cpp
@@ -51,6 +51,8 @@ namespace ErrorCodes
     extern const int RESHARDING_NO_WORKER;
     extern const int RESHARDING_INVALID_PARAMETERS;
     extern const int RESHARDING_INITIATOR_CHECK_FAILED;
+    extern const int BAD_ARGUMENTS;
+    extern const int READONLY;
 }
 
@@ -223,16 +225,24 @@ BlockInputStreams StorageDistributed::read(
 
 BlockOutputStreamPtr StorageDistributed::write(const ASTPtr & query, const Settings & settings)
 {
-    auto cluster = context.getCluster(cluster_name);
+    if (owned_cluster && context.getApplicationType() != Context::ApplicationType::LOCAL)
+        throw Exception(
+            "Method write is not supported by storage " + getName() +
+            " created via a table function", ErrorCodes::READONLY);
 
-    /// TODO: !path.empty() can be replaced by !owned_cluster or !cluster_name.empty() ?
-    bool write_enabled = !path.empty() && (((cluster->getLocalShardCount() + cluster->getRemoteShardCount()) < 2) || has_sharding_key);
+    auto cluster = (owned_cluster) ? owned_cluster : context.getCluster(cluster_name);
 
-    if (!write_enabled)
-        throw Exception{
+    bool is_sharding_key_ok = has_sharding_key || ((cluster->getLocalShardCount() + cluster->getRemoteShardCount()) < 2);
+    if (!is_sharding_key_ok)
+        throw Exception(
             "Method write is not supported by storage " + getName() +
             " with more than one shard and no sharding key provided",
-            ErrorCodes::STORAGE_REQUIRES_PARAMETER};
+            ErrorCodes::STORAGE_REQUIRES_PARAMETER);
+
+    if (path.empty() && !settings.insert_distributed_sync.value)
+        throw Exception(
+            "Data path should be set for storage " + getName() +
+            " to enable asynchronous inserts", ErrorCodes::BAD_ARGUMENTS);
 
     /// DistributedBlockOutputStream will not own cluster, but will own ConnectionPools of the cluster
     return std::make_shared<DistributedBlockOutputStream>(
diff --git a/dbms/src/Storages/StorageFactory.cpp b/dbms/src/Storages/StorageFactory.cpp
index 5450075fcabfb7a881980eed397bd712cc465cfd..398139ce3a4232ebe32e346278807608a4ce98dc 100644
--- a/dbms/src/Storages/StorageFactory.cpp
+++ b/dbms/src/Storages/StorageFactory.cpp
@@ -607,7 +607,7 @@ StoragePtr StorageFactory::get(
         args[0] = evaluateConstantExpressionOrIdentifierAsLiteral(args[0], local_context);
         args[1] = evaluateConstantExpressionAsLiteral(args[1], local_context);
 
-        String source_database = static_cast(*args[0]).value.safeGet();
+        String source_database = static_cast(*args[0]).value.safeGet();
         String table_name_regexp = static_cast(*args[1]).value.safeGet();
 
         return StorageMerge::create(
@@ -640,7 +640,7 @@
         args[1] = evaluateConstantExpressionOrIdentifierAsLiteral(args[1], local_context);
         args[2] = evaluateConstantExpressionOrIdentifierAsLiteral(args[2], local_context);
 
-        String remote_database = static_cast(*args[1]).value.safeGet();
+        String remote_database = static_cast(*args[1]).value.safeGet();
         String remote_table = static_cast(*args[2]).value.safeGet();
 
         const auto & sharding_key = args.size() == 4 ? args[3] : nullptr;
@@ -686,7 +686,7 @@
         args[1] = evaluateConstantExpressionOrIdentifierAsLiteral(args[1], local_context);
 
         String destination_database = static_cast(*args[0]).value.safeGet();
-        String destination_table = static_cast(*args[1]).value.safeGet();
+        String destination_table = static_cast(*args[1]).value.safeGet();
 
         UInt64 num_buckets = applyVisitor(FieldVisitorConvertToNumber(), typeid_cast(*args[2]).value);
diff --git a/dbms/src/Storages/StorageFile.cpp b/dbms/src/Storages/StorageFile.cpp
index febd73f83e9f097fa50bbb55a4762716b493e7f8..a7fff1a3ef4bbe643d3b0fdf904d749bd7ed43a0 100644
--- a/dbms/src/Storages/StorageFile.cpp
+++ b/dbms/src/Storages/StorageFile.cpp
@@ -20,6 +20,8 @@ namespace ErrorCodes
     extern const int CANNOT_WRITE_TO_FILE_DESCRIPTOR;
     extern const int CANNOT_SEEK_THROUGH_FILE;
     extern const int DATABASE_ACCESS_DENIED;
+    extern const int INCORRECT_FILE_NAME;
+    extern const int EMPTY_LIST_OF_COLUMNS_PASSED;
 };
 
@@ -49,6 +51,9 @@ StorageFile::StorageFile(
     : IStorage(materialized_columns_, alias_columns_, column_defaults_),
     table_name(table_name_), format_name(format_name_), columns(columns_), context_global(context_), table_fd(table_fd_)
 {
+    if (columns->empty())
+        throw Exception("Empty list of columns passed to storage " + getName() + " constructor", ErrorCodes::EMPTY_LIST_OF_COLUMNS_PASSED);
+
     if (table_fd < 0) /// Will use file
     {
         use_table_fd = false;
@@ -61,6 +66,9 @@ StorageFile::StorageFile(
     }
     else /// Is DB's file
     {
+        if (db_dir_path.empty())
+            throw Exception("Storage " + getName() + " requires data path", ErrorCodes::INCORRECT_FILE_NAME);
+
         path = getTablePath(db_dir_path, table_name, format_name);
         is_db_table = true;
         Poco::File(Poco::Path(path).parent()).createDirectories();
diff --git a/dbms/src/Storages/StorageLog.cpp b/dbms/src/Storages/StorageLog.cpp
index 93af0b9a8c4bec8fe7b2d8bacfd34763d9167dfc..495bbbecb3ea4e5c6404e19380e9f7d886994aa4 100644
--- a/dbms/src/Storages/StorageLog.cpp
+++ b/dbms/src/Storages/StorageLog.cpp
@@ -46,6 +46,7 @@ namespace ErrorCodes
     extern const int EMPTY_LIST_OF_COLUMNS_PASSED;
     extern const int DUPLICATE_COLUMN;
     extern const int SIZES_OF_MARKS_FILES_ARE_INCONSISTENT;
+    extern const int INCORRECT_FILE_NAME;
 }
 
@@ -579,6 +580,9 @@ StorageLog::StorageLog(
     if (columns->empty())
         throw Exception("Empty list of columns passed to StorageLog constructor", ErrorCodes::EMPTY_LIST_OF_COLUMNS_PASSED);
 
+    if (path.empty())
+        throw Exception("Storage " + getName() + " requires data path", ErrorCodes::INCORRECT_FILE_NAME);
+
     /// create files if they do not exist
     Poco::File(path + escapeForFileName(name) + '/').createDirectories();
diff --git a/dbms/src/Storages/StorageMergeTree.cpp b/dbms/src/Storages/StorageMergeTree.cpp
index 035d01de38db7942d90020c5f4dd21b3a4b03f3b..1e49881078b12909f66fc21228514530a65dbe58 100644
--- a/dbms/src/Storages/StorageMergeTree.cpp
+++ b/dbms/src/Storages/StorageMergeTree.cpp
@@ -27,6 +27,7 @@ namespace ErrorCodes
     extern const int ABORTED;
     extern const int BAD_ARGUMENTS;
     extern const int INCORRECT_DATA;
+    extern const int INCORRECT_FILE_NAME;
 }
 
@@ -59,6 +60,9 @@ StorageMergeTree::StorageMergeTree(
     reader(data), writer(data), merger(data, context.getBackgroundPool()),
     log(&Logger::get(database_name_ + "." + table_name + " (StorageMergeTree)"))
+ table_name + " (StorageMergeTree)")) { + if (path_.empty()) + throw Exception("MergeTree storages require data path", ErrorCodes::INCORRECT_FILE_NAME); + data.loadDataParts(has_force_restore_data_flag); if (!attach) diff --git a/dbms/src/Storages/StorageReplicatedMergeTree.cpp b/dbms/src/Storages/StorageReplicatedMergeTree.cpp index 8e74572143467027a4cf9023f78f1259b0eaae32..87f5b9f274b4d4340d1e8ae135c4b2cf996ec1ae 100644 --- a/dbms/src/Storages/StorageReplicatedMergeTree.cpp +++ b/dbms/src/Storages/StorageReplicatedMergeTree.cpp @@ -109,6 +109,8 @@ namespace ErrorCodes extern const int RECEIVED_ERROR_TOO_MANY_REQUESTS; extern const int TOO_MUCH_FETCHES; extern const int BAD_DATA_PART_NAME; + extern const int PART_IS_TEMPORARILY_LOCKED; + extern const int INCORRECT_FILE_NAME; } @@ -209,6 +211,9 @@ StorageReplicatedMergeTree::StorageReplicatedMergeTree( shutdown_event(false), part_check_thread(*this), log(&Logger::get(database_name + "." + table_name + " (StorageReplicatedMergeTree)")) { + if (path_.empty()) + throw Exception("ReplicatedMergeTree storages require data path", ErrorCodes::INCORRECT_FILE_NAME); + if (!zookeeper_path.empty() && zookeeper_path.back() == '/') zookeeper_path.resize(zookeeper_path.size() - 1); replica_path = zookeeper_path + "/replicas/" + replica_name; diff --git a/dbms/src/Storages/StorageSet.cpp b/dbms/src/Storages/StorageSet.cpp index 063e40ecef1c329e46e4dfbb361df96e2daa202e..179f9c118f645cf33255c727bd3ecdcc01a81d3b 100644 --- a/dbms/src/Storages/StorageSet.cpp +++ b/dbms/src/Storages/StorageSet.cpp @@ -15,6 +15,12 @@ namespace DB { +namespace ErrorCodes +{ + extern const int INCORRECT_FILE_NAME; +} + + class SetOrJoinBlockOutputStream : public IBlockOutputStream { public: @@ -81,8 +87,12 @@ StorageSetOrJoinBase::StorageSetOrJoinBase( const NamesAndTypesList & alias_columns_, const ColumnDefaults & column_defaults_) : IStorage{materialized_columns_, alias_columns_, column_defaults_}, - path(path_ + escapeForFileName(name_) + '/'), name(name_), columns(columns_) + name(name_), columns(columns_) { + if (path_.empty()) + throw Exception("Join and Set storages require data path", ErrorCodes::INCORRECT_FILE_NAME); + + path = path_ + escapeForFileName(name_) + '/'; } diff --git a/dbms/src/Storages/StorageStripeLog.cpp b/dbms/src/Storages/StorageStripeLog.cpp index 3442eed79669d7b0f5ce3d4bdf72f2117fdcff51..97b8193fd4794086bb0b2f6372b64aab042e5350 100644 --- a/dbms/src/Storages/StorageStripeLog.cpp +++ b/dbms/src/Storages/StorageStripeLog.cpp @@ -37,6 +37,7 @@ namespace ErrorCodes { extern const int EMPTY_LIST_OF_COLUMNS_PASSED; extern const int CANNOT_CREATE_DIRECTORY; + extern const int INCORRECT_FILE_NAME; } @@ -190,6 +191,9 @@ StorageStripeLog::StorageStripeLog( if (columns->empty()) throw Exception("Empty list of columns passed to StorageStripeLog constructor", ErrorCodes::EMPTY_LIST_OF_COLUMNS_PASSED); + if (path.empty()) + throw Exception("Storage " + getName() + " requires data path", ErrorCodes::INCORRECT_FILE_NAME); + String full_path = path + escapeForFileName(name) + '/'; if (!attach) { diff --git a/dbms/src/Storages/StorageTinyLog.cpp b/dbms/src/Storages/StorageTinyLog.cpp index a7422918b0a218e0ff055cdcd4b45f087fcc094a..993338a718285ee308c22d984e216905fdac4e34 100644 --- a/dbms/src/Storages/StorageTinyLog.cpp +++ b/dbms/src/Storages/StorageTinyLog.cpp @@ -49,6 +49,7 @@ namespace ErrorCodes extern const int CANNOT_READ_ALL_DATA; extern const int DUPLICATE_COLUMN; extern const int LOGICAL_ERROR; + extern const int INCORRECT_FILE_NAME; } @@ -454,6 +455,9 @@ 
StorageTinyLog::StorageTinyLog( if (columns->empty()) throw Exception("Empty list of columns passed to StorageTinyLog constructor", ErrorCodes::EMPTY_LIST_OF_COLUMNS_PASSED); + if (path.empty()) + throw Exception("Storage " + getName() + " requires data path", ErrorCodes::INCORRECT_FILE_NAME); + String full_path = path + escapeForFileName(name) + '/'; if (!attach) {