Unverified commit 1e261654, authored by Vladimir Chebotarev, committed via GitHub

Added `allow_merges` option for volumes in multi-disk configuration (#13402)

Parent commit: 8f1ba521
......@@ -503,6 +503,7 @@ namespace ErrorCodes
extern const int CANNOT_RESTORE_FROM_FIELD_DUMP = 536;
extern const int ILLEGAL_MYSQL_VARIABLE = 537;
extern const int MYSQL_SYNTAX_ERROR = 538;
extern const int INVALID_RAID_TYPE = 539;
extern const int KEEPER_EXCEPTION = 999;
extern const int POCO_EXCEPTION = 1000;
......
......@@ -23,8 +23,11 @@ public:
DiskSelector(const Poco::Util::AbstractConfiguration & config, const String & config_prefix, const Context & context);
DiskSelector(const DiskSelector & from) : disks(from.disks) { }
DiskSelectorPtr
updateFromConfig(const Poco::Util::AbstractConfiguration & config, const String & config_prefix, const Context & context) const;
DiskSelectorPtr updateFromConfig(
const Poco::Util::AbstractConfiguration & config,
const String & config_prefix,
const Context & context
) const;
/// Get disk by name
DiskPtr get(const String & name) const;
......
......@@ -9,7 +9,7 @@ namespace DB
{
namespace ErrorCodes
{
extern const int EXCESSIVE_ELEMENT_IN_CONFIG;
extern const int NO_ELEMENTS_IN_CONFIG;
extern const int INCONSISTENT_RESERVATIONS;
extern const int NO_RESERVATIONS_PROVIDED;
extern const int UNKNOWN_VOLUME_TYPE;
......@@ -51,7 +51,7 @@ IVolume::IVolume(
}
if (disks.empty())
throw Exception("Volume must contain at least one disk.", ErrorCodes::EXCESSIVE_ELEMENT_IN_CONFIG);
throw Exception("Volume must contain at least one disk", ErrorCodes::NO_ELEMENTS_IN_CONFIG);
}
UInt64 IVolume::getMaxUnreservedFreeSpace() const
......
......@@ -63,6 +63,10 @@ public:
virtual DiskPtr getDisk(size_t i) const { return disks[i]; }
const Disks & getDisks() const { return disks; }
virtual bool areMergesAllowed() const { return true; }
virtual void setAllowMergesFromQuery(bool /*allow*/) {}
protected:
Disks disks;
const String name;
......
......@@ -8,7 +8,7 @@ namespace DB
class SingleDiskVolume : public IVolume
{
public:
SingleDiskVolume(const String & name_, DiskPtr disk): IVolume(name_, {disk})
SingleDiskVolume(const String & name_, DiskPtr disk, size_t max_data_part_size_): IVolume(name_, {disk}, max_data_part_size_)
{
}
......
......@@ -11,6 +11,13 @@
#include <Poco/File.h>
namespace
{
const auto DEFAULT_STORAGE_POLICY_NAME = "default";
const auto DEFAULT_VOLUME_NAME = "default";
const auto DEFAULT_DISK_NAME = "default";
}
namespace DB
{
......@@ -18,6 +25,7 @@ namespace ErrorCodes
{
extern const int BAD_ARGUMENTS;
extern const int EXCESSIVE_ELEMENT_IN_CONFIG;
extern const int NO_ELEMENTS_IN_CONFIG;
extern const int UNKNOWN_DISK;
extern const int UNKNOWN_POLICY;
extern const int LOGICAL_ERROR;
......@@ -30,26 +38,39 @@ StoragePolicy::StoragePolicy(
DiskSelectorPtr disks)
: name(std::move(name_))
{
Poco::Util::AbstractConfiguration::Keys keys;
String volumes_prefix = config_prefix + ".volumes";
if (!config.has(volumes_prefix))
throw Exception("StoragePolicy must contain at least one volume (.volumes)", ErrorCodes::EXCESSIVE_ELEMENT_IN_CONFIG);
Poco::Util::AbstractConfiguration::Keys keys;
config.keys(volumes_prefix, keys);
if (!config.has(volumes_prefix))
{
if (name != DEFAULT_STORAGE_POLICY_NAME)
throw Exception("Storage policy " + backQuote(name) + " must contain at least one volume (.volumes)", ErrorCodes::NO_ELEMENTS_IN_CONFIG);
}
else
{
config.keys(volumes_prefix, keys);
}
for (const auto & attr_name : keys)
{
if (!std::all_of(attr_name.begin(), attr_name.end(), isWordCharASCII))
throw Exception(
"Volume name can contain only alphanumeric and '_' (" + attr_name + ")", ErrorCodes::EXCESSIVE_ELEMENT_IN_CONFIG);
"Volume name can contain only alphanumeric and '_' in storage policy" + backQuote(name) + " (" + attr_name + ")", ErrorCodes::EXCESSIVE_ELEMENT_IN_CONFIG);
volumes.push_back(std::make_shared<VolumeJBOD>(attr_name, config, volumes_prefix + "." + attr_name, disks));
if (volumes_names.find(attr_name) != volumes_names.end())
throw Exception("Volumes names must be unique (" + attr_name + " duplicated)", ErrorCodes::UNKNOWN_POLICY);
throw Exception("Volumes names must be unique in storage policy" + backQuote(name) + " (" + attr_name + " duplicated)", ErrorCodes::UNKNOWN_POLICY);
volumes_names[attr_name] = volumes.size() - 1;
}
if (volumes.empty() && name == DEFAULT_STORAGE_POLICY_NAME)
{
auto default_volume = std::make_shared<VolumeJBOD>(DEFAULT_VOLUME_NAME, std::vector<DiskPtr>{disks->get(DEFAULT_DISK_NAME)}, 0, true);
volumes.emplace_back(std::move(default_volume));
volumes_names.emplace(DEFAULT_VOLUME_NAME, 0);
}
if (volumes.empty())
throw Exception("StoragePolicy must contain at least one volume.", ErrorCodes::EXCESSIVE_ELEMENT_IN_CONFIG);
throw Exception("Storage policy " + backQuote(name) + " must contain at least one volume.", ErrorCodes::NO_ELEMENTS_IN_CONFIG);
/// Check that disks are unique in Policy
std::set<String> disk_names;
......@@ -59,15 +80,16 @@ StoragePolicy::StoragePolicy(
{
if (disk_names.find(disk->getName()) != disk_names.end())
throw Exception(
"Duplicate disk '" + disk->getName() + "' in storage policy '" + name + "'", ErrorCodes::EXCESSIVE_ELEMENT_IN_CONFIG);
"Duplicate disk " + backQuote(disk->getName()) + " in storage policy " + backQuote(name), ErrorCodes::EXCESSIVE_ELEMENT_IN_CONFIG);
disk_names.insert(disk->getName());
}
}
move_factor = config.getDouble(config_prefix + ".move_factor", 0.1);
const double default_move_factor = volumes.size() > 1 ? 0.1 : 0.0;
move_factor = config.getDouble(config_prefix + ".move_factor", default_move_factor);
if (move_factor > 1)
throw Exception("Disk move factor have to be in [0., 1.] interval, but set to " + toString(move_factor), ErrorCodes::LOGICAL_ERROR);
throw Exception("Disk move factor have to be in [0., 1.] interval, but set to " + toString(move_factor) + " in storage policy " + backQuote(name), ErrorCodes::LOGICAL_ERROR);
}
......@@ -75,38 +97,70 @@ StoragePolicy::StoragePolicy(String name_, Volumes volumes_, double move_factor_
: volumes(std::move(volumes_)), name(std::move(name_)), move_factor(move_factor_)
{
if (volumes.empty())
throw Exception("StoragePolicy must contain at least one Volume.", ErrorCodes::UNKNOWN_POLICY);
throw Exception("Storage policy " + backQuote(name) + " must contain at least one Volume.", ErrorCodes::NO_ELEMENTS_IN_CONFIG);
if (move_factor > 1)
throw Exception("Disk move factor have to be in [0., 1.] interval, but set to " + toString(move_factor), ErrorCodes::LOGICAL_ERROR);
throw Exception("Disk move factor have to be in [0., 1.] interval, but set to " + toString(move_factor) + " in storage policy " + backQuote(name), ErrorCodes::LOGICAL_ERROR);
for (size_t i = 0; i < volumes.size(); ++i)
{
if (volumes_names.find(volumes[i]->getName()) != volumes_names.end())
throw Exception("Volumes names must be unique (" + volumes[i]->getName() + " duplicated).", ErrorCodes::UNKNOWN_POLICY);
throw Exception("Volumes names must be unique in storage policy " + backQuote(name) + " (" + volumes[i]->getName() + " duplicated).", ErrorCodes::UNKNOWN_POLICY);
volumes_names[volumes[i]->getName()] = i;
}
}
/// Copy-and-update constructor: rebuilds a policy with the same name as
/// `storage_policy` from the (possibly changed) configuration, then replaces
/// every volume that also existed in the old policy with an updated copy so
/// that runtime state (e.g. the START/STOP MERGES user override) survives a
/// configuration reload.
StoragePolicy::StoragePolicy(const StoragePolicy & storage_policy,
const Poco::Util::AbstractConfiguration & config,
const String & config_prefix,
DiskSelectorPtr disks)
: StoragePolicy(storage_policy.getName(), config, config_prefix, disks)
{
for (auto & volume : volumes)
{
/// Only volumes that were present in the old policy carry state worth preserving.
if (storage_policy.volumes_names.count(volume->getName()) > 0)
{
auto old_volume = storage_policy.getVolumeByName(volume->getName());
try
{
auto new_volume = updateVolumeFromConfig(old_volume, config, config_prefix + ".volumes." + volume->getName(), disks);
volume = std::move(new_volume);
}
catch (Exception & e)
{
/// Default policies are allowed to be missed in configuration.
if (e.code() != ErrorCodes::NO_ELEMENTS_IN_CONFIG || !storage_policy.isDefaultPolicy())
throw;
/// If any keys do exist under the prefix, the section is present but
/// malformed rather than merely absent — rethrow in that case too.
Poco::Util::AbstractConfiguration::Keys keys;
config.keys(config_prefix, keys);
if (!keys.empty())
throw;
}
}
}
}
bool StoragePolicy::isDefaultPolicy() const
{
/// Guessing if this policy is default, not 100% correct though.
if (getName() != "default")
if (getName() != DEFAULT_STORAGE_POLICY_NAME)
return false;
if (volumes.size() != 1)
return false;
if (volumes[0]->getName() != "default")
if (volumes[0]->getName() != DEFAULT_VOLUME_NAME)
return false;
const auto & disks = volumes[0]->getDisks();
if (disks.size() != 1)
return false;
if (disks[0]->getName() != "default")
if (disks[0]->getName() != DEFAULT_DISK_NAME)
return false;
return true;
......@@ -128,10 +182,10 @@ DiskPtr StoragePolicy::getAnyDisk() const
/// StoragePolicy must contain at least one Volume
/// Volume must contain at least one Disk
if (volumes.empty())
throw Exception("StoragePolicy has no volumes. It's a bug.", ErrorCodes::LOGICAL_ERROR);
throw Exception("Storage policy " + backQuote(name) + " has no volumes. It's a bug.", ErrorCodes::LOGICAL_ERROR);
if (volumes[0]->getDisks().empty())
throw Exception("Volume '" + volumes[0]->getName() + "' has no disks. It's a bug.", ErrorCodes::LOGICAL_ERROR);
throw Exception("Volume " + backQuote(name) + "." + backQuote(volumes[0]->getName()) + " has no disks. It's a bug.", ErrorCodes::LOGICAL_ERROR);
return volumes[0]->getDisks()[0];
}
......@@ -204,7 +258,7 @@ void StoragePolicy::checkCompatibleWith(const StoragePolicyPtr & new_storage_pol
for (const auto & volume : getVolumes())
{
if (new_volume_names.count(volume->getName()) == 0)
throw Exception("New storage policy shall contain volumes of old one", ErrorCodes::BAD_ARGUMENTS);
throw Exception("New storage policy " + backQuote(name) + " shall contain volumes of old one", ErrorCodes::BAD_ARGUMENTS);
std::unordered_set<String> new_disk_names;
for (const auto & disk : new_storage_policy->getVolumeByName(volume->getName())->getDisks())
......@@ -212,7 +266,7 @@ void StoragePolicy::checkCompatibleWith(const StoragePolicyPtr & new_storage_pol
for (const auto & disk : volume->getDisks())
if (new_disk_names.count(disk->getName()) == 0)
throw Exception("New storage policy shall contain disks of old one", ErrorCodes::BAD_ARGUMENTS);
throw Exception("New storage policy " + backQuote(name) + " shall contain disks of old one", ErrorCodes::BAD_ARGUMENTS);
}
}
......@@ -226,7 +280,7 @@ size_t StoragePolicy::getVolumeIndexByDisk(const DiskPtr & disk_ptr) const
if (disk->getName() == disk_ptr->getName())
return i;
}
throw Exception("No disk " + disk_ptr->getName() + " in policy " + name, ErrorCodes::UNKNOWN_DISK);
throw Exception("No disk " + backQuote(disk_ptr->getName()) + " in policy " + backQuote(name), ErrorCodes::UNKNOWN_DISK);
}
......@@ -242,44 +296,40 @@ StoragePolicySelector::StoragePolicySelector(
{
if (!std::all_of(name.begin(), name.end(), isWordCharASCII))
throw Exception(
"StoragePolicy name can contain only alphanumeric and '_' (" + name + ")", ErrorCodes::EXCESSIVE_ELEMENT_IN_CONFIG);
"Storage policy name can contain only alphanumeric and '_' (" + name + ")", ErrorCodes::EXCESSIVE_ELEMENT_IN_CONFIG);
policies.emplace(name, std::make_shared<StoragePolicy>(name, config, config_prefix + "." + name, disks));
LOG_INFO(&Poco::Logger::get("StoragePolicySelector"), "Storage policy {} loaded", backQuote(name));
}
constexpr auto default_storage_policy_name = "default";
constexpr auto default_volume_name = "default";
constexpr auto default_disk_name = "default";
/// Add default policy if it's not specified explicetly
if (policies.find(default_storage_policy_name) == policies.end())
/// Add default policy if it isn't explicitly specified.
if (policies.find(DEFAULT_STORAGE_POLICY_NAME) == policies.end())
{
auto default_volume = std::make_shared<VolumeJBOD>(default_volume_name, std::vector<DiskPtr>{disks->get(default_disk_name)}, 0);
auto default_policy = std::make_shared<StoragePolicy>(default_storage_policy_name, Volumes{default_volume}, 0.0);
policies.emplace(default_storage_policy_name, default_policy);
auto default_policy = std::make_shared<StoragePolicy>(DEFAULT_STORAGE_POLICY_NAME, config, config_prefix + "." + DEFAULT_STORAGE_POLICY_NAME, disks);
policies.emplace(DEFAULT_STORAGE_POLICY_NAME, std::move(default_policy));
}
}
StoragePolicySelectorPtr StoragePolicySelector::updateFromConfig(const Poco::Util::AbstractConfiguration & config, const String & config_prefix, DiskSelectorPtr disks) const
{
Poco::Util::AbstractConfiguration::Keys keys;
config.keys(config_prefix, keys);
std::shared_ptr<StoragePolicySelector> result = std::make_shared<StoragePolicySelector>(config, config_prefix, disks);
constexpr auto default_storage_policy_name = "default";
/// First pass, check.
for (const auto & [name, policy] : policies)
{
if (name != default_storage_policy_name && result->policies.count(name) == 0)
if (result->policies.count(name) == 0)
throw Exception("Storage policy " + backQuote(name) + " is missing in new configuration", ErrorCodes::BAD_ARGUMENTS);
policy->checkCompatibleWith(result->policies[name]);
}
/// Second pass, load.
for (const auto & [name, policy] : policies)
{
result->policies[name] = std::make_shared<StoragePolicy>(*policy, config, config_prefix + "." + name, disks);
}
return result;
}
......@@ -288,7 +338,7 @@ StoragePolicyPtr StoragePolicySelector::get(const String & name) const
{
auto it = policies.find(name);
if (it == policies.end())
throw Exception("Unknown StoragePolicy " + name, ErrorCodes::UNKNOWN_POLICY);
throw Exception("Unknown storage policy " + backQuote(name), ErrorCodes::UNKNOWN_POLICY);
return it->second;
}
......
......@@ -36,6 +36,13 @@ public:
StoragePolicy(String name_, Volumes volumes_, double move_factor_);
StoragePolicy(
const StoragePolicy & storage_policy,
const Poco::Util::AbstractConfiguration & config,
const String & config_prefix,
DiskSelectorPtr disks
);
bool isDefaultPolicy() const;
/// Returns disks ordered by volumes priority
......
......@@ -53,6 +53,18 @@ VolumeJBOD::VolumeJBOD(
static constexpr UInt64 MIN_PART_SIZE = 8u * 1024u * 1024u;
if (max_data_part_size != 0 && max_data_part_size < MIN_PART_SIZE)
LOG_WARNING(logger, "Volume {} max_data_part_size is too low ({} < {})", backQuote(name), ReadableSize(max_data_part_size), ReadableSize(MIN_PART_SIZE));
are_merges_allowed = config.getBool(config_prefix + ".allow_merges", true);
}
/// Copy-and-update constructor: re-reads the volume's settings from `config`
/// (via the delegated constructor), then carries over runtime state from the
/// previous incarnation of this volume.
VolumeJBOD::VolumeJBOD(const VolumeJBOD & volume_jbod,
const Poco::Util::AbstractConfiguration & config,
const String & config_prefix,
DiskSelectorPtr disk_selector)
: VolumeJBOD(volume_jbod.name, config, config_prefix, disk_selector)
{
/// Preserve the round-robin position of the disk rotation.
last_used = volume_jbod.last_used.load(std::memory_order_relaxed);
/// Preserve any explicit START/STOP MERGES ON VOLUME decision made by a user.
are_merges_allowed_user_override = volume_jbod.are_merges_allowed_user_override;
}
DiskPtr VolumeJBOD::getDisk(size_t /* index */) const
......@@ -84,4 +96,17 @@ ReservationPtr VolumeJBOD::reserve(UInt64 bytes)
return {};
}
/// Returns whether parts on this volume may participate in merges.
/// An explicit user override (SYSTEM START/STOP MERGES ON VOLUME) takes
/// precedence over the value read from the configuration.
bool VolumeJBOD::areMergesAllowed() const
{
return are_merges_allowed_user_override.value_or(are_merges_allowed);
}
/// Records an explicit user decision from a SYSTEM START/STOP MERGES ON VOLUME
/// query; once set, it overrides the configuration value in areMergesAllowed().
void VolumeJBOD::setAllowMergesFromQuery(bool allow)
{
are_merges_allowed_user_override.emplace(allow);
}
}
#pragma once
#include <memory>
#include <optional>
#include <Disks/IVolume.h>
namespace DB
{
class VolumeJBOD;
using VolumeJBODPtr = std::shared_ptr<VolumeJBOD>;
using VolumesJBOD = std::vector<VolumeJBODPtr>;
/**
* Implements something similar to JBOD (https://en.wikipedia.org/wiki/Non-RAID_drive_architectures#JBOD).
* When MergeTree engine wants to write part — it requests VolumeJBOD to reserve space on the next available
......@@ -13,8 +22,9 @@ namespace DB
class VolumeJBOD : public IVolume
{
public:
VolumeJBOD(String name_, Disks disks_, UInt64 max_data_part_size_)
VolumeJBOD(String name_, Disks disks_, UInt64 max_data_part_size_, bool are_merges_allowed_)
: IVolume(name_, disks_, max_data_part_size_)
, are_merges_allowed(are_merges_allowed_)
{
}
......@@ -25,6 +35,13 @@ public:
DiskSelectorPtr disk_selector
);
VolumeJBOD(
const VolumeJBOD & volume_jbod,
const Poco::Util::AbstractConfiguration & config,
const String & config_prefix,
DiskSelectorPtr disk_selector
);
VolumeType getType() const override { return VolumeType::JBOD; }
/// Always returns next disk (round-robin), ignores argument.
......@@ -38,11 +55,18 @@ public:
/// Returns valid reservation or nullptr if there is no space left on any disk.
ReservationPtr reserve(UInt64 bytes) override;
bool areMergesAllowed() const override;
void setAllowMergesFromQuery(bool allow) override;
/// True if parts on this volume participate in merges according to configuration.
bool are_merges_allowed = true;
/// True if parts on this volume participate in merges according to START/STOP MERGES ON VOLUME.
std::optional<bool> are_merges_allowed_user_override;
private:
mutable std::atomic<size_t> last_used = 0;
};
using VolumeJBODPtr = std::shared_ptr<VolumeJBOD>;
using VolumesJBOD = std::vector<VolumeJBODPtr>;
}
......@@ -3,18 +3,23 @@
#include <Disks/createVolume.h>
#include <Disks/VolumeJBOD.h>
namespace DB
{
/// Volume which reserserves space on each underlying disk.
class VolumeRAID1;
using VolumeRAID1Ptr = std::shared_ptr<VolumeRAID1>;
/// Volume which reserves space on each underlying disk.
///
/// NOTE: Just interface implementation, doesn't used in codebase,
/// also not available for user.
class VolumeRAID1 : public VolumeJBOD
{
public:
VolumeRAID1(String name_, Disks disks_, UInt64 max_data_part_size_)
: VolumeJBOD(name_, disks_, max_data_part_size_)
VolumeRAID1(String name_, Disks disks_, UInt64 max_data_part_size_, bool are_merges_allowed_in_config_)
: VolumeJBOD(name_, disks_, max_data_part_size_, are_merges_allowed_in_config_)
{
}
......@@ -27,11 +32,18 @@ public:
{
}
VolumeRAID1(
VolumeRAID1 & volume_raid1,
const Poco::Util::AbstractConfiguration & config,
const String & config_prefix,
DiskSelectorPtr disk_selector)
: VolumeJBOD(volume_raid1, config, config_prefix, disk_selector)
{
}
VolumeType getType() const override { return VolumeType::RAID1; }
ReservationPtr reserve(UInt64 bytes) override;
};
using VolumeRAID1Ptr = std::shared_ptr<VolumeRAID1>;
}
......@@ -12,6 +12,7 @@ namespace DB
namespace ErrorCodes
{
extern const int UNKNOWN_RAID_TYPE;
extern const int INVALID_RAID_TYPE;
}
VolumePtr createVolumeFromReservation(const ReservationPtr & reservation, VolumePtr other_volume)
......@@ -20,12 +21,12 @@ VolumePtr createVolumeFromReservation(const ReservationPtr & reservation, Volume
{
/// Since reservation on JBOD chooses one of disks and makes reservation there, volume
/// for such type of reservation will be with one disk.
return std::make_shared<SingleDiskVolume>(other_volume->getName(), reservation->getDisk());
return std::make_shared<SingleDiskVolume>(other_volume->getName(), reservation->getDisk(), other_volume->max_data_part_size);
}
if (other_volume->getType() == VolumeType::RAID1)
{
auto volume = std::dynamic_pointer_cast<VolumeRAID1>(other_volume);
return std::make_shared<VolumeRAID1>(volume->getName(), reservation->getDisks(), volume->max_data_part_size);
return std::make_shared<VolumeRAID1>(volume->getName(), reservation->getDisks(), volume->max_data_part_size, volume->are_merges_allowed);
}
return nullptr;
}
......@@ -37,15 +38,29 @@ VolumePtr createVolumeFromConfig(
DiskSelectorPtr disk_selector
)
{
auto has_raid_type = config.has(config_prefix + ".raid_type");
if (!has_raid_type)
String raid_type = config.getString(config_prefix + ".raid_type", "JBOD");
if (raid_type == "JBOD")
{
return std::make_shared<VolumeJBOD>(name, config, config_prefix, disk_selector);
}
String raid_type = config.getString(config_prefix + ".raid_type");
throw Exception("Unknown raid type '" + raid_type + "'", ErrorCodes::UNKNOWN_RAID_TYPE);
}
/// Rebuilds `volume` from the (possibly changed) configuration at
/// `config_prefix`, preserving its runtime state via the VolumeJBOD
/// copy-and-update constructor.
///
/// @param volume         existing volume to update; must currently be a JBOD volume
/// @param config         configuration to read the new settings from
/// @param config_prefix  configuration path of this volume's section
/// @param disk_selector  provides disks referenced by the new configuration
/// @return a freshly constructed volume with updated settings
/// @throws Exception with INVALID_RAID_TYPE if `volume` is not a VolumeJBOD,
///         or UNKNOWN_RAID_TYPE if the configured raid_type is unsupported
VolumePtr updateVolumeFromConfig(
    VolumePtr volume,
    const Poco::Util::AbstractConfiguration & config,
    const String & config_prefix,
    DiskSelectorPtr & disk_selector
)
{
    /// Only JBOD is supported for config-driven updates; default when unspecified.
    String raid_type = config.getString(config_prefix + ".raid_type", "JBOD");
    if (raid_type == "JBOD")
    {
        /// NOTE: a stale leftover line from the flattened diff
        /// (`return std::make_shared<VolumeJBOD>(name, ...)`) was removed here —
        /// it referenced an undeclared `name` and made the code below unreachable.
        VolumeJBODPtr volume_jbod = std::dynamic_pointer_cast<VolumeJBOD>(volume);
        if (!volume_jbod)
            throw Exception("Invalid raid type '" + raid_type + "', shall be JBOD", ErrorCodes::INVALID_RAID_TYPE);
        return std::make_shared<VolumeJBOD>(*volume_jbod, config, config_prefix, disk_selector);
    }
    throw Exception("Unknown raid type '" + raid_type + "'", ErrorCodes::UNKNOWN_RAID_TYPE);
}
......
......@@ -6,6 +6,7 @@ namespace DB
{
VolumePtr createVolumeFromReservation(const ReservationPtr & reservation, VolumePtr other_volume);
VolumePtr createVolumeFromConfig(
String name_,
const Poco::Util::AbstractConfiguration & config,
......@@ -13,4 +14,11 @@ VolumePtr createVolumeFromConfig(
DiskSelectorPtr disk_selector
);
VolumePtr updateVolumeFromConfig(
VolumePtr volume,
const Poco::Util::AbstractConfiguration & config,
const String & config_prefix,
DiskSelectorPtr & disk_selector
);
}
......@@ -583,7 +583,7 @@ VolumePtr Context::setTemporaryStorage(const String & path, const String & polic
shared->tmp_path += '/';
auto disk = std::make_shared<DiskLocal>("_tmp_default", shared->tmp_path, 0);
shared->tmp_volume = std::make_shared<SingleDiskVolume>("_tmp_default", disk);
shared->tmp_volume = std::make_shared<SingleDiskVolume>("_tmp_default", disk, 0);
}
else
{
......
......@@ -163,7 +163,9 @@ void InterpreterSystemQuery::startStopAction(StorageActionBlockType action_type,
continue;
}
if (start)
if (volume_ptr && action_type == ActionLocks::PartsMerge)
volume_ptr->setAllowMergesFromQuery(start);
else if (start)
manager->remove(table, action_type);
else
manager->add(table, action_type);
......@@ -199,6 +201,10 @@ BlockIO InterpreterSystemQuery::execute()
if (!query.target_dictionary.empty() && !query.database.empty())
query.target_dictionary = query.database + "." + query.target_dictionary;
volume_ptr = {};
if (!query.storage_policy.empty() || !query.volume.empty())
volume_ptr = context.getStoragePolicy(query.storage_policy)->getVolumeByName(query.volume);
switch (query.type)
{
case Type::SHUTDOWN:
......
......@@ -5,6 +5,7 @@
#include <Storages/IStorage_fwd.h>
#include <Interpreters/StorageID.h>
#include <Common/ActionLock.h>
#include <Disks/VolumeJBOD.h>
namespace Poco { class Logger; }
......@@ -44,6 +45,7 @@ private:
Context & context;
Poco::Logger * log = nullptr;
StorageID table_id = StorageID::createEmpty(); /// Will be set up if query contains table name
VolumePtr volume_ptr;
/// Tries to get a replicated table and restart it
/// Returns pointer to a newly created table if the restart was successful
......
......@@ -118,7 +118,8 @@ void ASTSystemQuery::formatImpl(const FormatSettings & settings, FormatState &,
<< (settings.hilite ? hilite_none : "");
};
auto print_drop_replica = [&] {
auto print_drop_replica = [&]
{
settings.ostr << " " << quoteString(replica);
if (!table.empty())
{
......@@ -140,6 +141,16 @@ void ASTSystemQuery::formatImpl(const FormatSettings & settings, FormatState &,
}
};
auto print_on_volume = [&]
{
settings.ostr << " ON VOLUME "
<< (settings.hilite ? hilite_identifier : "") << backQuoteIfNeed(storage_policy)
<< (settings.hilite ? hilite_none : "")
<< "."
<< (settings.hilite ? hilite_identifier : "") << backQuoteIfNeed(volume)
<< (settings.hilite ? hilite_none : "");
};
if (!cluster.empty())
formatOnCluster(settings);
......@@ -160,6 +171,8 @@ void ASTSystemQuery::formatImpl(const FormatSettings & settings, FormatState &,
{
if (!table.empty())
print_database_table();
else if (!volume.empty())
print_on_volume();
}
else if (type == Type::RESTART_REPLICA || type == Type::SYNC_REPLICA || type == Type::FLUSH_DISTRIBUTED)
{
......
......@@ -65,6 +65,8 @@ public:
String replica;
String replica_zk_path;
bool is_drop_whole_replica;
String storage_policy;
String volume;
String getID(char) const override { return "SYSTEM query"; }
......
......@@ -129,6 +129,33 @@ bool ParserSystemQuery::parseImpl(IParser::Pos & pos, ASTPtr & node, Expected &
case Type::STOP_MERGES:
case Type::START_MERGES:
{
String storage_policy_str;
String volume_str;
if (ParserKeyword{"ON VOLUME"}.ignore(pos, expected))
{
ASTPtr ast;
if (ParserIdentifier{}.parse(pos, ast, expected))
storage_policy_str = ast->as<ASTIdentifier &>().name;
else
return false;
if (!ParserToken{TokenType::Dot}.ignore(pos, expected))
return false;
if (ParserIdentifier{}.parse(pos, ast, expected))
volume_str = ast->as<ASTIdentifier &>().name;
else
return false;
}
res->storage_policy = storage_policy_str;
res->volume = volume_str;
if (res->volume.empty() && res->storage_policy.empty())
parseDatabaseAndTableName(pos, expected, res->database, res->table);
break;
}
case Type::STOP_TTL_MERGES:
case Type::START_TTL_MERGES:
case Type::STOP_MOVES:
......
......@@ -299,7 +299,7 @@ MergeTreeData::MutableDataPartPtr Fetcher::downloadPartToMemory(
NativeBlockInputStream block_in(in, 0);
auto block = block_in.read();
auto volume = std::make_shared<SingleDiskVolume>("volume_" + part_name, reservation->getDisk());
auto volume = std::make_shared<SingleDiskVolume>("volume_" + part_name, reservation->getDisk(), 0);
MergeTreeData::MutableDataPartPtr new_data_part =
std::make_shared<MergeTreeDataPartInMemory>(data, part_name, volume);
......@@ -387,7 +387,7 @@ MergeTreeData::MutableDataPartPtr Fetcher::downloadPartToDisk(
assertEOF(in);
auto volume = std::make_shared<SingleDiskVolume>("volume_" + part_name, disk);
auto volume = std::make_shared<SingleDiskVolume>("volume_" + part_name, disk, 0);
MergeTreeData::MutableDataPartPtr new_data_part = data.createPart(part_name, volume, part_relative_path);
new_data_part->is_temp = true;
new_data_part->modification_time = time(nullptr);
......
......@@ -676,6 +676,18 @@ void IMergeTreeDataPart::loadColumns(bool require)
column_name_to_position.emplace(column.name, pos++);
}
bool IMergeTreeDataPart::canParticipateInMerges() const
{
auto storage_policy = storage.getStoragePolicy();
/// `IMergeTreeDataPart::volume` describes space where current part belongs, and holds
/// `SingleDiskVolume` object which does not contain up-to-date settings of corresponding volume.
/// Therefore we shall obtain volume by name from storage policy.
auto volume_ptr = storage_policy->getVolume(storage_policy->getVolumeIndexByDisk(volume->getDisk()));
return volume_ptr->areMergesAllowed();
}
UInt64 IMergeTreeDataPart::calculateTotalSizeOnDisk(const DiskPtr & disk_, const String & from)
{
if (disk_->isFile(from))
......
......@@ -322,7 +322,9 @@ public:
/// NOTE: Doesn't take column renames into account, if some column renames
/// take place, you must take original name of column for this part from
/// storage and pass it to this method.
virtual bool hasColumnFiles(const String & /* column */, const IDataType & /* type */) const{ return false; }
virtual bool hasColumnFiles(const String & /* column */, const IDataType & /* type */) const { return false; }
bool canParticipateInMerges() const;
/// Calculate the total size of the entire directory with all the files
static UInt64 calculateTotalSizeOnDisk(const DiskPtr & disk_, const String & from);
......
......@@ -772,7 +772,7 @@ void MergeTreeData::loadDataParts(bool skip_sanity_checks)
if (!MergeTreePartInfo::tryParsePartName(part_name, &part_info, format_version))
return;
auto single_disk_volume = std::make_shared<SingleDiskVolume>("volume_" + part_name, part_disk_ptr);
auto single_disk_volume = std::make_shared<SingleDiskVolume>("volume_" + part_name, part_disk_ptr, 0);
auto part = createPart(part_name, part_info, single_disk_volume, part_name);
bool broken = false;
......@@ -2909,7 +2909,7 @@ MergeTreeData::MutableDataPartsVector MergeTreeData::tryLoadPartsToAttach(const
for (const auto & part_names : renamed_parts.old_and_new_names)
{
LOG_DEBUG(log, "Checking part {}", part_names.second);
auto single_disk_volume = std::make_shared<SingleDiskVolume>("volume_" + part_names.first, name_to_disk[part_names.first]);
auto single_disk_volume = std::make_shared<SingleDiskVolume>("volume_" + part_names.first, name_to_disk[part_names.first], 0);
MutableDataPartPtr part = createPart(part_names.first, single_disk_volume, source_dir + part_names.second);
loadPartAndFixMetadataImpl(part);
loaded_parts.push_back(part);
......@@ -3275,7 +3275,7 @@ MergeTreeData::MutableDataPartPtr MergeTreeData::cloneAndLoadDataPartOnSameDisk(
localBackup(disk, src_part_path, dst_part_path);
disk->removeIfExists(dst_part_path + "/" + DELETE_ON_DESTROY_MARKER_PATH);
auto single_disk_volume = std::make_shared<SingleDiskVolume>(disk->getName(), disk);
auto single_disk_volume = std::make_shared<SingleDiskVolume>(disk->getName(), disk, 0);
auto dst_data_part = createPart(dst_part_name, dst_part_info, single_disk_volume, tmp_dst_part_name);
dst_data_part->is_temp = true;
......
......@@ -622,7 +622,7 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mergePartsToTempor
merging_columns,
merging_column_names);
auto single_disk_volume = std::make_shared<SingleDiskVolume>("volume_" + future_part.name, disk);
auto single_disk_volume = std::make_shared<SingleDiskVolume>("volume_" + future_part.name, disk, 0);
MergeTreeData::MutableDataPartPtr new_data_part = data.createPart(
future_part.name,
future_part.type,
......@@ -1066,7 +1066,7 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mutatePartToTempor
in->setProgressCallback(MergeProgressCallback(merge_entry, watch_prev_elapsed, stage_progress));
}
auto single_disk_volume = std::make_shared<SingleDiskVolume>("volume_" + future_part.name, space_reservation->getDisk());
auto single_disk_volume = std::make_shared<SingleDiskVolume>("volume_" + future_part.name, space_reservation->getDisk(), 0);
auto new_data_part = data.createPart(
future_part.name, future_part.type, future_part.part_info, single_disk_volume, "tmp_mut_" + future_part.name);
......
......@@ -194,7 +194,7 @@ MergeTreeData::DataPartPtr MergeTreePartsMover::clonePart(const MergeTreeMoveEnt
LOG_TRACE(log, "Cloning part {}", moving_part.part->name);
moving_part.part->makeCloneOnDiskDetached(moving_part.reserved_space);
auto single_disk_volume = std::make_shared<SingleDiskVolume>("volume_" + moving_part.part->name, moving_part.reserved_space->getDisk());
auto single_disk_volume = std::make_shared<SingleDiskVolume>("volume_" + moving_part.part->name, moving_part.reserved_space->getDisk(), 0);
MergeTreeData::MutableDataPartPtr cloned_part =
data->createPart(moving_part.part->name, single_disk_volume, "detached/" + moving_part.part->name);
LOG_TRACE(log, "Part {} was cloned to {}", moving_part.part->name, cloned_part->getFullPath());
......
......@@ -111,7 +111,7 @@ MergeTreeData::MutableDataPartsVector MergeTreeWriteAheadLog::restore(const Stor
else if (action_type == ActionType::ADD_PART)
{
auto part_disk = storage.reserveSpace(0)->getDisk();
auto single_disk_volume = std::make_shared<SingleDiskVolume>("volume_" + part_name, disk);
auto single_disk_volume = std::make_shared<SingleDiskVolume>("volume_" + part_name, disk, 0);
part = storage.createPart(
part_name,
......
#include <Storages/MergeTree/SimpleMergeSelector.h>
#include <Storages/MergeTree/MergeTreeData.h>
#include <Common/interpolate.h>
#include <cmath>
......@@ -152,6 +154,9 @@ void selectWithinPartition(
if (begin > 1000)
break;
if (!(*static_cast<const MergeTreeData::DataPartPtr *>(parts[begin].data))->canParticipateInMerges())
continue;
size_t sum_size = parts[begin].size;
size_t max_size = parts[begin].size;
size_t min_age = parts[begin].age;
......@@ -161,6 +166,9 @@ void selectWithinPartition(
if (settings.max_parts_to_merge_at_once && end - begin > settings.max_parts_to_merge_at_once)
break;
if (!(*static_cast<const MergeTreeData::DataPartPtr *>(parts[end - 1].data))->canParticipateInMerges())
break;
size_t cur_size = parts[end - 1].size;
size_t cur_age = parts[end - 1].age;
......
......@@ -24,6 +24,7 @@ IMergeSelector::PartsInPartition TTLMergeSelector::select(
ssize_t partition_to_merge_index = -1;
time_t partition_to_merge_min_ttl = 0;
/// Find most old TTL.
for (size_t i = 0; i < partitions.size(); ++i)
{
const auto & mergeable_parts_in_partition = partitions[i];
......@@ -41,9 +42,12 @@ IMergeSelector::PartsInPartition TTLMergeSelector::select(
if (ttl && (partition_to_merge_index == -1 || ttl < partition_to_merge_min_ttl))
{
partition_to_merge_min_ttl = ttl;
partition_to_merge_index = i;
best_begin = part_it;
if (only_drop_parts || (*static_cast<const MergeTreeData::DataPartPtr *>(part_it->data))->canParticipateInMerges())
{
partition_to_merge_min_ttl = ttl;
partition_to_merge_index = i;
best_begin = part_it;
}
}
}
}
......@@ -55,13 +59,16 @@ IMergeSelector::PartsInPartition TTLMergeSelector::select(
Iterator best_end = best_begin + 1;
size_t total_size = 0;
/// Find begin of range with most old TTL.
while (true)
{
time_t ttl = only_drop_parts ? best_begin->max_ttl : best_begin->min_ttl;
if (!ttl || ttl > current_time
|| (max_total_size_to_merge && total_size > max_total_size_to_merge))
|| (max_total_size_to_merge && total_size > max_total_size_to_merge)
|| (!only_drop_parts && !(*static_cast<const MergeTreeData::DataPartPtr *>(best_begin->data))->canParticipateInMerges()))
{
/// This condition can not be satisfied on first iteration.
++best_begin;
break;
}
......@@ -73,12 +80,14 @@ IMergeSelector::PartsInPartition TTLMergeSelector::select(
--best_begin;
}
/// Find end of range with most old TTL.
while (best_end != best_partition.end())
{
time_t ttl = only_drop_parts ? best_end->max_ttl : best_end->min_ttl;
if (!ttl || ttl > current_time
|| (max_total_size_to_merge && total_size > max_total_size_to_merge))
|| (max_total_size_to_merge && total_size > max_total_size_to_merge)
|| (!only_drop_parts && !(*static_cast<const MergeTreeData::DataPartPtr *>(best_end->data))->canParticipateInMerges()))
break;
total_size += best_end->size;
......
......@@ -30,6 +30,7 @@ StorageSystemStoragePolicies::StorageSystemStoragePolicies(const StorageID & tab
{"volume_type", std::make_shared<DataTypeString>()},
{"max_data_part_size", std::make_shared<DataTypeUInt64>()},
{"move_factor", std::make_shared<DataTypeFloat32>()},
{"allow_merges", std::make_shared<DataTypeUInt8>()}
}));
// TODO: Add string column with custom volume-type-specific options
setInMemoryMetadata(storage_metadata);
......@@ -53,6 +54,7 @@ Pipe StorageSystemStoragePolicies::read(
MutableColumnPtr col_volume_type = ColumnString::create();
MutableColumnPtr col_max_part_size = ColumnUInt64::create();
MutableColumnPtr col_move_factor = ColumnFloat32::create();
MutableColumnPtr col_allow_merges = ColumnUInt8::create();
for (const auto & [policy_name, policy_ptr] : context.getPoliciesMap())
{
......@@ -70,6 +72,7 @@ Pipe StorageSystemStoragePolicies::read(
col_volume_type->insert(volumeTypeToString(volumes[i]->getType()));
col_max_part_size->insert(volumes[i]->max_data_part_size);
col_move_factor->insert(policy_ptr->getMoveFactor());
col_allow_merges->insert(volumes[i]->areMergesAllowed() ? 1 : 0);
}
}
......@@ -81,6 +84,7 @@ Pipe StorageSystemStoragePolicies::read(
res_columns.emplace_back(std::move(col_volume_type));
res_columns.emplace_back(std::move(col_max_part_size));
res_columns.emplace_back(std::move(col_move_factor));
res_columns.emplace_back(std::move(col_allow_merges));
UInt64 num_rows = res_columns.at(0)->size();
Chunk chunk(std::move(res_columns), num_rows);
......
......@@ -30,6 +30,18 @@
</volumes>
</small_jbod_with_external>
<small_jbod_with_external_no_merges>
<volumes>
<main>
<disk>jbod1</disk>
</main>
<external>
<disk>external</disk>
<allow_merges>false</allow_merges>
</external>
</volumes>
</small_jbod_with_external_no_merges>
<one_more_small_jbod_with_external>
<volumes>
<m>
......
......@@ -76,6 +76,7 @@ def test_system_tables(start_cluster):
"volume_type": "JBOD",
"max_data_part_size": "0",
"move_factor": 0.1,
"allow_merges": 1,
},
{
"policy_name": "small_jbod_with_external",
......@@ -85,6 +86,27 @@ def test_system_tables(start_cluster):
"volume_type": "JBOD",
"max_data_part_size": "0",
"move_factor": 0.1,
"allow_merges": 1,
},
{
"policy_name": "small_jbod_with_external_no_merges",
"volume_name": "main",
"volume_priority": "1",
"disks": ["jbod1"],
"volume_type": "JBOD",
"max_data_part_size": "0",
"move_factor": 0.1,
"allow_merges": 1,
},
{
"policy_name": "small_jbod_with_external_no_merges",
"volume_name": "external",
"volume_priority": "2",
"disks": ["external"],
"volume_type": "JBOD",
"max_data_part_size": "0",
"move_factor": 0.1,
"allow_merges": 0,
},
{
"policy_name": "one_more_small_jbod_with_external",
......@@ -94,6 +116,7 @@ def test_system_tables(start_cluster):
"volume_type": "JBOD",
"max_data_part_size": "0",
"move_factor": 0.1,
"allow_merges": 1,
},
{
"policy_name": "one_more_small_jbod_with_external",
......@@ -103,6 +126,7 @@ def test_system_tables(start_cluster):
"volume_type": "JBOD",
"max_data_part_size": "0",
"move_factor": 0.1,
"allow_merges": 1,
},
{
"policy_name": "jbods_with_external",
......@@ -112,6 +136,7 @@ def test_system_tables(start_cluster):
"volume_type": "JBOD",
"max_data_part_size": "10485760",
"move_factor": 0.1,
"allow_merges": 1,
},
{
"policy_name": "jbods_with_external",
......@@ -121,6 +146,7 @@ def test_system_tables(start_cluster):
"volume_type": "JBOD",
"max_data_part_size": "0",
"move_factor": 0.1,
"allow_merges": 1,
},
{
"policy_name": "moving_jbod_with_external",
......@@ -130,6 +156,7 @@ def test_system_tables(start_cluster):
"volume_type": "JBOD",
"max_data_part_size": "0",
"move_factor": 0.7,
"allow_merges": 1,
},
{
"policy_name": "moving_jbod_with_external",
......@@ -139,6 +166,7 @@ def test_system_tables(start_cluster):
"volume_type": "JBOD",
"max_data_part_size": "0",
"move_factor": 0.7,
"allow_merges": 1,
},
{
"policy_name": "default_disk_with_external",
......@@ -148,6 +176,7 @@ def test_system_tables(start_cluster):
"volume_type": "JBOD",
"max_data_part_size": "2097152",
"move_factor": 0.1,
"allow_merges": 1,
},
{
"policy_name": "default_disk_with_external",
......@@ -157,6 +186,7 @@ def test_system_tables(start_cluster):
"volume_type": "JBOD",
"max_data_part_size": "20971520",
"move_factor": 0.1,
"allow_merges": 1,
},
{
"policy_name": "special_warning_policy",
......@@ -166,6 +196,7 @@ def test_system_tables(start_cluster):
"volume_type": "JBOD",
"max_data_part_size": "0",
"move_factor": 0.1,
"allow_merges": 1,
},
{
"policy_name": "special_warning_policy",
......@@ -175,6 +206,7 @@ def test_system_tables(start_cluster):
"volume_type": "JBOD",
"max_data_part_size": "0",
"move_factor": 0.1,
"allow_merges": 1,
},
{
"policy_name": "special_warning_policy",
......@@ -184,6 +216,7 @@ def test_system_tables(start_cluster):
"volume_type": "JBOD",
"max_data_part_size": "1024",
"move_factor": 0.1,
"allow_merges": 1,
},
{
"policy_name": "special_warning_policy",
......@@ -193,6 +226,7 @@ def test_system_tables(start_cluster):
"volume_type": "JBOD",
"max_data_part_size": "1024000000",
"move_factor": 0.1,
"allow_merges": 1,
},
]
......@@ -293,6 +327,9 @@ def get_random_string(length):
def get_used_disks_for_table(node, table_name):
    """Return the disk name of every active part of `table_name`, ordered by modification time."""
    sql = "select disk_name from system.parts where table == '{}' and active=1 order by modification_time".format(table_name)
    raw = node.query(sql)
    return raw.strip().split('\n')
def get_used_parts_for_table(node, table_name):
    """Return the name of every active part of `table_name`, ordered by modification time."""
    sql = "SELECT name FROM system.parts WHERE table = '{}' AND active = 1 ORDER BY modification_time".format(table_name)
    return node.query(sql).splitlines()
def test_no_warning_about_zero_max_data_part_size(start_cluster):
def get_log(node):
return node.exec_in_container(["bash", "-c", "cat /var/log/clickhouse-server/clickhouse-server.log"])
......@@ -356,6 +393,8 @@ def test_round_robin(start_cluster, name, engine):
])
def test_max_data_part_size(start_cluster, name, engine):
try:
assert int(*node1.query("""SELECT max_data_part_size FROM system.storage_policies WHERE policy_name = 'jbods_with_external' AND volume_name = 'main'""").splitlines()) == 10*1024*1024
node1.query("""
CREATE TABLE {name} (
s1 String
......@@ -1201,7 +1240,7 @@ def test_move_while_merge(start_cluster):
node1.query("INSERT INTO {name} VALUES (1)".format(name=name))
node1.query("INSERT INTO {name} VALUES (2)".format(name=name))
parts = node1.query("SELECT name FROM system.parts WHERE table = '{name}' AND active = 1".format(name=name)).splitlines()
parts = get_used_parts_for_table(node1, name)
assert len(parts) == 2
def optimize():
......@@ -1281,3 +1320,102 @@ def test_move_across_policies_does_not_work(start_cluster):
finally:
node1.query("DROP TABLE IF EXISTS {name}".format(name=name))
node1.query("DROP TABLE IF EXISTS {name}2".format(name=name))
def _insert_merge_execute(name, policy, parts, cmds, parts_before_cmds, parts_after_cmds):
    """Exercise merge behavior of a storage policy around a list of SQL commands.

    Creates a table on `policy` whose TTL (now()-1, already expired) moves every
    part to the 'external' volume, inserts `parts` single-row parts, OPTIMIZEs,
    runs each command in `cmds`, OPTIMIZEs again, and asserts the active part
    count before and after the commands.

    name              -- table name to create (always dropped in `finally`)
    policy            -- storage policy name for the table
    parts             -- number of single-row INSERTs (one part each)
    cmds              -- SQL statements executed between the two OPTIMIZEs
    parts_before_cmds -- expected active part count after the first OPTIMIZE
    parts_after_cmds  -- expected active part count after the second OPTIMIZE
    """
    try:
        node1.query("""
            CREATE TABLE {name} (
                n Int64
            ) ENGINE = MergeTree
            ORDER BY tuple()
            PARTITION BY tuple()
            TTL now()-1 TO VOLUME 'external'
            SETTINGS storage_policy='{policy}'
        """.format(name=name, policy=policy))

        for i in range(parts):
            node1.query("""INSERT INTO {name} VALUES ({n})""".format(name=name, n=i))

        # The TTL expression is already expired, so every part must land on 'external'.
        disks = get_used_disks_for_table(node1, name)
        assert set(disks) == {"external"}

        node1.query("""OPTIMIZE TABLE {name}""".format(name=name))
        # Use a distinct name for the part list so it does not shadow the
        # `parts` count parameter (the original code reused `parts` here).
        active_parts = get_used_parts_for_table(node1, name)
        assert len(active_parts) == parts_before_cmds

        for cmd in cmds:
            node1.query(cmd)

        node1.query("""OPTIMIZE TABLE {name}""".format(name=name))
        active_parts = get_used_parts_for_table(node1, name)
        assert len(active_parts) == parts_after_cmds
    finally:
        node1.query("DROP TABLE IF EXISTS {name}".format(name=name))
def _get_allow_merges_for_storage_policy(node, storage_policy):
return list(map(int, node.query("SELECT allow_merges FROM system.storage_policies WHERE policy_name = '{}' ORDER BY volume_priority".format(storage_policy)).splitlines()))
def test_no_merges_in_configuration_allow_from_query_without_reload(start_cluster):
    """Config forbids merges on the 'external' volume; SYSTEM START MERGES must
    enable them immediately, with no config reload."""
    name = "test_no_merges_in_configuration_allow_from_query_without_reload"
    try:
        node1.restart_clickhouse(kill=True)
        # After a clean restart the flags come straight from the configuration:
        # merges allowed on 'main', forbidden on 'external'.
        assert _get_allow_merges_for_storage_policy(node1, "small_jbod_with_external_no_merges") == [1, 0]
        commands = ["SYSTEM START MERGES ON VOLUME small_jbod_with_external_no_merges.external"]
        # Two parts stay unmerged before the command, collapse to one after it.
        _insert_merge_execute(name, "small_jbod_with_external_no_merges", 2, commands, 2, 1)
        assert _get_allow_merges_for_storage_policy(node1, "small_jbod_with_external_no_merges") == [1, 1]
    finally:
        node1.query("SYSTEM STOP MERGES ON VOLUME small_jbod_with_external_no_merges.external")
def test_no_merges_in_configuration_allow_from_query_with_reload(start_cluster):
    """Config forbids merges on the 'external' volume; SYSTEM START MERGES must
    enable them and the override must survive SYSTEM RELOAD CONFIG."""
    name = "test_no_merges_in_configuration_allow_from_query_with_reload"
    try:
        node1.restart_clickhouse(kill=True)
        # Fresh restart: flags reflect the configuration file.
        assert _get_allow_merges_for_storage_policy(node1, "small_jbod_with_external_no_merges") == [1, 0]
        commands = [
            "SYSTEM START MERGES ON VOLUME small_jbod_with_external_no_merges.external",
            "SYSTEM RELOAD CONFIG",
        ]
        # Two parts before the commands, merged into one after — despite the reload.
        _insert_merge_execute(name, "small_jbod_with_external_no_merges", 2, commands, 2, 1)
        assert _get_allow_merges_for_storage_policy(node1, "small_jbod_with_external_no_merges") == [1, 1]
    finally:
        node1.query("SYSTEM STOP MERGES ON VOLUME small_jbod_with_external_no_merges.external")
def test_yes_merges_in_configuration_disallow_from_query_without_reload(start_cluster):
    """Config allows merges on the 'external' volume; SYSTEM STOP MERGES must
    disable them immediately, with no config reload.

    Fix: the table name previously said "...allow_from_query..." (copy-paste
    from the mirror test), which both mismatched the function name and collided
    with the table used by the with-reload variant.
    """
    try:
        name = "test_yes_merges_in_configuration_disallow_from_query_without_reload"
        node1.restart_clickhouse(kill=True)
        # After a clean restart both volumes allow merges, per configuration.
        assert _get_allow_merges_for_storage_policy(node1, "small_jbod_with_external") == [1, 1]
        # One merged part before the commands; the STOP plus an extra INSERT
        # leaves two unmerged parts afterwards.
        _insert_merge_execute(name, "small_jbod_with_external", 2, [
            "SYSTEM STOP MERGES ON VOLUME small_jbod_with_external.external",
            "INSERT INTO {name} VALUES (2)".format(name=name)
        ], 1, 2)
        assert _get_allow_merges_for_storage_policy(node1, "small_jbod_with_external") == [1, 0]
    finally:
        node1.query("SYSTEM START MERGES ON VOLUME small_jbod_with_external.external")
def test_yes_merges_in_configuration_disallow_from_query_with_reload(start_cluster):
    """Config allows merges on the 'external' volume; SYSTEM STOP MERGES must
    disable them and the override must survive SYSTEM RELOAD CONFIG.

    Fix: the table name previously said "...allow_from_query..." (copy-paste
    from the mirror test), which both mismatched the function name and collided
    with the table used by the without-reload variant.
    """
    try:
        name = "test_yes_merges_in_configuration_disallow_from_query_with_reload"
        node1.restart_clickhouse(kill=True)
        # Fresh restart: both volumes allow merges, per configuration.
        assert _get_allow_merges_for_storage_policy(node1, "small_jbod_with_external") == [1, 1]
        # One merged part before the commands; the STOP plus an extra INSERT
        # leaves two unmerged parts afterwards — despite the reload.
        _insert_merge_execute(name, "small_jbod_with_external", 2, [
            "SYSTEM STOP MERGES ON VOLUME small_jbod_with_external.external",
            "INSERT INTO {name} VALUES (2)".format(name=name),
            "SYSTEM RELOAD CONFIG"
        ], 1, 2)
        assert _get_allow_merges_for_storage_policy(node1, "small_jbod_with_external") == [1, 0]
    finally:
        node1.query("SYSTEM START MERGES ON VOLUME small_jbod_with_external.external")
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册