Commit c99f3d7e authored by Alexey Zatelepin, committed by alexey-milovidov

add test [#CLICKHOUSE-3151]

Parent b469ef17
@@ -36,12 +36,18 @@ class PartitionManager:
         self._delete_rule({'destination': instance.ip_address, 'source_port': 2181, 'action': action})
 
-    def partition_instances(self, left, right, action='DROP'):
+    def partition_instances(self, left, right, port=None, action='DROP'):
         self._check_instance(left)
         self._check_instance(right)
 
-        self._add_rule({'source': left.ip_address, 'destination': right.ip_address, 'action': action})
-        self._add_rule({'source': right.ip_address, 'destination': left.ip_address, 'action': action})
+        def create_rule(src, dst):
+            rule = {'source': src.ip_address, 'destination': dst.ip_address, 'action': action}
+            if port is not None:
+                rule['destination_port'] = port
+            return rule
+
+        self._add_rule(create_rule(left, right))
+        self._add_rule(create_rule(right, left))
 
     def heal_all(self):
......
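For orientation, a minimal usage sketch of the new port argument (the instance names are the ones defined in the test below; that _add_rule turns destination_port into an iptables --dport match is an assumption about the helper's internals). Blocking only port 9009 (the interserver replication port) cuts off replication between the two replicas of a shard, while port 9000 (the native protocol port) stays reachable for distributed queries:

    from helpers.network import PartitionManager

    with PartitionManager() as pm:
        # Drop traffic to port 9009 in both directions between the two replicas;
        # port 9000 is untouched, so the Distributed table can still query them.
        pm.partition_instances(node_1_1, node_1_2, port=9009)
        # ... run queries here; the rules are removed when the context manager exits.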
@@ -4,11 +4,22 @@
             <shard>
                 <internal_replication>true</internal_replication>
                 <replica>
-                    <host>replica1</host>
+                    <host>node_1_1</host>
                     <port>9000</port>
                 </replica>
                 <replica>
-                    <host>replica2</host>
+                    <host>node_1_2</host>
                     <port>9000</port>
                 </replica>
             </shard>
+            <shard>
+                <internal_replication>true</internal_replication>
+                <replica>
+                    <host>node_2_1</host>
+                    <port>9000</port>
+                </replica>
+                <replica>
+                    <host>node_2_2</host>
+                    <port>9000</port>
+                </replica>
+            </shard>
......
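The snippet above is the 'test_cluster' layout referenced by the Distributed table created in the test below: two shards of two replicas each, all on the native port 9000. A quick way to sanity-check what the server parsed from this config (a sketch; system.clusters is the standard introspection table, and instance_with_dist_table is the handle defined in the test below):

    parsed = instance_with_dist_table.query(
        "SELECT shard_num, replica_num, host_name FROM system.clusters "
        "WHERE cluster = 'test_cluster'")
    # Expected rows: (1,1,node_1_1), (1,2,node_1_2), (2,1,node_2_1), (2,2,node_2_2)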
@@ -7,21 +7,28 @@ from helpers.network import PartitionManager
 
 cluster = ClickHouseCluster(__file__)
 
-instance_with_dist_table = cluster.add_instance('instance_with_dist_table', main_configs=['configs/remote_servers.xml'])
-replica1 = cluster.add_instance('replica1', with_zookeeper=True)
-replica2 = cluster.add_instance('replica2', with_zookeeper=True)
+# Cluster with 2 shards of 2 replicas each. node_1_1 is the instance with the Distributed table.
+# Thus we have a shard with a local replica and a shard with remote replicas.
+node_1_1 = instance_with_dist_table = cluster.add_instance(
+    'node_1_1', with_zookeeper=True, main_configs=['configs/remote_servers.xml'])
+node_1_2 = cluster.add_instance('node_1_2', with_zookeeper=True)
+node_2_1 = cluster.add_instance('node_2_1', with_zookeeper=True)
+node_2_2 = cluster.add_instance('node_2_2', with_zookeeper=True)
 
 @pytest.fixture(scope="module")
 def started_cluster():
     try:
         cluster.start()
 
-        for replica in (replica1, replica2):
-            replica.query(
-                "CREATE TABLE replicated (d Date, x UInt32) ENGINE = "
-                "ReplicatedMergeTree('/clickhouse/tables/replicated', '{instance}', d, d, 8192)")
+        for shard in (1, 2):
+            for replica in (1, 2):
+                node = cluster.instances['node_{}_{}'.format(shard, replica)]
+                node.query('''
+CREATE TABLE replicated (d Date, x UInt32) ENGINE =
+ReplicatedMergeTree('/clickhouse/tables/{shard}/replicated', '{instance}', d, d, 8192)'''
+                    .format(shard=shard, instance=node.name))
 
-        instance_with_dist_table.query(
+        node_1_1.query(
             "CREATE TABLE distributed (d Date, x UInt32) ENGINE = "
             "Distributed('test_cluster', 'default', 'replicated')")
@@ -33,36 +40,42 @@ def started_cluster():
 def test(started_cluster):
     with PartitionManager() as pm:
-        pm.partition_instances(replica1, replica2)
+        # Hinder replication between replicas of the same shard, but leave the possibility of distributed connections.
+        pm.partition_instances(node_1_1, node_1_2, port=9009)
+        pm.partition_instances(node_2_1, node_2_2, port=9009)
 
-        replica2.query("INSERT INTO replicated VALUES ('2017-05-08', 1)")
+        node_1_2.query("INSERT INTO replicated VALUES ('2017-05-08', 1)")
+        node_2_2.query("INSERT INTO replicated VALUES ('2017-05-08', 2)")
 
         time.sleep(1)  # accrue replica delay
 
-        assert replica1.query("SELECT count() FROM replicated").strip() == ''
-        assert replica2.query("SELECT count() FROM replicated").strip() == '1'
+        assert node_1_1.query("SELECT sum(x) FROM replicated").strip() == ''
+        assert node_1_2.query("SELECT sum(x) FROM replicated").strip() == '1'
+        assert node_2_1.query("SELECT sum(x) FROM replicated").strip() == ''
+        assert node_2_2.query("SELECT sum(x) FROM replicated").strip() == '2'
 
-        # With in_order balancing replica1 is chosen.
+        # With in_order balancing, the first replicas are chosen.
         assert instance_with_dist_table.query(
             "SELECT count() FROM distributed SETTINGS load_balancing='in_order'").strip() == ''
 
-        # When we set max_replica_delay, replica1 must be excluded.
+        # When we set max_replica_delay, the first replicas must be excluded.
         assert instance_with_dist_table.query('''
-            SELECT count() FROM distributed SETTINGS
+            SELECT sum(x) FROM distributed SETTINGS
                 load_balancing='in_order',
                 max_replica_delay_for_distributed_queries=1
-            ''').strip() == '1'
+            ''').strip() == '3'
 
-        pm.drop_instance_zk_connections(replica2)
+        pm.drop_instance_zk_connections(node_1_2)
+        pm.drop_instance_zk_connections(node_2_2)
         time.sleep(4)  # allow pings to zookeeper to timeout (must be greater than ZK session timeout).
 
-        # At this point all replicas are stale, but the query must still go to replica2 which is the least stale one.
+        # At this point all replicas are stale, but the query must still go to the second replicas, which are the least stale ones.
         assert instance_with_dist_table.query('''
-            SELECT count() FROM distributed SETTINGS
+            SELECT sum(x) FROM distributed SETTINGS
                 load_balancing='in_order',
                 max_replica_delay_for_distributed_queries=1
-            ''').strip() == '1'
+            ''').strip() == '3'
 
         # If we forbid stale replicas, the query must fail.
         with pytest.raises(Exception):
......
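The last assertion is collapsed in this view. As a hedged sketch of how the forbid-stale-replicas case could look (fallback_to_stale_replicas_for_distributed_queries is ClickHouse's setting for this behavior; its use here is an assumption, since the actual lines are not shown):

    # Hypothetical completion of the truncated block above:
    with pytest.raises(Exception):
        instance_with_dist_table.query('''
            SELECT sum(x) FROM distributed SETTINGS
                load_balancing='in_order',
                max_replica_delay_for_distributed_queries=1,
                fallback_to_stale_replicas_for_distributed_queries=0
            ''')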