Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
2dot5
ClickHouse
提交
5afcc927
C
ClickHouse
项目概览
2dot5
/
ClickHouse
通知
3
Star
0
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
DevOps
流水线
流水线任务
计划
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
C
ClickHouse
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
DevOps
DevOps
流水线
流水线任务
计划
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
流水线任务
提交
Issue看板
体验新版 GitCode,发现更多精彩内容 >>
未验证
提交
5afcc927
编写于
1月 30, 2021
作者:
I
Ilya Yatsishin
提交者:
GitHub
1月 30, 2021
浏览文件
操作
浏览文件
下载
差异文件
Merge pull request #19706 from qoega/easier-integration-no-project-dir
上级
f7dc83ae
c07fb8a6
变更
2
隐藏空白更改
内联
并排
Showing
2 changed files
with
41 additions
and
46 deletions
+41
-46
tests/integration/helpers/cluster.py
tests/integration/helpers/cluster.py
+39
-46
tests/integration/test_storage_kerberized_kafka/test.py
tests/integration/test_storage_kerberized_kafka/test.py
+2
-0
未找到文件。
tests/integration/helpers/cluster.py
浏览文件 @
5afcc927
...
...
@@ -113,9 +113,9 @@ class ClickHouseCluster:
self
.
zookeeper_config_path
=
p
.
join
(
self
.
base_dir
,
zookeeper_config_path
)
if
zookeeper_config_path
else
p
.
join
(
HELPERS_DIR
,
'zookeeper_config.xml'
)
self
.
project_name
=
pwd
.
getpwuid
(
os
.
getuid
()).
pw_name
+
p
.
basename
(
self
.
base_dir
)
+
self
.
name
project_name
=
pwd
.
getpwuid
(
os
.
getuid
()).
pw_name
+
p
.
basename
(
self
.
base_dir
)
+
self
.
name
# docker-compose removes everything non-alphanumeric from project names so we do it too.
self
.
project_name
=
re
.
sub
(
r
'[^a-z0-9]'
,
''
,
self
.
project_name
.
lower
())
self
.
project_name
=
re
.
sub
(
r
'[^a-z0-9]'
,
''
,
project_name
.
lower
())
self
.
instances_dir
=
p
.
join
(
self
.
base_dir
,
'_instances'
+
(
''
if
not
self
.
name
else
'_'
+
self
.
name
))
self
.
docker_logs_path
=
p
.
join
(
self
.
instances_dir
,
'docker.log'
)
...
...
@@ -126,8 +126,8 @@ class ClickHouseCluster:
self
.
base_cmd
=
[
'docker-compose'
]
if
custom_dockerd_host
:
self
.
base_cmd
+=
[
'--host'
,
custom_dockerd_host
]
self
.
base_cmd
+=
[
'--project-name'
,
self
.
project_name
]
self
.
base_cmd
+=
[
'--project-directory'
,
self
.
base_dir
,
'--project-name'
,
self
.
project_name
]
self
.
base_zookeeper_cmd
=
None
self
.
base_mysql_cmd
=
[]
self
.
base_kafka_cmd
=
[]
...
...
@@ -260,25 +260,23 @@ class ClickHouseCluster:
self
.
with_zookeeper
=
True
self
.
zookeeper_use_tmpfs
=
zookeeper_use_tmpfs
self
.
base_cmd
.
extend
([
'--file'
,
zookeeper_docker_compose_path
])
self
.
base_zookeeper_cmd
=
[
'docker-compose'
,
'--project-
directory'
,
self
.
base_dir
,
'--project-name'
,
self
.
project_name
,
'--file'
,
zookeeper_docker_compose_path
]
self
.
base_zookeeper_cmd
=
[
'docker-compose'
,
'--project-
name'
,
self
.
project_name
,
'--file'
,
zookeeper_docker_compose_path
]
cmds
.
append
(
self
.
base_zookeeper_cmd
)
if
with_mysql
and
not
self
.
with_mysql
:
self
.
with_mysql
=
True
self
.
base_cmd
.
extend
([
'--file'
,
p
.
join
(
docker_compose_yml_dir
,
'docker_compose_mysql.yml'
)])
self
.
base_mysql_cmd
=
[
'docker-compose'
,
'--project-directory'
,
self
.
base_dir
,
'--project-name'
,
self
.
project_name
,
'--file'
,
p
.
join
(
docker_compose_yml_dir
,
'docker_compose_mysql.yml'
)]
self
.
base_mysql_cmd
=
[
'docker-compose'
,
'--project-name'
,
self
.
project_name
,
'--file'
,
p
.
join
(
docker_compose_yml_dir
,
'docker_compose_mysql.yml'
)]
cmds
.
append
(
self
.
base_mysql_cmd
)
if
with_postgres
and
not
self
.
with_postgres
:
self
.
with_postgres
=
True
self
.
base_cmd
.
extend
([
'--file'
,
p
.
join
(
docker_compose_yml_dir
,
'docker_compose_postgres.yml'
)])
self
.
base_postgres_cmd
=
[
'docker-compose'
,
'--project-directory'
,
self
.
base_dir
,
'--project-name'
,
self
.
project_name
,
'--file'
,
p
.
join
(
docker_compose_yml_dir
,
'docker_compose_postgres.yml'
)]
self
.
base_postgres_cmd
=
[
'docker-compose'
,
'--project-name'
,
self
.
project_name
,
'--file'
,
p
.
join
(
docker_compose_yml_dir
,
'docker_compose_postgres.yml'
)]
cmds
.
append
(
self
.
base_postgres_cmd
)
if
with_odbc_drivers
and
not
self
.
with_odbc_drivers
:
...
...
@@ -286,64 +284,57 @@ class ClickHouseCluster:
if
not
self
.
with_mysql
:
self
.
with_mysql
=
True
self
.
base_cmd
.
extend
([
'--file'
,
p
.
join
(
docker_compose_yml_dir
,
'docker_compose_mysql.yml'
)])
self
.
base_mysql_cmd
=
[
'docker-compose'
,
'--project-directory'
,
self
.
base_dir
,
'--project-name'
,
self
.
project_name
,
'--file'
,
p
.
join
(
docker_compose_yml_dir
,
'docker_compose_mysql.yml'
)]
self
.
base_mysql_cmd
=
[
'docker-compose'
,
'--project-name'
,
self
.
project_name
,
'--file'
,
p
.
join
(
docker_compose_yml_dir
,
'docker_compose_mysql.yml'
)]
cmds
.
append
(
self
.
base_mysql_cmd
)
if
not
self
.
with_postgres
:
self
.
with_postgres
=
True
self
.
base_cmd
.
extend
([
'--file'
,
p
.
join
(
docker_compose_yml_dir
,
'docker_compose_postgres.yml'
)])
self
.
base_postgres_cmd
=
[
'docker-compose'
,
'--project-directory'
,
self
.
base_dir
,
'--project-name'
,
self
.
project_name
,
'--file'
,
p
.
join
(
docker_compose_yml_dir
,
'docker_compose_postgres.yml'
)]
self
.
base_postgres_cmd
=
[
'docker-compose'
,
'--project-name'
,
self
.
project_name
,
'--file'
,
p
.
join
(
docker_compose_yml_dir
,
'docker_compose_postgres.yml'
)]
cmds
.
append
(
self
.
base_postgres_cmd
)
if
with_kafka
and
not
self
.
with_kafka
:
self
.
with_kafka
=
True
self
.
base_cmd
.
extend
([
'--file'
,
p
.
join
(
docker_compose_yml_dir
,
'docker_compose_kafka.yml'
)])
self
.
base_kafka_cmd
=
[
'docker-compose'
,
'--project-directory'
,
self
.
base_dir
,
'--project-name'
,
self
.
project_name
,
'--file'
,
p
.
join
(
docker_compose_yml_dir
,
'docker_compose_kafka.yml'
)]
self
.
base_kafka_cmd
=
[
'docker-compose'
,
'--project-name'
,
self
.
project_name
,
'--file'
,
p
.
join
(
docker_compose_yml_dir
,
'docker_compose_kafka.yml'
)]
cmds
.
append
(
self
.
base_kafka_cmd
)
if
with_kerberized_kafka
and
not
self
.
with_kerberized_kafka
:
self
.
with_kerberized_kafka
=
True
self
.
base_cmd
.
extend
([
'--file'
,
p
.
join
(
docker_compose_yml_dir
,
'docker_compose_kerberized_kafka.yml'
)])
self
.
base_kerberized_kafka_cmd
=
[
'docker-compose'
,
'--project-directory'
,
self
.
base_dir
,
'--project-name'
,
self
.
project_name
,
'--file'
,
p
.
join
(
docker_compose_yml_dir
,
'docker_compose_kerberized_kafka.yml'
)]
self
.
base_kerberized_kafka_cmd
=
[
'docker-compose'
,
'--project-name'
,
self
.
project_name
,
'--file'
,
p
.
join
(
docker_compose_yml_dir
,
'docker_compose_kerberized_kafka.yml'
)]
cmds
.
append
(
self
.
base_kerberized_kafka_cmd
)
if
with_rabbitmq
and
not
self
.
with_rabbitmq
:
self
.
with_rabbitmq
=
True
self
.
base_cmd
.
extend
([
'--file'
,
p
.
join
(
docker_compose_yml_dir
,
'docker_compose_rabbitmq.yml'
)])
self
.
base_rabbitmq_cmd
=
[
'docker-compose'
,
'--project-directory'
,
self
.
base_dir
,
'--project-name'
,
self
.
project_name
,
'--file'
,
p
.
join
(
docker_compose_yml_dir
,
'docker_compose_rabbitmq.yml'
)]
self
.
base_rabbitmq_cmd
=
[
'docker-compose'
,
'--project-name'
,
self
.
project_name
,
'--file'
,
p
.
join
(
docker_compose_yml_dir
,
'docker_compose_rabbitmq.yml'
)]
cmds
.
append
(
self
.
base_rabbitmq_cmd
)
if
with_hdfs
and
not
self
.
with_hdfs
:
self
.
with_hdfs
=
True
self
.
base_cmd
.
extend
([
'--file'
,
p
.
join
(
docker_compose_yml_dir
,
'docker_compose_hdfs.yml'
)])
self
.
base_hdfs_cmd
=
[
'docker-compose'
,
'--project-directory'
,
self
.
base_dir
,
'--project-name'
,
self
.
project_name
,
'--file'
,
p
.
join
(
docker_compose_yml_dir
,
'docker_compose_hdfs.yml'
)]
self
.
base_hdfs_cmd
=
[
'docker-compose'
,
'--project-name'
,
self
.
project_name
,
'--file'
,
p
.
join
(
docker_compose_yml_dir
,
'docker_compose_hdfs.yml'
)]
cmds
.
append
(
self
.
base_hdfs_cmd
)
if
with_kerberized_hdfs
and
not
self
.
with_kerberized_hdfs
:
self
.
with_kerberized_hdfs
=
True
self
.
base_cmd
.
extend
([
'--file'
,
p
.
join
(
docker_compose_yml_dir
,
'docker_compose_kerberized_hdfs.yml'
)])
self
.
base_kerberized_hdfs_cmd
=
[
'docker-compose'
,
'--project-directory'
,
self
.
base_dir
,
'--project-name'
,
self
.
project_name
,
'--file'
,
p
.
join
(
docker_compose_yml_dir
,
'docker_compose_kerberized_hdfs.yml'
)]
self
.
base_kerberized_hdfs_cmd
=
[
'docker-compose'
,
'--project-name'
,
self
.
project_name
,
'--file'
,
p
.
join
(
docker_compose_yml_dir
,
'docker_compose_kerberized_hdfs.yml'
)]
cmds
.
append
(
self
.
base_kerberized_hdfs_cmd
)
if
with_mongo
and
not
self
.
with_mongo
:
self
.
with_mongo
=
True
self
.
base_cmd
.
extend
([
'--file'
,
p
.
join
(
docker_compose_yml_dir
,
'docker_compose_mongo.yml'
)])
self
.
base_mongo_cmd
=
[
'docker-compose'
,
'--project-directory'
,
self
.
base_dir
,
'--project-name'
,
self
.
project_name
,
'--file'
,
p
.
join
(
docker_compose_yml_dir
,
'docker_compose_mongo.yml'
)]
self
.
base_mongo_cmd
=
[
'docker-compose'
,
'--project-name'
,
self
.
project_name
,
'--file'
,
p
.
join
(
docker_compose_yml_dir
,
'docker_compose_mongo.yml'
)]
cmds
.
append
(
self
.
base_mongo_cmd
)
if
self
.
with_net_trics
:
...
...
@@ -353,26 +344,25 @@ class ClickHouseCluster:
if
with_redis
and
not
self
.
with_redis
:
self
.
with_redis
=
True
self
.
base_cmd
.
extend
([
'--file'
,
p
.
join
(
docker_compose_yml_dir
,
'docker_compose_redis.yml'
)])
self
.
base_redis_cmd
=
[
'docker-compose'
,
'--project-directory'
,
self
.
base_dir
,
'--project-name'
,
self
.
project_name
,
'--file'
,
p
.
join
(
docker_compose_yml_dir
,
'docker_compose_redis.yml'
)]
self
.
base_redis_cmd
=
[
'docker-compose'
,
'--project-name'
,
self
.
project_name
,
'--file'
,
p
.
join
(
docker_compose_yml_dir
,
'docker_compose_redis.yml'
)]
if
with_minio
and
not
self
.
with_minio
:
self
.
with_minio
=
True
self
.
minio_certs_dir
=
minio_certs_dir
self
.
base_cmd
.
extend
([
'--file'
,
p
.
join
(
docker_compose_yml_dir
,
'docker_compose_minio.yml'
)])
self
.
base_minio_cmd
=
[
'docker-compose'
,
'--project-directory'
,
self
.
base_dir
,
'--project-name'
,
self
.
project_name
,
'--file'
,
p
.
join
(
docker_compose_yml_dir
,
'docker_compose_minio.yml'
)]
self
.
base_minio_cmd
=
[
'docker-compose'
,
'--project-name'
,
self
.
project_name
,
'--file'
,
p
.
join
(
docker_compose_yml_dir
,
'docker_compose_minio.yml'
)]
cmds
.
append
(
self
.
base_minio_cmd
)
if
with_cassandra
and
not
self
.
with_cassandra
:
self
.
with_cassandra
=
True
self
.
base_cmd
.
extend
([
'--file'
,
p
.
join
(
docker_compose_yml_dir
,
'docker_compose_cassandra.yml'
)])
self
.
base_cassandra_cmd
=
[
'docker-compose'
,
'--project-directory'
,
self
.
base_dir
,
'--project-name'
,
self
.
project_name
,
'--file'
,
p
.
join
(
docker_compose_yml_dir
,
'docker_compose_cassandra.yml'
)]
self
.
base_cassandra_cmd
=
[
'docker-compose'
,
'--project-name'
,
self
.
project_name
,
'--file'
,
p
.
join
(
docker_compose_yml_dir
,
'docker_compose_cassandra.yml'
)]
print
(
"Cluster name:{} project_name:{}. Added instance name:{} tag:{} base_cmd:{} docker_compose_yml_dir:{}"
.
format
(
self
.
name
,
self
.
project_name
,
name
,
tag
,
self
.
base_cmd
,
docker_compose_yml_dir
))
return
instance
def
get_instance_docker_id
(
self
,
instance_name
):
...
...
@@ -407,7 +397,10 @@ class ClickHouseCluster:
return
node
def
get_instance_ip
(
self
,
instance_name
):
print
(
"get_instance_ip instance_name={}"
.
format
(
instance_name
))
docker_id
=
self
.
get_instance_docker_id
(
instance_name
)
# for cont in self.docker_client.containers.list():
# print("CONTAINERS LIST: ID={} NAME={} STATUS={}".format(cont.id, cont.name, cont.status))
handle
=
self
.
docker_client
.
containers
.
get
(
docker_id
)
return
list
(
handle
.
attrs
[
'NetworkSettings'
][
'Networks'
].
values
())[
0
][
'IPAddress'
]
...
...
@@ -936,7 +929,7 @@ class ClickHouseInstance:
self
.
with_cassandra
=
with_cassandra
self
.
path
=
p
.
join
(
self
.
cluster
.
instances_dir
,
name
)
self
.
docker_compose_path
=
p
.
join
(
self
.
path
,
'docker
_
compose.yml'
)
self
.
docker_compose_path
=
p
.
join
(
self
.
path
,
'docker
-
compose.yml'
)
self
.
env_variables
=
env_variables
or
{}
if
with_odbc_drivers
:
self
.
odbc_ini_path
=
self
.
path
+
"/odbc.ini:/etc/odbc.ini"
...
...
@@ -1163,14 +1156,14 @@ class ClickHouseInstance:
if
status
==
'exited'
:
raise
Exception
(
"Instance `{}' failed to start. Container status: {}, logs: {}"
.
format
(
self
.
name
,
status
,
handle
.
logs
()))
handle
.
logs
()
.
decode
(
'utf-8'
)
))
current_time
=
time
.
time
()
time_left
=
deadline
-
current_time
if
deadline
is
not
None
and
current_time
>=
deadline
:
raise
Exception
(
"Timed out while waiting for instance `{}' with ip address {} to start. "
"Container status: {}, logs: {}"
.
format
(
self
.
name
,
self
.
ip_address
,
status
,
handle
.
logs
()))
handle
.
logs
()
.
decode
(
'utf-8'
)
))
# Repeatedly poll the instance address until there is something that listens there.
# Usually it means that ClickHouse is ready to accept queries.
...
...
tests/integration/test_storage_kerberized_kafka/test.py
浏览文件 @
5afcc927
...
...
@@ -105,6 +105,8 @@ def test_kafka_json_as_string(kafka_cluster):
kafka_flush_interval_ms=1000;
'''
)
time
.
sleep
(
3
)
result
=
instance
.
query
(
'SELECT * FROM test.kafka;'
)
expected
=
'''
\
{"t": 123, "e": {"x": "woof"} }
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录