Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
milvus
milvus
提交
96203b2e
M
milvus
项目概览
milvus
/
milvus
11 个月 前同步成功
通知
261
Star
22476
Fork
2472
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
DevOps
流水线
流水线任务
计划
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
M
milvus
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
DevOps
DevOps
流水线
流水线任务
计划
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
流水线任务
提交
Issue看板
体验新版 GitCode,发现更多精彩内容 >>
提交
96203b2e
编写于
5月 25, 2020
作者:
D
del-zhenwu
提交者:
JinHai-CN
6月 10, 2020
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
update ci cron on 0.10.0 (#2428)
Signed-off-by:
N
zw
<
zw@milvus.io
>
Co-authored-by:
N
zw
<
zw@milvus.io
>
上级
0ea259c3
变更
2
隐藏空白更改
内联
并排
Showing
2 changed files
with
58 additions
and
1 deletion
+58
-1
tests/milvus_python_test/test_partition.py
tests/milvus_python_test/test_partition.py
+1
-1
tests/milvus_python_test/test_restart.py
tests/milvus_python_test/test_restart.py
+57
-0
未找到文件。
tests/milvus_python_test/test_partition.py
浏览文件 @
96203b2e
...
...
@@ -33,7 +33,7 @@ class TestCreateBase:
status
=
connect
.
create_partition
(
collection
,
tag
)
assert
status
.
OK
()
@
pytest
.
mark
.
level
(
2
)
@
pytest
.
mark
.
level
(
3
)
@
pytest
.
mark
.
timeout
(
TIMEOUT
)
def
test_create_partition_limit
(
self
,
connect
,
collection
):
'''
...
...
tests/milvus_python_test/test_restart.py
浏览文件 @
96203b2e
...
...
@@ -65,3 +65,60 @@ class TestRestartBase:
logging
.
getLogger
().
info
(
res
)
assert
status
.
OK
()
assert
res
==
nq
@
pytest
.
mark
.
level
(
2
)
def
test_during_creating_index_restart
(
self
,
connect
,
collection
,
args
,
get_simple_index
):
'''
target: return the same row count after server restart
method: call function: insert, flush, and create index, server do restart during creating index
expected: row count, vector-id, index info keep the same
'''
# reset auto_flush_interval
# auto_flush_interval = 100
get_ids_length
=
500
index_param
=
get_simple_index
[
"index_param"
]
index_type
=
get_simple_index
[
"index_type"
]
# status, res_set = connect.set_config("db_config", "auto_flush_interval", auto_flush_interval)
# assert status.OK()
# status, res_get = connect.get_config("db_config", "auto_flush_interval")
# assert status.OK()
# assert res_get == str(auto_flush_interval)
# insert and create index
vectors
=
gen_vectors
(
big_nb
,
dim
)
status
,
ids
=
connect
.
insert
(
collection
,
vectors
,
ids
=
[
i
for
i
in
range
(
big_nb
)])
status
=
connect
.
flush
([
collection
])
assert
status
.
OK
()
status
,
res_count
=
connect
.
count_entities
(
collection
)
logging
.
getLogger
().
info
(
res_count
)
assert
status
.
OK
()
assert
res_count
==
big_nb
def
create_index
():
milvus
=
get_milvus
(
args
[
"ip"
],
args
[
"port"
],
handler
=
args
[
"handler"
])
status
=
milvus
.
create_index
(
collection
,
index_type
,
index_param
)
logging
.
getLogger
().
info
(
status
)
assert
status
.
OK
()
p
=
Process
(
target
=
create_index
,
args
=
(
collection
,
))
p
.
start
()
# restart server
if
restart_server
(
args
[
"service_name"
]):
logging
.
getLogger
().
info
(
"Restart success"
)
else
:
logging
.
getLogger
().
info
(
"Restart failed"
)
# check row count, index_type, vertor-id after server restart
new_connect
=
get_milvus
(
args
[
"ip"
],
args
[
"port"
],
handler
=
args
[
"handler"
])
status
,
res_count
=
new_connect
.
count_entities
(
collection
)
assert
status
.
OK
()
assert
res_count
==
big_nb
status
,
res_info
=
connect
.
get_index_info
(
collection
)
logging
.
getLogger
().
info
(
res_info
)
assert
res_info
.
_params
==
index_param
assert
res_info
.
_collection_name
==
collection
assert
res_info
.
_index_type
==
index_type
get_ids
=
random
.
sample
(
ids
,
get_ids_length
)
status
,
res
=
connect
.
get_entity_by_id
(
collection
,
get_ids
)
assert
status
.
OK
()
for
index
,
item_id
in
enumerate
(
get_ids
):
logging
.
getLogger
().
info
(
index
)
assert_equal_vector
(
res
[
index
],
vectors
[
item_id
])
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录