提交 619a8007 编写于 作者: X xj.lin

MS-143 merge


Former-commit-id: b8eeda3dcb955a065f4adc933d53c16a10b6f04a
milvus/
conf/server_config.yaml
conf/log_config.conf
version.h
megasearch/
lcov_out/
base.info
output.info
output_new.info
server.info
\ No newline at end of file
......@@ -2,7 +2,20 @@
Please mark all change in change log and use the ticket from JIRA.
# Milvus 0.3.0 (TBD)
# Milvus 0.3.1 (2019-07-10)
## Bug
## Improvement
## New Feature
## Task
- MS-125 - Create 0.3.1 release branch
# Milvus 0.3.0 (2019-06-30)
## Bug
- MS-104 - Fix unittest lcov execution error
......@@ -11,6 +24,7 @@ Please mark all change in change log and use the ticket from JIRA.
- MS-89 - Fix compile failed, libgpufaiss.a link missing
- MS-90 - Fix arch match incorrect on ARM
- MS-99 - Fix compilation bug
- MS-110 - Avoid huge file size
## Improvement
- MS-82 - Update server startup welcome message
......@@ -19,6 +33,11 @@ Please mark all change in change log and use the ticket from JIRA.
- MS-22 - Enhancement for MemVector size control
- MS-92 - Unify behavior of debug and release build
- MS-98 - Install all unit test to installation directory
- MS-115 - Change is_startup of metric_config switch from true to on
- MS-122 - Archive criteria config
- MS-124 - HasTable interface
- MS-126 - Add more error code
- MS-128 - Change default db path
## New Feature
......@@ -40,6 +59,9 @@ Please mark all change in change log and use the ticket from JIRA.
- MS-85 - add NetIO metric
- MS-96 - add new query interface for specified files
- MS-97 - Add S3 SDK for MinIO Storage
- MS-105 - Add MySQL
- MS-130 - Add prometheus_test
- MS-143 - Integrate Knowhere but not activate
## Task
- MS-74 - Change README.md in cpp
......
......@@ -52,7 +52,7 @@ if(MILVUS_VERSION_MAJOR STREQUAL ""
OR MILVUS_VERSION_MINOR STREQUAL ""
OR MILVUS_VERSION_PATCH STREQUAL "")
message(WARNING "Failed to determine Milvus version from git branch name")
set(MILVUS_VERSION "0.3.0")
set(MILVUS_VERSION "0.3.1")
endif()
message(STATUS "Build version = ${MILVUS_VERSION}")
......@@ -113,20 +113,13 @@ link_directories(${MILVUS_BINARY_DIR})
set(MILVUS_ENGINE_INCLUDE ${PROJECT_SOURCE_DIR}/include)
set(MILVUS_ENGINE_SRC ${PROJECT_SOURCE_DIR}/src)
#set(MILVUS_THIRD_PARTY ${CMAKE_CURRENT_SOURCE_DIR}/third_party)
#set(MILVUS_THIRD_PARTY_BUILD ${CMAKE_CURRENT_SOURCE_DIR}/third_party/build)
add_compile_definitions(PROFILER=${PROFILER})
include_directories(${MILVUS_ENGINE_INCLUDE})
include_directories(${MILVUS_ENGINE_SRC})
#include_directories(${MILVUS_THIRD_PARTY_BUILD}/include)
# Fix: variable name was misspelled as CMAKE_CURRRENT_BINARY_DIR (three R's),
# which expands to an empty string and silently made this call a no-op.
link_directories(${CMAKE_CURRENT_BINARY_DIR})
#link_directories(${MILVUS_THIRD_PARTY_BUILD}/lib)
#link_directories(${MILVUS_THIRD_PARTY_BUILD}/lib64)
#execute_process(COMMAND bash build.sh
# WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/third_party)
add_subdirectory(src)
......@@ -135,12 +128,19 @@ if (BUILD_COVERAGE STREQUAL "ON")
endif()
if (BUILD_UNIT_TEST)
if (BUILD_UNIT_TEST STREQUAL "ON")
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/unittest)
endif(BUILD_UNIT_TEST)
endif()
add_custom_target(Clean-All COMMAND ${CMAKE_BUILD_TOOL} clean)
if("${MILVUS_DB_PATH}" STREQUAL "")
set(MILVUS_DB_PATH "/tmp/milvus")
endif()
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/conf/server_config.template ${CMAKE_CURRENT_SOURCE_DIR}/conf/server_config.yaml)
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/conf/log_config.template ${CMAKE_CURRENT_SOURCE_DIR}/conf/log_config.conf)
#install
install(FILES
scripts/start_server.sh
......@@ -152,5 +152,12 @@ install(FILES
conf/log_config.conf
DESTINATION
conf)
install(FILES
./Milvus-EULA-cn.md
./Milvus-EULA-en.md
DESTINATION
license
)
config_summary()
# **Milvus**终端用户授权许可条款及条件
#### 2019-06-30 版
本条款和条件(下称“本协议”)适用于使用由上海赜睿信息科技有限公司(下称“**ZILLIZ**”)所提供的Milvus产品(参见如下定义) 的用户。
**请仔细阅读如下条款:**
**若您(下称“您”或“用户”)代表某公司或者其他机构使用任何产品时, 您特此陈述您作为该公司或该等其他机构的员工或代理,您有权代表该公司或该等其他机构接受本协议项下所要求的全部条款和条件。**
**若使用任何产品,您知晓并同意:**
**(A)您已阅读本协议中所有的条款和条件;**
**(B)您已理解本协议中所有的条款和条件;**
**(C)您已同意本协议中所有条款和条件对您具有法律约束力。**
**如果您不同意本协议所述条款和条件中的任意内容,则可以选择不使用产品的任何部分。**
**本协议的“生效日期”是指您第一次下载任何产品的日期。**
1. **产品**,指本协议项下任何 **ZILLIZ** 的Milvus产品和软件,包括: Milvus向量检索数据库Docker版与其相关的升级、更新、故障修复或修改版本(统称“更新软件”)。无论本协议是否另有规定:
(a)仅Milvus向量检索数据库Docker版是免费授权用户的版本,且ZILLIZ保留收回该授权的权力;
(b)任何使用或者试用Milvus向量检索数据库Docker版的个人与组织,需要通过support@zilliz.com向ZILLIZ告知个人或者组织的身份、联系方式以及使用Milvus的目的。
(c)制作和使用额外的副本仅限于必要的备份目的。
2. **全部协议**,本协议包括本授权许可条款及条件以及任何[Milvus官方网站](https://milvus.io)展示或者网页链接所附或引用的全部条款。本协议是双方就相关事项达成的完整协议,取代 **ZILLIZ** 与用户之间就本条款相关事项所达成的其他任何协议,无论是口头的还是书面的。
3. **使用许可****ZILLIZ** 授予用户非排他性的、不可转让的、非可再授权的、可撤回的和有限的许可进行访问和使用第1条所定义的产品,该访问和使用许可仅限于用户内部使用之目的。通过电子下载或其他经许可的来源获得产品的用户均应受限于本协议的内容。
4. **许可限制**,除非本协议另有明文规定,否则用户将不被允许:
(a)修改、翻译或制造产品的衍生作品;
(b)反向编译、反向工程、破解产品的任何部分或试图发现有关产品的任何源代码、基本理念或运算方法; (c)销售、分派、再授权、出租、出借、出质、提供或另行翻译全部或部分产品;
(d)制造、获取非法制造的、再版或复制产品;
(e)删除或更改与产品相关联的任何商标、标志、版权或其他专有标 ;
(f)不得在没有 **ZILLIZ** 明确书面授权的情况下,使用或许可他人使用产品为第三方提供服务,无论是在产品服务过程中使用或采用分时的方式;
(g)引起或许可任何其他方进行上述任何一种禁止行为。
5. **所有权****ZILLIZ** 和用户在本协议项下的许可需明确,**ZILLIZ** 有以下各项的全部权利、所有权和相关利益:(a)产品(包括但不限于,任何更新软件、修订版本或其衍生作品);
(b)在 **ZILLIZ** 根据本协议提供任何服务的过程中或作为其提供服务的结果,由 **ZILLIZ** 发现、 产生或发展出来的所有的概念、发明、发现、改进、信息、创意作品等;
(c)前述各项所含的任何知识产权权利。在本协议项下,“知识产权”是指在任何管辖区域经申请和注册获得认可和保护的全部专利、版权、道德权利、商标、商业秘密和任何其他形式的权利。**ZILLIZ** 与用户同意,在受限于法律法规规定及本协议全部条款和条件的前提下,用户拥有使用产品而产生的数据的权利、所有权等相关利益。本协议中无任何默示许可,**ZILLIZ** 保留本协议项下未明确授权的全部权利。除非本协议明确约定,**ZILLIZ** 在本协议下未授予用户任何许可权利,无论是通过暗示、默许或其他方式。
6. **保密**,保密信息是指,无论是在本协议生效前或生效后,由 **ZILLIZ** 披露给用户的与本协议或与 **ZILLIZ** 相关的所有信息(无论是以口头、书面或其他有形、无形的形式)。保密信息包括但不限于,商业计划的内容、产品、发明、设计图纸、财务计划、计算机程序、发明、用户信息、战略和其他类似信息。在本协议期限内,除非获得明确许可, 用户需保证保密信息的秘密性,并确保不会使用上述保密信息。用户将采用与保护其自身保密信息的同等谨慎程度(不论在何种情况下均不低于合理的谨慎程度)来保护 **ZILLIZ** 的保密信息,来避免使得保密信息被未经授权的使用和披露。保密信息只供用户根据本协议规定使用产品之目的而使用。此外,用户将:
(a)除非用户为了根据本协议的规定而使用产品之目的外,不得以任何形式复制、使用或披露保密信息; (b)只向为确保用户可根据本协议使用产品而必需知道该保密信息的员工和顾问披露保密信息,前提是上述员工和顾问已签署了包含保密义务不低于本条所述内容的保密协议。
保密信息不包括下列信息:
(a) 非因用户过错违反本协议导致已进入公共领域可被第三方获取的;
(b) 用户能合理证明其在通过 **ZILLIZ** 获得之前已知晓的;
(c)用户能证明没有使用或参考该保密信息而独立获得的;
(d)用户从其他无披露限制或无保密义务的第三方获得的。如无另行说明,由用户提供给 **ZILLIZ** 有关产品的任何建议、评论或者其他反馈(统称“反馈信息”)将同样构成保密信息。
此外,**ZILLIZ** 有权使用、披露、复制、许可和利用上述反馈信息,而无需承担任何知识产权负担或其他任何形式的义务或限制。根据相关法律法规,与本协议的履行和用户使用 **ZILLIZ** 产品相关的情况下:
(a)**ZILLIZ** 同意不会要求用户提供任何个人身份信息;
(b)用户同意不提供任何个人身份信息给 **ZILLIZ**
7. **免责声明**,用户陈述、保证及承诺如下:
(a)其所有员工和顾问都将遵守本协议的全部条款;
(b)在履行本协议时将遵守全部可适用的政府部门颁发的法律、法规、规章、命令和其他要求(无论是现行有效还是之后生效的)。
无论本协议是否另有规定,用户将持续对其雇员或顾问的全部作为或不作为承担责任,如同该等作为或不作为系其自身所为。
产品系按照原状或现状提供给用户,不含任何形式的陈述、保证、 承诺或条件。**ZILLIZ** 及其供应商不保证任何产品将无任何故障、错误或漏洞。**ZILLIZ** 和其供应商不为产品的如下内容提供任何陈述和保证(无论是明示或暗示,口头或书面),不论该内容是否依据法律之规定,行业惯例,交易习惯或其他原因而要求的:
(a)保证适销性;
(b)保证可适用于任何目的(不论 **ZILLIZ** 是否知晓、应当知晓、被建议或另行得知该目的);
(c)保证不侵权和拥有全部所有权。用户已明确知悉并同意产品上无任何陈述和保证。此外,鉴于进行入侵和网络攻击的新技术在不断发展,**ZILLIZ** 并不保证产品或产品所使用的系统或网络将免于任何入侵或攻击。
8. **损害赔偿**,用户应赔偿、保护或使得 **ZILLIZ** 及其董事、高管、 雇员、供应商、顾问、承包商和代理商(统称为“**ZILLIZ **受保障方”)免受所有现存或潜在的针对 **ZILLIZ** 受保障方因提起请求、诉讼或其他程序而引起的要求其赔偿损害损失、支付费用、罚款、调解、 损失费用等支出(包括但不限于律师费、费用、罚款、利息和垫付款),用户承担上述责任的前提是该请求、诉讼或其他程序,不论是否成功系在如下情况发生时导致、引起的,或以任何形式与下述情况相关:
(a)任何对本协议的违反(包括但不限于,任何违反用户陈述和保证或约定的情况);
(b)用户过失或故意产生的过错行为;
(c)引起争议的数据和信息系在产品的使用过程中产生或收集的。
9. **责任限制**,除了 **ZILLIZ** 存在欺诈或故意的过错行为,在任何情况下:
(a)**ZILLIZ** 都不会赔偿用户或任何第三方的因本协议或产品(包括用户使用或无法使用产品的情况)而遭受的任何利润损失、数 据损失、使用损失、收入损失、商誉损失、任何经营活动的中断,任何其他商业损害或损失,或任何间接的、特殊的、附带的、惩戒性、惩罚性或伴随的损失,不论上述损失系因合同、侵权、严格责任或其他原因而确认的,即使 **ZILLIZ** 已被通知或因其他可能的渠道知晓上述损失发生的可能性;
(b)**ZILLIZ** 因本协议所需承担的全部赔偿责任不应超过用户已支付或将支付给 **ZILLIZ** 的全部款项总额(若有),多项请求亦不得超过该金额限制。上述限制、排除情况及声明应在相关法律允许的最大范围内得以适用,即便任何补偿无法达到其实质目的。
10. **第三方供应商**,产品可能包括由第三方供应商许可提供的软件或其他代码(下称“第三方软件”)。用户已知悉第三方供应商不对产品或其任何部分提供任何陈述和保证,**ZILLIZ** 不承担因产品或用户对第三方软件的使用或不能使用的情况而产生的任何责任。
11. **诊断和报告**,用户了解并同意该产品包含诊断功能作为其默认配置。 诊断功能用于收集有关使用环境和产品使用过程中的配置文件、节点数、 软件版本、日志文档和其他信息,并将上述信息报告给 **ZILLIZ** 用于提前识别潜在的支持问题、了解用户的使用环境、并提高产品的使用性能。虽然用户可以选择更改诊断功能来禁用自动定时报告或仅用于报告服务记录,但用户需同意,每季度须至少运行一次诊断功能并将结果报告给**ZILLIZ**
12. **终止**,本协议期限从生效之日起直到 **ZILLIZ** 网站规定的期限终止,除非本协议因用户违反本协议中条款而提前终止。无论本协议是否另有规定,在用户存在违反第3、4、5或7条时,**ZILLIZ**有权立即终止本协议。本协议期满或提前终止时:
(a)根据本协议所授予给用户的所有权利将立即终止,在此情况下用户应立即停止使用产品;
(b) 用户应及时将届时仍由其占有的所有保密信息及其副本(包括但不限于产品)交还给 **ZILLIZ**,或根据 **ZILLIZ** 的自行审慎决定及指示, 销毁该等保密信息全部副本,未经 **ZILLIZ** 书面同意,用户不得擅自保留任何由 **ZILLIZ** 提供的保密信息及其副本。
13. **第三方资源****ZILLIZ** 供应的产品可能包括对其他网站、内容或资源的超链接(下称“第三方资源”),且 **ZILLIZ** 此类产品的正常使用可能依赖于第三方资源的可用性。**ZILLIZ** 无法控制任何第三方资源。用户承认并同意,**ZILLIZ** 不就第三方资源的可用性及安全性承担任何责任,也不对该等第三方资源所涉及的或从其中获得的任何广告、产品或其他材料提供保证。用户承认并同意,**ZILLIZ** 不应因第三方资源的可用性及安全性、或用户依赖于第三方资源所涉及的或从其中获得的任何广告、产品或其他材料的完整性、准确性及存续而可能遭受的损失或损害承担任何责任。
14. **其他**,本协议全部内容均在中华人民共和国境内履行,受中华人民共和国法律管辖并根据其解释(但不适用相关冲突法的法律条款)。用 **ZILLIZ** 同意与本协议有关的任何争议将向上海市徐汇区人民法院提出,且不可撤销无条件的同意上述法院对因本协议提起的全部诉讼、争议拥有排他的管辖权。一旦确定任何条款无效、非法或无法执行, **ZILLIZ** 保留修改和解释该条款的权利。任何需要发送给用户的通知如公布在 **ZILLIZ** 的网站上则被视为已有效、合法地发送给用户。除了本合同项下应支付款项的义务外,任何一方将不对因不可抗力而导致的无法合理控制的全部或部分未能履行或延迟履行本协议的行为负责, 不可抗力包括但不限于火灾、暴风雨、洪水、地震、内乱、电信中断、 电力中断或其他基础设施的中断、**ZILLIZ** 使用的服务提供商存在问题导致服务中断或终止、罢工、故意毁坏事件、电缆被切断、病毒入侵或其他任意第三方故意或非法的行为引起的其他类似事件。在上述迟延履行情况出现时,可延迟履行协议的时间为因上述原因引起的延迟时间。 本协议另有明确规定外,本协议所要求或认可的通知或通讯均需以书面形式经一方有权代表签署或授权并以直接呈递、隔夜快递,经确认的电子邮件发送,经确认的传真或邮寄挂号信、挂号邮件保留回单等方式送达。对本协议的任何修改、补充或删除或权利放弃,必须通过书面由双方适当授权的代表签署确认后方为有效。任何一方对任何权利或救济的不履行或迟延履行(部分或全部)不构成对该等权利或救济的放弃,也不影响任何其他权利或救济。本协议项下的所有权利主张和救济均可为累积的且不排除本协议中包含的或法律所规定的其他任何权利或救济。 对本协议中任何一项违约责任的豁免或延迟行使任何权利,并不构成对其他后续违约责任的豁免。
\ No newline at end of file
# ZILLIZ End-User License Agreement
#### Last updated: 2019-06-30
This End-user License Agreement ("Agreement") is applicable to all users who use Milvus provided by ZILLIZ company.
**Please read this agreement carefully before clicking the I Agree button, downloading or using this Application.**
**If you ("You" or "User") use any product on behalf of a company or other organization, you hereby state that you are an employee or agent of the company or such other institution, and you have the right to represent the company or such institutions to accept all the terms and conditions required under this Agreement. **
**If you use any product, you acknowledge and agree:**
**(A) You have read all the terms and conditions in the Agreement;**
**(B) You have understood all the terms and conditions in the Agreement;**
**(C) You have agreed that all the terms and conditions of this Agreement are legally binding on you.**
**If you do not agree to any of the terms and conditions set forth in this Agreement, you may choose not to use any part of the product.**
**This agreement takes effect immediately the first time you download the application**.
1. **Product**. In this Agreement, it refers to Milvus and other related software products of **ZILLIZ**, including Milvus vector indexing database and its updates, higher versions, maintenance or patch releases ("Updated Software").
(a) Only the Docker version of Milvus vector indexing database is granted free to the User. **ZILLIZ** retains the right to revoke this grant;
(b) Any person or organization that intend to use or try the Docker version of Milvus vector indexing database need to inform **ZILLIZ** of the personal identity, contact information and purposes of using the Product by sending an email to: support@zilliz.com;
(c)Making or using additional copy of the Product is only restricted to necessary copy purposes.
2. **Related Agreements**. The Related Agreements includes this Agreement and all other related terms and conditions that appear in [Milvus official website](https://milvus.io). This Agreement is the entire and final agreement that replaces all other terms agreed between the User and **ZILLIZ** about issues listed here, oral or written.
3. **License Grant**. **ZILLIZ** grant You a revocable, non-exclusive, non-transferable limited right to install and use the Application defined above for your personal, non-commercial purposes. The User who uses the Application through downloading and other permitted channels are also subject to this Agreement;
4. **Restrictions on Use.** You shall use the Application in accordance with the terms in the Agreement, and shall not:
(a)Make any modification, translation or derivative work from the Application;
(b)Decompile, reverse engineer, disassemble, attempt to derive the source code or algorithm of the Application;
(c)Sell, distribute, license re-granting or provide translation of the whole or part of the Application;
(d)Use the Application for creating a product, service or software.
(e)Remove, alter or obscure any proprietary notice, trademark, or copyright of the Company and Application;
(f)Install or use the Application to provide service to third-party partners, without acquiring formal grant of **ZILLIZ** ;
(g)Perform or permit any behaviors that might lead to one of the above prohibited actions.
5. **Ownership**. **ZILLIZ** enjoys the ownership of the following:
(a)Products (includes but is not restricted to any updated software, patch releases, or derivative products);
(b)All concepts, innovations, discoveries, improvements, information, or creative products developed and discovered by **ZILLIZ** as a result of or arising out of the service providing process;
(c)Intellectual property rights of the above mentioned products and innovations. In this Agreement, "Intellectual Property" refers to trademarks, patents, designations of origin, industrial designs and models and copyright. **ZILLIZ** and the User agree that the User enjoy all the rights to use data produced by using the Product, while **ZILLIZ** keeps all other rights not explicitly stated in the Agreement. Unless otherwise stated, **ZILLIZ** has not granted any additional rights to Users, either implied, acquiesced or in other ways.
6. **Non-disclosure**. Confidential Information refers to any and all information revealed to the User by **ZILLIZ**, either oral or written, tangible or intangible, before or after the Agreement takes effect. Confidential information includes but is not restricted to business plans and strategies, product, innovations, design papers, financial plans, computer programs, User information, etc. Within the term of this Agreement, unless granted definite permission, the User shall hold and maintain the Confidential Information in strictest confidence for the sole and exclusive benefit of **ZILLIZ** and using the Product. In addition:
(a)You shall not copy, use or disclose Confidential Information for purposes other than using the Product agreed in this Agreement;
(b)You shall carefully restrict access to Confidential Information to employees, contractors, and third parties as is reasonably required and shall require those persons to sign nondisclosure restrictions at least as protective as those in this Agreement.
Confidential Information does not include:
(a)Information that can be obtained by third-parties not due to User's violation of the Agreement;
(b)Information that can be proven to be provided to Users not by **ZILLIZ** ;
(c) Information that are obtained with no reference to Confidential Information;
(d)Information the User gets from third-parties that are not subject to non-disclosure agreement. Unless otherwise stated, any comments, suggestions or other feedback ("Feedback Information") about the Product by the User to **ZILLIZ** will also be counted as Confidential Information.
Furthermore, **ZILLIZ** has the right to use, disclose, copy or use above Feedback Information, and bearing no intellectual property burden or restrictions. According to related laws and regulations, during the fulfillment of this Agreement:
(a)**ZILLIZ** agree not to require the User to provide any information regarding personal identities;
(b)The User agree not to provide **ZILLIZ** with any personal information.
7. **Disclaimer of Warranties**. You acknowledge, agree and promise that:
(a) All employees and consultants will obey all terms in the Agreement;
(b)Application of the Agreement is subject to all laws, terms, acts, commands and other requirements issued by the government (no matter these laws are in effect now or will be effective in the future).
The User shall be held responsible for all the behaviors in relation to the Application.
The Application is provided on an "As is" or "As available" basis, and that You use or reliance on the Application is at your sole risk and discretion. **ZILLIZ** and its partners make no warranty that the Application will meet all Your requirements and expectations.
**ZILLIZ** and its suppliers hereby disclaim any and all representations, warranties and guaranties regarding the Application, whether expressed, implied or statutory:
(a)The implied warranty of merchantability;
(b)Fitness for a particular purpose;
(c)Non-infringement.
Furthermore, considering the continuous advancement of Internet hacking and attacking technologies, **ZILLIZ** make no guarantee that the Application or the systems and Internet it uses will be exempt from any hack or attack.
8. **Damages and Penalties**. The User shall pay, protect or prevent **ZILLIZ** and its board members, executives, employees, consultants or representative agencies (**ZILLIZ** Protected Party) from any existing or potential damage loss, fees, penalties and other outgoing payments (include but are not limited to lawyer fees, fines, interests and advance payment) arising out of legal request, litigation or other processes. The prerequisite condition of the above obligations are that the legal request, litigation or process are caused by any of the following situations:
(a)Any violation of the Agreement;
(b)User fault or deliberate behavior;
(c)Controversial data is produced or collected during the usage of the Product.
9. **Limitation of Liability**. Unless due to deliberate fraud or error from **ZILLIZ**, below terms are applicable:
(a)Under no circumstances shall **ZILLIZ** be held liable for any profit loss, data loss, revenue loss, termination of operations, any indirect, special, exemplary or consequential damages arising out or in connection with Your access or use of the Application;
(b)Without limiting the generality of the foregoing, **ZILLIZ**'s aggregate liability to You shall not exceed the total amount of money You already paid or will pay to **ZILLIZ** (if any).
10. **Third-party Suppliers**. The User acknowledge that no statement and guarantee should be expected from Third-party Suppliers about the Product or its components. **ZILLIZ** hold no obligations to the Users' usage of the softwares provided by third-party Suppliers.
11. **Diagnosis and Report**. The User know and agree that Diagnosis is part of the configuration of the Product. Diagnosis is used to collect the configuration files, node numbers, software version, logs and related information, and send a Report to **ZILLIZ** to recognize potential support problems, get to know User environment, and to enhance product features. Although You can choose to turn off the Diagnosis function of automatic report sending, however, You shall run the Diagnosis at least once every quarter and send the Report to **ZILLIZ**.
12. **Termination of Licensing**. This Agreement is valid from the day it takes effect to the termination dated defined in **ZILLIZ** website, unless the User has disobeyed the terms and caused the Agreement to end in advance. Whether or not listed, if the User has violated terms in Clause 3, 4, 5 or 7, **ZILLIZ** may, in its sole and absolute discretion, terminate this License and the rights afforded to You. Upon the expiration or termination of the License:
(a)All rights afforded to the User based upon this Agreement will be terminated. You shall cease use of the Product and uninstall related software;
(b)The User shall return all confidential information and the copy (includes but not restricted to Product) back to **ZILLIZ**, or destroy all copy of confidential information on permission of **ZILLIZ**. Without the written approval of **ZILLIZ**, the User is not allowed to keep any confidential information or its copy provided by **ZILLIZ**.
13. **Third-party Resources**. Products supplied by **ZILLIZ** may include hyperlinks to other websites, content or resources ("Third Party Resources"), and the normal use of such products may depend on the availability of third party resources. **ZILLIZ** is unable to control any third-party resources. The User acknowledges and agrees that **ZILLIZ** is not responsible for the availability and security of third-party resources and does not guarantee any advertising, products or other materials that are or are derived from such third party resources. The User acknowledges and agrees that **ZILLIZ** shall not hold obligations about any liability for loss or damage that may be suffered due to the availability and security of third party resources, or the integrity or accuracy of any advertisements, products or other materials that the User relies on or obtains from third party resources.
14. **Other**. The entire contents of this Agreement are performed within the territory of the People's Republic of China and are governed by and construed in accordance with the laws of the People's Republic of China (but not applicable to the relevant conflict laws). **ZILLIZ** agrees that any disputes relating to this Agreement will be submitted to the Xuhui District People's Court of Shanghai, and irrevocably and unconditionally agree that the above courts have exclusive jurisdiction over all litigations and disputes brought about by this Agreement. Once it is determined that any provision is invalid, illegal or unenforceable, **ZILLIZ** reserves the right to modify and interpret the terms. Any notice that needs to be sent to the user, if posted on the **ZILLIZ** website, is deemed to have been validly and legally sent to the user. Except for the obligation to pay under this contract, neither party will be liable for failure to perform or delayed performance of this Agreement in whole or in part due to force majeure. The force majeure includes but is not limited to fire, storm, flood , earthquake, civil strife, telecommunications disruption, power outage or other infrastructure disruption, service interruption or termination caused by **ZILLIZ** service provider problems, strikes, intentional destruction events, cable cuts, virus intrusion or any other similar incidents caused by intentional or illegal acts by third parties. In the case of the above-mentioned delayed performance, the delay in fulfilling the agreement may be the delay time due to the above reasons. Unless otherwise stated in this Agreement, notices or communications required or endorsed by this Agreement must be signed or authorized in writing by a party, and delivered by direct delivery, overnight delivery, confirmed email, confirmed fax or by mailing a registered letter, registered mail, and returning the order, etc. 
Any modification, addition or deletion or waiver of this Agreement must be confirmed by a written confirmation by a suitably authorized representative of both parties. The non-performance or delay in the performance of any right or remedy by any party (partially or wholly) does not constitute a waiver of such rights or remedies, nor does it affect any other rights or remedies. All claims and remedies under this Agreement may be cumulative and do not exclude any other rights or remedies contained in this Agreement or as required by law. Exemption from the waiver or delay of any liability for breach of contract in this Agreement does not constitute an exemption from other subsequent breach of contract obligations.
\ No newline at end of file
### Compilation
#### Step 1: install necessary tools
Install MySQL
centos7 :
yum install gfortran flex bison
yum install gfortran qt4 flex bison mysql-devel
ubuntu16.04 :
sudo apt-get install gfortran flex bison
sudo apt-get install gfortran qt4-qmake flex bison libmysqlclient-dev
If `libmysqlclient_r.so` does not exist after installing MySQL Development Files, you need to create a symbolic link:
```
sudo ln -s /path/to/libmysqlclient.so /path/to/libmysqlclient_r.so
```
#### Step 2: build(output to cmake_build folder)
cmake_build/src/milvus_server is the server
cmake_build/src/libmilvus_engine.a is the static library
......@@ -39,9 +48,20 @@ If you encounter the following error when building:
or
./build.sh --unittest
#### To run code coverage
apt-get install lcov
./build.sh -u -c
### Launch server
Set config in cpp/conf/server_config.yaml
Add milvus/bin/lib to LD_LIBRARY_PATH
```
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/path/to/milvus/bin/lib
```
Then launch server with config:
cd [build output path]
start_server.sh
......
#!/bin/bash
BUILD_TYPE="Debug"
BUILD_UNITTEST="off"
BUILD_UNITTEST="OFF"
LICENSE_CHECK="OFF"
INSTALL_PREFIX=$(pwd)/milvus
MAKE_CLEAN="OFF"
BUILD_COVERAGE="OFF"
DB_PATH="/opt/milvus"
while getopts "p:t:uhlrc" arg
while getopts "p:d:t:uhlrc" arg
do
case $arg in
t)
......@@ -15,11 +16,14 @@ do
;;
u)
echo "Build and run unittest cases" ;
BUILD_UNITTEST="on";
BUILD_UNITTEST="ON";
;;
p)
INSTALL_PREFIX=$OPTARG
;;
d)
DB_PATH=$OPTARG
;;
l)
LICENSE_CHECK="ON"
;;
......@@ -36,12 +40,13 @@ do
echo "
parameter:
-t: build type
-u: building unit test options
-p: install prefix
-l: build license version
-r: remove previous build directory
-c: code coverage
-t: build type(default: Debug)
-u: building unit test options(default: OFF)
-p: install prefix(default: $(pwd)/milvus)
-d: db path(default: /opt/milvus)
-l: build license version(default: OFF)
-r: remove previous build directory(default: OFF)
-c: code coverage(default: OFF)
usage:
./build.sh -t \${BUILD_TYPE} [-u] [-h] [-g] [-r] [-c]
......@@ -71,6 +76,7 @@ if [[ ${MAKE_CLEAN} == "ON" ]]; then
-DCMAKE_CUDA_COMPILER=${CUDA_COMPILER} \
-DCMAKE_LICENSE_CHECK=${LICENSE_CHECK} \
-DBUILD_COVERAGE=${BUILD_COVERAGE} \
-DMILVUS_DB_PATH=${DB_PATH} \
$@ ../"
echo ${CMAKE_CMD}
......
......@@ -93,6 +93,8 @@ define_option(MILVUS_WITH_SQLITE "Build with SQLite library" ON)
define_option(MILVUS_WITH_SQLITE_ORM "Build with SQLite ORM library" ON)
define_option(MILVUS_WITH_MYSQLPP "Build with MySQL++" ON)
define_option(MILVUS_WITH_THRIFT "Build with Apache Thrift library" ON)
define_option(MILVUS_WITH_YAMLCPP "Build with yaml-cpp library" ON)
......
......@@ -27,6 +27,7 @@ set(MILVUS_THIRDPARTY_DEPENDENCIES
JSONCONS
LAPACK
Lz4
MySQLPP
OpenBLAS
Prometheus
RocksDB
......@@ -57,14 +58,16 @@ macro(build_dependency DEPENDENCY_NAME)
build_easyloggingpp()
elseif("${DEPENDENCY_NAME}" STREQUAL "FAISS")
build_faiss()
elseif ("${DEPENDENCY_NAME}" STREQUAL "GTest")
build_gtest()
elseif("${DEPENDENCY_NAME}" STREQUAL "LAPACK")
build_lapack()
elseif("${DEPENDENCY_NAME}" STREQUAL "Knowhere")
build_knowhere()
elseif("${DEPENDENCY_NAME}" STREQUAL "Lz4")
build_lz4()
elseif ("${DEPENDENCY_NAME}" STREQUAL "GTest")
build_gtest()
elseif ("${DEPENDENCY_NAME}" STREQUAL "MySQLPP")
build_mysqlpp()
elseif ("${DEPENDENCY_NAME}" STREQUAL "JSONCONS")
build_jsoncons()
elseif ("${DEPENDENCY_NAME}" STREQUAL "OpenBLAS")
......@@ -274,6 +277,12 @@ else()
set(LZ4_SOURCE_URL "https://github.com/lz4/lz4/archive/${LZ4_VERSION}.tar.gz")
endif()
if(DEFINED ENV{MILVUS_MYSQLPP_URL})
set(MYSQLPP_SOURCE_URL "$ENV{MILVUS_MYSQLPP_URL}")
else()
set(MYSQLPP_SOURCE_URL "https://tangentsoft.com/mysqlpp/releases/mysql++-${MYSQLPP_VERSION}.tar.gz")
endif()
if (DEFINED ENV{MILVUS_OPENBLAS_URL})
set(OPENBLAS_SOURCE_URL "$ENV{MILVUS_OPENBLAS_URL}")
else ()
......@@ -886,8 +895,8 @@ macro(build_faiss)
# ${MAKE} ${MAKE_BUILD_ARGS}
BUILD_COMMAND
${MAKE} ${MAKE_BUILD_ARGS} all
COMMAND
cd gpu && make ${MAKE_BUILD_ARGS}
COMMAND
cd gpu && ${MAKE} ${MAKE_BUILD_ARGS}
BUILD_IN_SOURCE
1
# INSTALL_DIR
......@@ -1125,6 +1134,65 @@ if(MILVUS_WITH_LZ4)
include_directories(SYSTEM ${LZ4_INCLUDE_DIR})
endif()
# ----------------------------------------------------------------------
# MySQL++
# Build MySQL++ from source as an ExternalProject and expose it as the
# imported shared-library target `mysqlpp`.
# NOTE: this is deliberately a macro (not a function) so that variables such
# as MYSQLPP_PREFIX / MYSQLPP_INCLUDE_DIR remain visible to the caller scope,
# where they are used after resolve_dependency() — do not convert to function().
macro(build_mysqlpp)
message(STATUS "Building MySQL++-${MYSQLPP_VERSION} from source")
# Install prefix inside the build tree (ExternalProject default layout).
set(MYSQLPP_PREFIX "${CMAKE_CURRENT_BINARY_DIR}/mysqlpp_ep-prefix/src/mysqlpp_ep")
set(MYSQLPP_INCLUDE_DIR "${MYSQLPP_PREFIX}/include")
# Expected path of the built shared library (platform-specific prefix/suffix).
set(MYSQLPP_SHARED_LIB
"${MYSQLPP_PREFIX}/lib/${CMAKE_SHARED_LIBRARY_PREFIX}mysqlpp${CMAKE_SHARED_LIBRARY_SUFFIX}")
# Arguments forwarded to MySQL++'s autotools ./configure step.
set(MYSQLPP_CONFIGURE_ARGS
"--prefix=${MYSQLPP_PREFIX}"
"--enable-thread-check"
"CFLAGS=${EP_C_FLAGS}"
"CXXFLAGS=${EP_CXX_FLAGS}"
"LDFLAGS=-pthread")
# Download from MYSQLPP_SOURCE_URL (tarball); the commented GIT_* options are
# a retained alternative for fetching from a git repository instead.
externalproject_add(mysqlpp_ep
URL
${MYSQLPP_SOURCE_URL}
# GIT_REPOSITORY
# ${MYSQLPP_SOURCE_URL}
# GIT_TAG
# ${MYSQLPP_VERSION}
# GIT_SHALLOW
# TRUE
${EP_LOG_OPTIONS}
CONFIGURE_COMMAND
# "./bootstrap"
# COMMAND
"./configure"
${MYSQLPP_CONFIGURE_ARGS}
BUILD_COMMAND
${MAKE} ${MAKE_BUILD_ARGS}
BUILD_IN_SOURCE
1
# BUILD_BYPRODUCTS lets Ninja track the library produced as a side-effect.
BUILD_BYPRODUCTS
${MYSQLPP_SHARED_LIB})
# The include dir must exist at configure time so that
# INTERFACE_INCLUDE_DIRECTORIES below does not fail validation.
file(MAKE_DIRECTORY "${MYSQLPP_INCLUDE_DIR}")
# Wrap the built artifact in an imported target carrying usage requirements.
add_library(mysqlpp SHARED IMPORTED)
set_target_properties(
mysqlpp
PROPERTIES
IMPORTED_LOCATION "${MYSQLPP_SHARED_LIB}"
INTERFACE_INCLUDE_DIRECTORIES "${MYSQLPP_INCLUDE_DIR}")
# Ensure the external build runs before anything linking `mysqlpp`.
add_dependencies(mysqlpp mysqlpp_ep)
endmacro()
# When MySQL++ support is enabled, resolve the dependency (building it from
# source via build_mysqlpp() if no system package is found) and make its
# headers/libraries visible directory-wide.
# NOTE(review): relies on MYSQLPP_PREFIX leaking from the build_mysqlpp macro;
# only valid when the dependency was built from source — confirm.
if(MILVUS_WITH_MYSQLPP)
resolve_dependency(MySQLPP)
get_target_property(MYSQLPP_INCLUDE_DIR mysqlpp INTERFACE_INCLUDE_DIRECTORIES)
include_directories(SYSTEM "${MYSQLPP_INCLUDE_DIR}")
link_directories(SYSTEM ${MYSQLPP_PREFIX}/lib)
endif()
# ----------------------------------------------------------------------
# Prometheus
......
* GLOBAL:
FORMAT = "%datetime | %level | %logger | %msg"
FILENAME = "@MILVUS_DB_PATH@/logs/milvus-%datetime{%H:%m}-global.log"
ENABLED = true
TO_FILE = true
TO_STANDARD_OUTPUT = false
SUBSECOND_PRECISION = 3
PERFORMANCE_TRACKING = false
MAX_LOG_FILE_SIZE = 2097152 ## Throw log files away after 2MB
* DEBUG:
FILENAME = "@MILVUS_DB_PATH@/logs/milvus-%datetime{%H:%m}-debug.log"
ENABLED = true
* WARNING:
FILENAME = "@MILVUS_DB_PATH@/logs/milvus-%datetime{%H:%m}-warning.log"
* TRACE:
FILENAME = "@MILVUS_DB_PATH@/logs/milvus-%datetime{%H:%m}-trace.log"
* VERBOSE:
FORMAT = "%datetime{%d/%M/%y} | %level-%vlevel | %msg"
TO_FILE = false
TO_STANDARD_OUTPUT = false
## Error logs
* ERROR:
ENABLED = true
FILENAME = "@MILVUS_DB_PATH@/logs/milvus-%datetime{%H:%m}-error.log"
* FATAL:
ENABLED = true
FILENAME = "@MILVUS_DB_PATH@/logs/milvus-%datetime{%H:%m}-fatal.log"
\ No newline at end of file
server_config:
address: 0.0.0.0
port: 19530 # the port milvus listen to, default: 19530, range: 1025 ~ 65534
gpu_index: 0 # the gpu milvus use, default: 0, range: 0 ~ gpu number - 1
mode: single # milvus deployment type: single, cluster
db_config:
db_path: @MILVUS_DB_PATH@ # milvus data storage path
# URI format: dialect://username:password@host:port/database
# All parts except dialect are optional, but you MUST include the delimiters
# Currently dialect supports mysql or sqlite
db_backend_url: sqlite://:@:/
index_building_threshold: 1024 # index building trigger threshold, default: 1024, unit: MB
archive_disk_threshold: 512 # trigger archive action if storage size exceeds this value, unit: GB
archive_days_threshold: 30 # files older than x days will be archived, unit: day
metric_config:
is_startup: off # if monitoring start: on, off
collector: prometheus # metrics collector: prometheus
prometheus_config: # following are prometheus configure
collect_type: pull # prometheus collect data method
port: 8080 # the port prometheus use to fetch metrics
push_gateway_ip_address: 127.0.0.1 # push method configure: push gateway ip address
push_gateway_port: 9091 # push method configure: push gateway port
license_config: # license configure
license_path: "@MILVUS_DB_PATH@/system.license" # license file path
cache_config: # cache configure
cpu_cache_capacity: 16 # how many memory are used as cache, unit: GB, range: 0 ~ less than total memory
\ No newline at end of file
server_config:
address: 0.0.0.0
port: 19530
transfer_protocol: binary #optional: binary, compact, json
server_mode: thread_pool #optional: simple, thread_pool
gpu_index: 0 #which gpu to be used
mode: single #optional: single, cluster
db_config:
db_path: /tmp/milvus
db_backend_url: http://127.0.0.1
index_building_threshold: 1024 #build index file when raw data file size larger than this value, unit: MB
metric_config:
is_startup: true # true is on, false is off
collector: prometheus # prometheus, now we only have prometheus
prometheus_config:
collect_type: pull # pull means prometheus pull the message from server, push means server push metric to push gateway
port: 8080
push_gateway_ip_address: 127.0.0.1
push_gateway_port: 9091
license_config:
license_path: "/tmp/system.license"
cache_config:
cpu_cache_capacity: 16 # memory pool to hold index data, unit: GB
\ No newline at end of file
#!/bin/bash
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$(pwd)/milvus/lib
LCOV_CMD="lcov"
LCOV_GEN_CMD="genhtml"
......@@ -12,6 +14,26 @@ DIR_LCOV_OUTPUT="lcov_out"
DIR_GCNO="cmake_build"
DIR_UNITTEST="milvus/bin"
MYSQL_USER_NAME=root
MYSQL_PASSWORD=Fantast1c
MYSQL_HOST='192.168.1.194'
MYSQL_PORT='3306'
MYSQL_DB_NAME=milvus_`date +%s%N`
# Run a single SQL statement against the configured MySQL server.
# Arguments:
#   $1 - the SQL statement to execute
# Uses the MYSQL_HOST / MYSQL_USER_NAME / MYSQL_PASSWORD globals defined above.
# Prints a diagnostic on failure and propagates mysql's exit status to the
# caller (the original silently returned success even when mysql failed).
function mysql_exc()
{
    local cmd="$1"
    # Quote all expansions so hosts/passwords containing special characters survive.
    mysql -h"${MYSQL_HOST}" -u"${MYSQL_USER_NAME}" -p"${MYSQL_PASSWORD}" -e "${cmd}"
    local status=$?
    if [ ${status} -ne 0 ]; then
        echo "mysql ${cmd} run failed"
    fi
    return ${status}
}
mysql_exc "CREATE DATABASE IF NOT EXISTS ${MYSQL_DB_NAME};"
mysql_exc "GRANT ALL PRIVILEGES ON ${MYSQL_DB_NAME}.* TO '${MYSQL_USER_NAME}'@'%';"
mysql_exc "FLUSH PRIVILEGES;"
# get baseline
${LCOV_CMD} -c -i -d ${DIR_GCNO} -o "${FILE_INFO_BASE}"
if [ $? -ne 0 ]; then
......@@ -21,16 +43,24 @@ fi
for test in `ls ${DIR_UNITTEST}`; do
echo $test
case ${test} in
case ${test} in
db_test)
# set run args for db_test
args="mysql://${MYSQL_USER_NAME}:${MYSQL_PASSWORD}@${MYSQL_HOST}:${MYSQL_PORT}/${MYSQL_DB_NAME}"
;;
*_test)
# run unittest
./${DIR_UNITTEST}/${test}
if [ $? -ne 0 ]; then
echo ${DIR_UNITTEST}/${test} "run failed"
fi
args=""
;;
esac
# run unittest
./${DIR_UNITTEST}/${test} "${args}"
if [ $? -ne 0 ]; then
echo ${DIR_UNITTEST}/${test} "run failed"
fi
done
mysql_exc "DROP DATABASE IF EXISTS ${MYSQL_DB_NAME};"
# gen test converage
${LCOV_CMD} -d ${DIR_GCNO} -o "${FILE_INFO_MILVUS}" -c
# merge coverage
......@@ -43,4 +73,4 @@ ${LCOV_CMD} -r "${FILE_INFO_OUTPUT}" -o "${FILE_INFO_OUTPUT_NEW}" \
"*/cmake_build/*_ep-prefix/*" \
# gen html report
${LCOV_GEN_CMD} "${FILE_INFO_OUTPUT_NEW}" --output-directory ${DIR_LCOV_OUTPUT}/
${LCOV_GEN_CMD} "${FILE_INFO_OUTPUT_NEW}" --output-directory ${DIR_LCOV_OUTPUT}/
\ No newline at end of file
......@@ -64,6 +64,7 @@ set(s3_client_files
include_directories(/usr/include)
include_directories("${CUDA_TOOLKIT_ROOT_DIR}/include")
include_directories(thrift/gen-cpp)
include_directories(/usr/include/mysql)
set(third_party_libs
knowhere
......@@ -91,6 +92,7 @@ set(third_party_libs
zstd
cudart
cublas
mysqlpp
${CUDA_TOOLKIT_ROOT_DIR}/lib64/stubs/libnvidia-ml.so
)
if (MEGASEARCH_WITH_ARROW STREQUAL "ON")
......@@ -190,4 +192,10 @@ endif ()
install(TARGETS milvus_server DESTINATION bin)
add_subdirectory(sdk)
install(FILES
${CMAKE_BINARY_DIR}/mysqlpp_ep-prefix/src/mysqlpp_ep/lib/${CMAKE_SHARED_LIBRARY_PREFIX}mysqlpp${CMAKE_SHARED_LIBRARY_SUFFIX}
${CMAKE_BINARY_DIR}/mysqlpp_ep-prefix/src/mysqlpp_ep/lib/${CMAKE_SHARED_LIBRARY_PREFIX}mysqlpp${CMAKE_SHARED_LIBRARY_SUFFIX}.3
${CMAKE_BINARY_DIR}/mysqlpp_ep-prefix/src/mysqlpp_ep/lib/${CMAKE_SHARED_LIBRARY_PREFIX}mysqlpp${CMAKE_SHARED_LIBRARY_SUFFIX}.3.2.4
DESTINATION lib) #need to copy libmysqlpp.so
#add_subdirectory(sdk)
......@@ -7,11 +7,13 @@
#include "DBMetaImpl.h"
#include "Log.h"
#include "EngineFactory.h"
#include "Factories.h"
#include "metrics/Metrics.h"
#include "scheduler/TaskScheduler.h"
#include "scheduler/context/SearchContext.h"
#include "scheduler/context/DeleteContext.h"
#include "utils/TimeRecorder.h"
#include "MetaConsts.h"
#include <assert.h>
#include <chrono>
......@@ -27,9 +29,9 @@ namespace engine {
namespace {
static constexpr uint64_t METRIC_ACTION_INTERVAL = 1;
static constexpr uint64_t COMPACT_ACTION_INTERVAL = 1;
static constexpr uint64_t INDEX_ACTION_INTERVAL = 1;
constexpr uint64_t METRIC_ACTION_INTERVAL = 1;
constexpr uint64_t COMPACT_ACTION_INTERVAL = 1;
constexpr uint64_t INDEX_ACTION_INTERVAL = 1;
void CollectInsertMetrics(double total_time, size_t n, bool succeed) {
double avg_time = total_time / n;
......@@ -76,67 +78,20 @@ void CollectFileMetrics(int file_type, size_t file_size, double total_time) {
}
}
}
// Convert raw distances in result_src into similarity scores in [0, 100]
// and write them to result_target, one Id2ScoreMap per query vector.
// For each query: score = (1 - distance / vec_len) * 100, where vec_len is
// the squared L2 norm of that query vector (clamped so the score stays >= 0
// and the divisor is never zero).
// NOTE(review): vector_count is not used in the body; the loop is driven by
// result_src.size() instead — confirm the two are always equal at call sites.
void CalcScore(uint64_t vector_count,
const float *vectors_data,
uint64_t dimension,
const SearchContext::ResultSet &result_src,
SearchContext::ResultSet &result_target) {
result_target.clear();
if(result_src.empty()){
return;
}
server::TimeRecorder rc("Calculate Score");
int vec_index = 0;
for(auto& result : result_src) {
// vec_data points at the query vector this result row belongs to.
const float * vec_data = vectors_data + vec_index*dimension;
// Squared L2 norm of the query vector (no sqrt is taken anywhere).
double vec_len = 0;
for(uint64_t i = 0; i < dimension; i++) {
vec_len += vec_data[i]*vec_data[i];
}
vec_index++;
// Largest distance among this query's hits.
double max_score = 0.0;
for(auto& pair : result) {
if(max_score < pair.second) {
max_score = pair.second;
}
}
// Make sure the normalized score stays within 0 ~ 100: if any distance
// exceeds the norm, normalize by the max distance instead.
if(max_score > vec_len) {
vec_len = max_score;
}
// Avoid division by zero for degenerate (near-zero) query vectors.
static constexpr double TOLERANCE = std::numeric_limits<float>::epsilon();
if(vec_len < TOLERANCE) {
vec_len = TOLERANCE;
}
SearchContext::Id2ScoreMap score_array;
double vec_len_inverse = 1.0/vec_len;
// Map each (id, distance) pair to (id, score); smaller distance -> higher score.
for(auto& pair : result) {
score_array.push_back(std::make_pair(pair.first, (1 - pair.second*vec_len_inverse)*100.0));
}
result_target.emplace_back(score_array);
}
rc.Elapse("totally cost");
}
}
DBImpl::DBImpl(const Options& options)
: options_(options),
shutting_down_(false),
meta_ptr_(new meta::DBMetaImpl(options_.meta)),
mem_mgr_(new MemManager(meta_ptr_, options_)),
compact_thread_pool_(1, 1),
index_thread_pool_(1, 1) {
StartTimerTasks();
meta_ptr_ = DBMetaImplFactory::Build(options.meta, options.mode);
mem_mgr_ = std::make_shared<MemManager>(meta_ptr_, options_);
// mem_mgr_ = (MemManagerPtr)(new MemManager(meta_ptr_, options_));
if (options.mode != Options::MODE::READ_ONLY) {
StartTimerTasks();
}
}
Status DBImpl::CreateTable(meta::TableSchema& table_schema) {
......@@ -203,10 +158,6 @@ Status DBImpl::Query(const std::string &table_id, uint64_t k, uint64_t nq,
Status DBImpl::Query(const std::string& table_id, uint64_t k, uint64_t nq,
const float* vectors, const meta::DatesT& dates, QueryResults& results) {
#if 0
return QuerySync(table_id, k, nq, vectors, dates, results);
#else
//get all table files from table
meta::DatePartionedTableFilesSchema files;
auto status = meta_ptr_->FilesToSearch(table_id, dates, files);
......@@ -220,7 +171,6 @@ Status DBImpl::Query(const std::string& table_id, uint64_t k, uint64_t nq,
}
return QueryAsync(table_id, file_id_array, k, nq, vectors, dates, results);
#endif
}
Status DBImpl::Query(const std::string& table_id, const std::vector<std::string>& file_ids,
......@@ -232,7 +182,7 @@ Status DBImpl::Query(const std::string& table_id, const std::vector<std::string>
meta::TableFileSchema table_file;
table_file.table_id_ = table_id;
std::string::size_type sz;
ids.push_back(std::stol(id, &sz));
ids.push_back(std::stoul(id, &sz));
}
meta::TableFilesSchema files_array;
......@@ -248,145 +198,6 @@ Status DBImpl::Query(const std::string& table_id, const std::vector<std::string>
return QueryAsync(table_id, files_array, k, nq, vectors, dates, results);
}
// Synchronous (in-thread) top-k search over all files of a table.
// Steps: (1) collect the table's files for the given dates, split into
// index files and raw files; (2) search every file, merging each file's
// per-query hits into one candidate pool per query; (3) re-rank each pool
// with a CPU top-k pass; (4) convert distances to 0~100 scores via CalcScore.
// Returns NotFound when no results were produced, OK otherwise.
Status DBImpl::QuerySync(const std::string& table_id, uint64_t k, uint64_t nq,
const float* vectors, const meta::DatesT& dates, QueryResults& results) {
meta::DatePartionedTableFilesSchema files;
auto status = meta_ptr_->FilesToSearch(table_id, dates, files);
if (!status.ok()) { return status; }

ENGINE_LOG_DEBUG << "Search DateT Size = " << files.size();

// Partition files by type: INDEX files vs raw (unindexed) files.
meta::TableFilesSchema index_files;
meta::TableFilesSchema raw_files;
for (auto &day_files : files) {
for (auto &file : day_files.second) {
file.file_type_ == meta::TableFileSchema::INDEX ?
index_files.push_back(file) : raw_files.push_back(file);
}
}

// Vector dimension, taken from any available file; needed later by CalcScore.
int dim = 0;
if (!index_files.empty()) {
dim = index_files[0].dimension_;
} else if (!raw_files.empty()) {
dim = raw_files[0].dimension_;
} else {
ENGINE_LOG_DEBUG << "no files to search";
return Status::OK();
}

{
// Per-query candidate pool accumulated across all files:
// [{ids, distances}, ...], one entry per query vector.
using SearchResult = std::pair<std::vector<long>, std::vector<float>>;
std::vector<SearchResult> batchresult(nq); // allocate nq cells.

// Merge one file's k-per-query hits into the per-query pools.
auto cluster = [&](long *nns, float *dis, const int& k) -> void {
for (int i = 0; i < nq; ++i) {
auto f_begin = batchresult[i].first.cbegin();
auto s_begin = batchresult[i].second.cbegin();
batchresult[i].first.insert(f_begin, nns + i * k, nns + i * k + k);
batchresult[i].second.insert(s_begin, dis + i * k, dis + i * k + k);
}
};

// Scratch buffers reused for every file search (freed at the end of this scope).
float *output_distence;
long *output_ids;
output_distence = (float *) malloc(k * nq * sizeof(float));
output_ids = (long *) malloc(k * nq * sizeof(long));
memset(output_distence, 0, k * nq * sizeof(float));
memset(output_ids, 0, k * nq * sizeof(long));

long search_set_size = 0;

// Load each file's engine, run the k-NN search, record metrics, and merge.
auto search_in_index = [&](meta::TableFilesSchema& file_vec) -> void {
for (auto &file : file_vec) {
ExecutionEnginePtr index = EngineFactory::Build(file.dimension_, file.location_, (EngineType)file.engine_type_);
index->Load();
auto file_size = index->PhysicalSize();
search_set_size += file_size;

ENGINE_LOG_DEBUG << "Search file_type " << file.file_type_ << " Of Size: "
<< file_size/(1024*1024) << " M";

// Never ask for more neighbors than the file contains.
int inner_k = index->Count() < k ? index->Count() : k;
auto start_time = METRICS_NOW_TIME;
index->Search(nq, vectors, inner_k, output_distence, output_ids);
auto end_time = METRICS_NOW_TIME;
auto total_time = METRICS_MICROSECONDS(start_time, end_time);
CollectFileMetrics(file.file_type_, file_size, total_time);
cluster(output_ids, output_distence, inner_k); // cluster to each query
memset(output_distence, 0, k * nq * sizeof(float));
memset(output_ids, 0, k * nq * sizeof(long));
}
};

// CPU top-k over a flat distance vector: a distance-sorted inverted table
// (std::map keeps keys ascending) yields the k smallest distances in order.
auto topk_cpu = [](const std::vector<float> &input_data,
const int &k,
float *output_distence,
long *output_ids) -> void {
std::map<float, std::vector<int>> inverted_table;
for (int i = 0; i < input_data.size(); ++i) {
if (inverted_table.count(input_data[i]) == 1) {
auto& ori_vec = inverted_table[input_data[i]];
ori_vec.push_back(i);
}
else {
inverted_table[input_data[i]] = std::vector<int>{i};
}
}

int count = 0;
for (auto &item : inverted_table){
if (count == k) break;
for (auto &id : item.second){
output_distence[count] = item.first;
output_ids[count] = id;
if (++count == k) break;
}
}
};

// Re-rank each query's merged candidate pool down to its final top-k.
auto cluster_topk = [&]() -> void {
QueryResult res;
for (auto &result_pair : batchresult) {
auto &dis = result_pair.second;
auto &nns = result_pair.first;

topk_cpu(dis, k, output_distence, output_ids);

// output_ids holds positions within the pool; map back to vector ids.
int inner_k = dis.size() < k ? dis.size() : k;
for (int i = 0; i < inner_k; ++i) {
res.emplace_back(std::make_pair(nns[output_ids[i]], output_distence[i])); // mapping
}
results.push_back(res); // append to result list
res.clear();
memset(output_distence, 0, k * nq * sizeof(float));
memset(output_ids, 0, k * nq * sizeof(long));
}
};

search_in_index(raw_files);
search_in_index(index_files);

ENGINE_LOG_DEBUG << "Search Overall Set Size = " << search_set_size << " M";
cluster_topk();

free(output_distence);
free(output_ids);
}

if (results.empty()) {
return Status::NotFound("Group " + table_id + ", search result not found!");
}

// Replace raw distances with normalized 0~100 similarity scores.
QueryResults temp_results;
CalcScore(nq, vectors, dim, results, temp_results);
results.swap(temp_results);

return Status::OK();
}
Status DBImpl::QueryAsync(const std::string& table_id, const meta::TableFilesSchema& files,
uint64_t k, uint64_t nq, const float* vectors,
const meta::DatesT& dates, QueryResults& results) {
......@@ -405,13 +216,8 @@ Status DBImpl::QueryAsync(const std::string& table_id, const meta::TableFilesSch
context->WaitResult();
//step 3: construct results, calculate score between 0 ~ 100
auto& context_result = context->GetResult();
meta::TableSchema table_schema;
table_schema.table_id_ = table_id;
meta_ptr_->DescribeTable(table_schema);
CalcScore(context->nq(), context->vectors(), table_schema.dimension_, context_result, results);
//step 3: construct results
results = context->GetResult();
return Status::OK();
}
......@@ -465,14 +271,19 @@ void DBImpl::StartMetricTask() {
}
void DBImpl::StartCompactionTask() {
// static int count = 0;
// count++;
// std::cout << "StartCompactionTask: " << count << std::endl;
// std::cout << "c: " << count++ << std::endl;
static uint64_t compact_clock_tick = 0;
compact_clock_tick++;
if(compact_clock_tick%COMPACT_ACTION_INTERVAL != 0) {
// std::cout << "c r: " << count++ << std::endl;
return;
}
//serialize memory data
std::vector<std::string> temp_table_ids;
std::set<std::string> temp_table_ids;
mem_mgr_->Serialize(temp_table_ids);
for(auto& id : temp_table_ids) {
compact_table_ids_.insert(id);
......@@ -543,7 +354,8 @@ Status DBImpl::MergeFiles(const std::string& table_id, const meta::DateT& date,
ENGINE_LOG_DEBUG << "New merged file " << table_file.file_id_ <<
" of size=" << index->PhysicalSize()/(1024*1024) << " M";
index->Cache();
//current disable this line to avoid memory
//index->Cache();
return status;
}
......@@ -573,8 +385,12 @@ Status DBImpl::BackgroundMergeFiles(const std::string& table_id) {
}
void DBImpl::BackgroundCompaction(std::set<std::string> table_ids) {
// static int b_count = 0;
// b_count++;
// std::cout << "BackgroundCompaction: " << b_count << std::endl;
Status status;
for (auto table_id : table_ids) {
for (auto& table_id : table_ids) {
status = BackgroundMergeFiles(table_id);
if (!status.ok()) {
bg_error_ = status;
......@@ -583,7 +399,13 @@ void DBImpl::BackgroundCompaction(std::set<std::string> table_ids) {
}
meta_ptr_->Archive();
meta_ptr_->CleanUpFilesWithTTL(1);
int ttl = 1;
if (options_.mode == Options::MODE::CLUSTER) {
ttl = meta::D_SEC;
// ENGINE_LOG_DEBUG << "Server mode is cluster. Clean up files with ttl = " << std::to_string(ttl) << "seconds.";
}
meta_ptr_->CleanUpFilesWithTTL(ttl);
}
void DBImpl::StartBuildIndexTask() {
......@@ -659,7 +481,8 @@ Status DBImpl::BuildIndex(const meta::TableFileSchema& file) {
<< index->PhysicalSize()/(1024*1024) << " M"
<< " from file " << to_remove.file_id_;
index->Cache();
//current disable this line to avoid memory
//index->Cache();
} catch (std::exception& ex) {
return Status::Error("Build index encounter exception", ex.what());
......@@ -698,7 +521,7 @@ Status DBImpl::Size(uint64_t& result) {
DBImpl::~DBImpl() {
shutting_down_.store(true, std::memory_order_release);
bg_timer_thread_.join();
std::vector<std::string> ids;
std::set<std::string> ids;
mem_mgr_->Serialize(ids);
}
......
......@@ -17,6 +17,8 @@
#include <thread>
#include <list>
#include <set>
#include "scheduler/context/SearchContext.h"
namespace zilliz {
namespace milvus {
......@@ -25,49 +27,72 @@ namespace engine {
class Env;
namespace meta {
class Meta;
class Meta;
}
class DBImpl : public DB {
public:
public:
using MetaPtr = meta::Meta::Ptr;
using MemManagerPtr = typename MemManager::Ptr;
DBImpl(const Options& options);
explicit DBImpl(const Options &options);
Status
CreateTable(meta::TableSchema &table_schema) override;
Status
DeleteTable(const std::string &table_id, const meta::DatesT &dates) override;
Status
DescribeTable(meta::TableSchema &table_schema) override;
virtual Status CreateTable(meta::TableSchema& table_schema) override;
virtual Status DeleteTable(const std::string& table_id, const meta::DatesT& dates) override;
virtual Status DescribeTable(meta::TableSchema& table_schema) override;
virtual Status HasTable(const std::string& table_id, bool& has_or_not) override;
virtual Status AllTables(std::vector<meta::TableSchema>& table_schema_array) override;
virtual Status GetTableRowCount(const std::string& table_id, uint64_t& row_count) override;
Status
HasTable(const std::string &table_id, bool &has_or_not) override;
virtual Status InsertVectors(const std::string& table_id,
uint64_t n, const float* vectors, IDNumbers& vector_ids) override;
Status
AllTables(std::vector<meta::TableSchema> &table_schema_array) override;
virtual Status Query(const std::string& table_id, uint64_t k, uint64_t nq,
const float* vectors, QueryResults& results) override;
Status
GetTableRowCount(const std::string &table_id, uint64_t &row_count) override;
virtual Status Query(const std::string& table_id, uint64_t k, uint64_t nq,
const float* vectors, const meta::DatesT& dates, QueryResults& results) override;
Status
InsertVectors(const std::string &table_id, uint64_t n, const float *vectors, IDNumbers &vector_ids) override;
virtual Status Query(const std::string& table_id, const std::vector<std::string>& file_ids,
uint64_t k, uint64_t nq, const float* vectors,
const meta::DatesT& dates, QueryResults& results) override;
Status
Query(const std::string &table_id, uint64_t k, uint64_t nq, const float *vectors, QueryResults &results) override;
virtual Status DropAll() override;
Status
Query(const std::string &table_id,
uint64_t k,
uint64_t nq,
const float *vectors,
const meta::DatesT &dates,
QueryResults &results) override;
virtual Status Size(uint64_t& result) override;
Status
Query(const std::string &table_id,
const std::vector<std::string> &file_ids,
uint64_t k,
uint64_t nq,
const float *vectors,
const meta::DatesT &dates,
QueryResults &results) override;
virtual ~DBImpl();
Status DropAll() override;
private:
Status QuerySync(const std::string& table_id, uint64_t k, uint64_t nq,
const float* vectors, const meta::DatesT& dates, QueryResults& results);
Status Size(uint64_t &result) override;
Status QueryAsync(const std::string& table_id, const meta::TableFilesSchema& files,
uint64_t k, uint64_t nq, const float* vectors,
const meta::DatesT& dates, QueryResults& results);
~DBImpl() override;
private:
Status
QueryAsync(const std::string &table_id,
const meta::TableFilesSchema &files,
uint64_t k,
uint64_t nq,
const float *vectors,
const meta::DatesT &dates,
QueryResults &results);
void StartTimerTasks();
......@@ -76,15 +101,19 @@ private:
void StartMetricTask();
void StartCompactionTask();
Status MergeFiles(const std::string& table_id,
const meta::DateT& date,
const meta::TableFilesSchema& files);
Status BackgroundMergeFiles(const std::string& table_id);
Status MergeFiles(const std::string &table_id,
const meta::DateT &date,
const meta::TableFilesSchema &files);
Status BackgroundMergeFiles(const std::string &table_id);
void BackgroundCompaction(std::set<std::string> table_ids);
void StartBuildIndexTask();
void BackgroundBuildIndex();
Status BuildIndex(const meta::TableFileSchema&);
Status
BuildIndex(const meta::TableFileSchema &);
private:
const Options options_;
......
......@@ -183,6 +183,7 @@ Status DBMetaImpl::DropPartitionsByDates(const std::string &table_id,
}
Status DBMetaImpl::CreateTable(TableSchema &table_schema) {
try {
MetricCollector metric;
......@@ -192,9 +193,11 @@ Status DBMetaImpl::CreateTable(TableSchema &table_schema) {
auto table = ConnectorPtr->select(columns(&TableSchema::state_),
where(c(&TableSchema::table_id_) == table_schema.table_id_));
if (table.size() == 1) {
std::string msg = (TableSchema::TO_DELETE == std::get<0>(table[0])) ?
"Table already exists and it is in delete state, please wait a second" : "Table already exists";
return Status::Error(msg);
if(TableSchema::TO_DELETE == std::get<0>(table[0])) {
return Status::Error("Table already exists and it is in delete state, please wait a second");
} else {
return Status::OK();//table already exists, no error
}
}
}
......@@ -328,7 +331,7 @@ Status DBMetaImpl::HasTable(const std::string &table_id, bool &has_or_not) {
}
} catch (std::exception &e) {
HandleException("Encounter exception when lookup table", e);
return HandleException("Encounter exception when lookup table", e);
}
return Status::OK();
......@@ -358,7 +361,7 @@ Status DBMetaImpl::AllTables(std::vector<TableSchema>& table_schema_array) {
}
} catch (std::exception &e) {
HandleException("Encounter exception when lookup all tables", e);
return HandleException("Encounter exception when lookup all tables", e);
}
return Status::OK();
......@@ -655,7 +658,7 @@ Status DBMetaImpl::Archive() {
for (auto kv : criterias) {
auto &criteria = kv.first;
auto &limit = kv.second;
if (criteria == "days") {
if (criteria == engine::ARCHIVE_CONF_DAYS) {
long usecs = limit * D_SEC * US_PS;
long now = utils::GetMicroSecTimeStamp();
try {
......@@ -671,11 +674,11 @@ Status DBMetaImpl::Archive() {
return HandleException("Encounter exception when update table files", e);
}
}
if (criteria == "disk") {
if (criteria == engine::ARCHIVE_CONF_DISK) {
uint64_t sum = 0;
Size(sum);
auto to_delete = (sum - limit * G);
int64_t to_delete = (int64_t)sum - limit * G;
DiscardFiles(to_delete);
}
}
......
......@@ -11,14 +11,9 @@ namespace zilliz {
namespace milvus {
namespace engine {
Status ExecutionEngine::AddWithIds(const std::vector<float>& vectors, const std::vector<long>& vector_ids) {
long n1 = (long)vectors.size();
long n2 = (long)vector_ids.size();
if (n1 != n2) {
LOG(ERROR) << "vectors size is not equal to the size of vector_ids: " << n1 << "!=" << n2;
return Status::Error("Error: AddWithIds");
}
return AddWithIds(n1, vectors.data(), vector_ids.data());
Status ExecutionEngine::AddWithIdArray(const std::vector<float>& vectors, const std::vector<long>& vector_ids) {
long n = (long)vector_ids.size();
return AddWithIds(n, vectors.data(), vector_ids.data());
}
......
......@@ -23,8 +23,7 @@ enum class EngineType {
class ExecutionEngine {
public:
virtual Status AddWithIds(const std::vector<float>& vectors,
const std::vector<long>& vector_ids);
virtual Status AddWithIdArray(const std::vector<float>& vectors, const std::vector<long>& vector_ids);
virtual Status AddWithIds(long n, const float *xdata, const long *xids) = 0;
......
......@@ -3,16 +3,18 @@
// Unauthorized copying of this file, via any medium is strictly prohibited.
// Proprietary and confidential.
////////////////////////////////////////////////////////////////////////////////
#include <stdlib.h>
#include "Factories.h"
#include "DBImpl.h"
#include <stdlib.h>
#include <time.h>
#include <sstream>
#include <iostream>
#include <vector>
#include <assert.h>
#include <easylogging++.h>
#include <regex>
#include "Exception.h"
namespace zilliz {
namespace milvus {
......@@ -26,6 +28,7 @@ DBMetaOptions DBMetaOptionsFactory::Build(const std::string& path) {
ss << "/tmp/" << rand();
p = ss.str();
}
DBMetaOptions meta;
meta.path = p;
return meta;
......@@ -43,6 +46,48 @@ std::shared_ptr<meta::DBMetaImpl> DBMetaImplFactory::Build() {
return std::shared_ptr<meta::DBMetaImpl>(new meta::DBMetaImpl(options));
}
std::shared_ptr<meta::Meta> DBMetaImplFactory::Build(const DBMetaOptions& metaOptions,
const int& mode) {
std::string uri = metaOptions.backend_uri;
std::string dialectRegex = "(.*)";
std::string usernameRegex = "(.*)";
std::string passwordRegex = "(.*)";
std::string hostRegex = "(.*)";
std::string portRegex = "(.*)";
std::string dbNameRegex = "(.*)";
std::string uriRegexStr = dialectRegex + "\\:\\/\\/" +
usernameRegex + "\\:" +
passwordRegex + "\\@" +
hostRegex + "\\:" +
portRegex + "\\/" +
dbNameRegex;
std::regex uriRegex(uriRegexStr);
std::smatch pieces_match;
if (std::regex_match(uri, pieces_match, uriRegex)) {
std::string dialect = pieces_match[1].str();
std::transform(dialect.begin(), dialect.end(), dialect.begin(), ::tolower);
if (dialect.find("mysql") != std::string::npos) {
ENGINE_LOG_INFO << "Using MySQL";
return std::make_shared<meta::MySQLMetaImpl>(meta::MySQLMetaImpl(metaOptions, mode));
}
else if (dialect.find("sqlite") != std::string::npos) {
ENGINE_LOG_INFO << "Using SQLite";
return std::make_shared<meta::DBMetaImpl>(meta::DBMetaImpl(metaOptions));
}
else {
ENGINE_LOG_ERROR << "Invalid dialect in URI: dialect = " << dialect;
throw InvalidArgumentException("URI dialect is not mysql / sqlite");
}
}
else {
ENGINE_LOG_ERROR << "Wrong URI format: URI = " << uri;
throw InvalidArgumentException("Wrong URI format ");
}
}
std::shared_ptr<DB> DBFactory::Build() {
auto options = OptionsFactory::Build();
auto db = DBFactory::Build(options);
......
......@@ -7,6 +7,7 @@
#include "DB.h"
#include "DBMetaImpl.h"
#include "MySQLMetaImpl.h"
#include "Options.h"
#include "ExecutionEngine.h"
......@@ -27,6 +28,7 @@ struct OptionsFactory {
struct DBMetaImplFactory {
static std::shared_ptr<meta::DBMetaImpl> Build();
static std::shared_ptr<meta::Meta> Build(const DBMetaOptions& metaOptions, const int& mode);
};
struct DBFactory {
......
......@@ -20,36 +20,54 @@ namespace engine {
MemVectors::MemVectors(const std::shared_ptr<meta::Meta>& meta_ptr,
const meta::TableFileSchema& schema, const Options& options)
: pMeta_(meta_ptr),
: meta_(meta_ptr),
options_(options),
schema_(schema),
pIdGenerator_(new SimpleIDGenerator()),
pEE_(EngineFactory::Build(schema_.dimension_, schema_.location_, (EngineType)schema_.engine_type_)) {
id_generator_(new SimpleIDGenerator()),
active_engine_(EngineFactory::Build(schema_.dimension_, schema_.location_, (EngineType)schema_.engine_type_)) {
}
void MemVectors::Add(size_t n_, const float* vectors_, IDNumbers& vector_ids_) {
Status MemVectors::Add(size_t n_, const float* vectors_, IDNumbers& vector_ids_) {
if(active_engine_ == nullptr) {
return Status::Error("index engine is null");
}
auto start_time = METRICS_NOW_TIME;
pIdGenerator_->GetNextIDNumbers(n_, vector_ids_);
pEE_->AddWithIds(n_, vectors_, vector_ids_.data());
id_generator_->GetNextIDNumbers(n_, vector_ids_);
Status status = active_engine_->AddWithIds(n_, vectors_, vector_ids_.data());
auto end_time = METRICS_NOW_TIME;
auto total_time = METRICS_MICROSECONDS(start_time, end_time);
server::Metrics::GetInstance().AddVectorsPerSecondGaugeSet(static_cast<int>(n_), static_cast<int>(schema_.dimension_), total_time);
return status;
}
size_t MemVectors::Total() const {
return pEE_->Count();
size_t MemVectors::RowCount() const {
if(active_engine_ == nullptr) {
return 0;
}
return active_engine_->Count();
}
size_t MemVectors::ApproximateSize() const {
return pEE_->Size();
size_t MemVectors::Size() const {
if(active_engine_ == nullptr) {
return 0;
}
return active_engine_->Size();
}
Status MemVectors::Serialize(std::string& table_id) {
if(active_engine_ == nullptr) {
return Status::Error("index engine is null");
}
table_id = schema_.table_id_;
auto size = ApproximateSize();
auto size = Size();
auto start_time = METRICS_NOW_TIME;
pEE_->Serialize();
active_engine_->Serialize();
auto end_time = METRICS_NOW_TIME;
auto total_time = METRICS_MICROSECONDS(start_time, end_time);
schema_.size_ = size;
......@@ -59,20 +77,20 @@ Status MemVectors::Serialize(std::string& table_id) {
schema_.file_type_ = (size >= options_.index_trigger_size) ?
meta::TableFileSchema::TO_INDEX : meta::TableFileSchema::RAW;
auto status = pMeta_->UpdateTableFile(schema_);
auto status = meta_->UpdateTableFile(schema_);
LOG(DEBUG) << "New " << ((schema_.file_type_ == meta::TableFileSchema::RAW) ? "raw" : "to_index")
<< " file " << schema_.file_id_ << " of size " << (double)(pEE_->Size()) / (double)meta::M << " M";
<< " file " << schema_.file_id_ << " of size " << (double)(active_engine_->Size()) / (double)meta::M << " M";
pEE_->Cache();
active_engine_->Cache();
return status;
}
MemVectors::~MemVectors() {
if (pIdGenerator_ != nullptr) {
delete pIdGenerator_;
pIdGenerator_ = nullptr;
if (id_generator_ != nullptr) {
delete id_generator_;
id_generator_ = nullptr;
}
}
......@@ -81,20 +99,20 @@ MemVectors::~MemVectors() {
*/
MemManager::MemVectorsPtr MemManager::GetMemByTable(
const std::string& table_id) {
auto memIt = memMap_.find(table_id);
if (memIt != memMap_.end()) {
auto memIt = mem_id_map_.find(table_id);
if (memIt != mem_id_map_.end()) {
return memIt->second;
}
meta::TableFileSchema table_file;
table_file.table_id_ = table_id;
auto status = pMeta_->CreateTableFile(table_file);
auto status = meta_->CreateTableFile(table_file);
if (!status.ok()) {
return nullptr;
}
memMap_[table_id] = MemVectorsPtr(new MemVectors(pMeta_, table_file, options_));
return memMap_[table_id];
mem_id_map_[table_id] = MemVectorsPtr(new MemVectors(meta_, table_file, options_));
return mem_id_map_[table_id];
}
Status MemManager::InsertVectors(const std::string& table_id_,
......@@ -114,37 +132,62 @@ Status MemManager::InsertVectorsNoLock(const std::string& table_id,
if (mem == nullptr) {
return Status::NotFound("Group " + table_id + " not found!");
}
mem->Add(n, vectors, vector_ids);
return Status::OK();
//makesure each file size less than index_trigger_size
if(mem->Size() > options_.index_trigger_size) {
std::unique_lock<std::mutex> lock(serialization_mtx_);
immu_mem_list_.push_back(mem);
mem_id_map_.erase(table_id);
return InsertVectorsNoLock(table_id, n, vectors, vector_ids);
} else {
return mem->Add(n, vectors, vector_ids);
}
}
Status MemManager::ToImmutable() {
std::unique_lock<std::mutex> lock(mutex_);
for (auto& kv: memMap_) {
immMems_.push_back(kv.second);
MemIdMap temp_map;
for (auto& kv: mem_id_map_) {
if(kv.second->RowCount() == 0) {
temp_map.insert(kv);
continue;//empty vector, no need to serialize
}
immu_mem_list_.push_back(kv.second);
}
memMap_.clear();
mem_id_map_.swap(temp_map);
return Status::OK();
}
Status MemManager::Serialize(std::vector<std::string>& table_ids) {
Status MemManager::Serialize(std::set<std::string>& table_ids) {
ToImmutable();
std::unique_lock<std::mutex> lock(serialization_mtx_);
std::string table_id;
table_ids.clear();
for (auto& mem : immMems_) {
for (auto& mem : immu_mem_list_) {
mem->Serialize(table_id);
table_ids.push_back(table_id);
table_ids.insert(table_id);
}
immMems_.clear();
immu_mem_list_.clear();
return Status::OK();
}
Status MemManager::EraseMemVector(const std::string& table_id) {
std::unique_lock<std::mutex> lock(mutex_);
memMap_.erase(table_id);
{//erase MemVector from rapid-insert cache
std::unique_lock<std::mutex> lock(mutex_);
mem_id_map_.erase(table_id);
}
{//erase MemVector from serialize cache
std::unique_lock<std::mutex> lock(serialization_mtx_);
MemList temp_list;
for (auto& mem : immu_mem_list_) {
if(mem->TableId() != table_id) {
temp_list.push_back(mem);
}
}
immu_mem_list_.swap(temp_list);
}
return Status::OK();
}
......
......@@ -15,6 +15,7 @@
#include <ctime>
#include <memory>
#include <mutex>
#include <set>
namespace zilliz {
namespace milvus {
......@@ -32,11 +33,11 @@ public:
explicit MemVectors(const std::shared_ptr<meta::Meta>&,
const meta::TableFileSchema&, const Options&);
void Add(size_t n_, const float* vectors_, IDNumbers& vector_ids_);
Status Add(size_t n_, const float* vectors_, IDNumbers& vector_ids_);
size_t Total() const;
size_t RowCount() const;
size_t ApproximateSize() const;
size_t Size() const;
Status Serialize(std::string& table_id);
......@@ -44,16 +45,18 @@ public:
const std::string& Location() const { return schema_.location_; }
std::string TableId() const { return schema_.table_id_; }
private:
MemVectors() = delete;
MemVectors(const MemVectors&) = delete;
MemVectors& operator=(const MemVectors&) = delete;
MetaPtr pMeta_;
MetaPtr meta_;
Options options_;
meta::TableFileSchema schema_;
IDGenerator* pIdGenerator_;
ExecutionEnginePtr pEE_;
IDGenerator* id_generator_;
ExecutionEnginePtr active_engine_;
}; // MemVectors
......@@ -66,14 +69,14 @@ public:
using Ptr = std::shared_ptr<MemManager>;
MemManager(const std::shared_ptr<meta::Meta>& meta, const Options& options)
: pMeta_(meta), options_(options) {}
: meta_(meta), options_(options) {}
MemVectorsPtr GetMemByTable(const std::string& table_id);
Status InsertVectors(const std::string& table_id,
size_t n, const float* vectors, IDNumbers& vector_ids);
Status Serialize(std::vector<std::string>& table_ids);
Status Serialize(std::set<std::string>& table_ids);
Status EraseMemVector(const std::string& table_id);
......@@ -82,11 +85,11 @@ private:
size_t n, const float* vectors, IDNumbers& vector_ids);
Status ToImmutable();
using MemMap = std::map<std::string, MemVectorsPtr>;
using ImmMemPool = std::vector<MemVectorsPtr>;
MemMap memMap_;
ImmMemPool immMems_;
MetaPtr pMeta_;
using MemIdMap = std::map<std::string, MemVectorsPtr>;
using MemList = std::vector<MemVectorsPtr>;
MemIdMap mem_id_map_;
MemList immu_mem_list_;
MetaPtr meta_;
Options options_;
std::mutex mutex_;
std::mutex serialization_mtx_;
......
#include "mysql++/mysql++.h"
#include <string>
#include <unistd.h>
#include <atomic>
#include "Log.h"
// A mysqlpp::ConnectionPool subclass that caps the number of connections
// handed out at once (max_pool_size_) and recycles idle connections after
// max_idle_time_ seconds. Connection parameters are captured at
// construction and used by create() for every new connection.
class MySQLConnectionPool : public mysqlpp::ConnectionPool {

public:
    // The object's only constructor.
    // port = 0 lets the MySQL client library pick its default port.
    MySQLConnectionPool(std::string dbName,
                        std::string userName,
                        std::string passWord,
                        std::string serverIp,
                        int port = 0,
                        int maxPoolSize = 8) :
    db_(dbName),
    user_(userName),
    password_(passWord),
    server_(serverIp),
    port_(port),
    max_pool_size_(maxPoolSize)
    {
        conns_in_use_ = 0;

        max_idle_time_ = 10; //10 seconds
    }

    // The destructor. We _must_ call ConnectionPool::clear() here,
    // because our superclass can't do it for us.
    ~MySQLConnectionPool() override {
        clear();
    }

    // Do a simple form of in-use connection limiting: wait to return
    // a connection until there are a reasonably low number in use
    // already. Can't do this in create() because we're interested in
    // connections actually in use, not those created. Also note that
    // we keep our own count; ConnectionPool::size() isn't the same!
    // NOTE(review): this busy-waits with sleep(1) rather than blocking on a
    // condition variable, and the '>' comparison lets conns_in_use_ reach
    // max_pool_size_ + 1 before callers start waiting — confirm intended.
    mysqlpp::Connection* grab() override {
        while (conns_in_use_ > max_pool_size_) {
            sleep(1);
        }

        ++conns_in_use_;
        return mysqlpp::ConnectionPool::grab();
    }

    // Other half of in-use conn count limit: hand the connection back to
    // the superclass and decrement our own counter (warn on underflow).
    void release(const mysqlpp::Connection* pc) override {
        mysqlpp::ConnectionPool::release(pc);
        if (conns_in_use_ <= 0) {
            ENGINE_LOG_WARNING << "MySQLConnetionPool::release: conns_in_use_ is less than zero. conns_in_use_ = " << conns_in_use_ << std::endl;
        }
        else {
            --conns_in_use_;
        }
    }

    // Current number of connections handed out and not yet released.
    int getConnectionsInUse() {
        return conns_in_use_;
    }

    // Change how long (in seconds) an idle connection may live in the pool.
    void set_max_idle_time(int max_idle) {
        max_idle_time_ = max_idle;
    }

    // Name of the database this pool connects to.
    std::string getDB() {
        return db_;
    }

protected:

    // Superclass overrides

    mysqlpp::Connection* create() override {

        // Create connection using the parameters we were passed upon
        // creation. Empty strings are passed as null pointers, which tells
        // mysqlpp to use the library defaults for that parameter.
        mysqlpp::Connection* conn = new mysqlpp::Connection();
        conn->set_option(new mysqlpp::ReconnectOption(true));
        conn->connect(db_.empty() ? 0 : db_.c_str(),
                      server_.empty() ? 0 : server_.c_str(),
                      user_.empty() ? 0 : user_.c_str(),
                      password_.empty() ? 0 : password_.c_str(),
                      port_);
        return conn;
    }

    void destroy(mysqlpp::Connection* cp) override {
        // Our superclass can't know how we created the Connection, so
        // it delegates destruction to us, to be safe.
        delete cp;
    }

    // Seconds an unused connection may sit in the pool before being destroyed.
    unsigned int max_idle_time() override {
        return max_idle_time_;
    }

private:
    // Number of connections currently in use
    std::atomic<int> conns_in_use_;

    // Our connection parameters
    std::string db_, user_, password_, server_;
    int port_;

    int max_pool_size_;

    unsigned int max_idle_time_;
};
\ No newline at end of file
此差异已折叠。
/*******************************************************************************
* Copyright 上海赜睿信息科技有限公司(Zilliz) - All Rights Reserved
* Unauthorized copying of this file, via any medium is strictly prohibited.
* Proprietary and confidential.
******************************************************************************/
#pragma once
#include "Meta.h"
#include "Options.h"
#include "MySQLConnectionPool.h"
#include "mysql++/mysql++.h"
#include <mutex>
namespace zilliz {
namespace milvus {
namespace engine {
namespace meta {
// auto StoragePrototype(const std::string& path);
// NOTE(review): `using namespace` in a header leaks mysqlpp names into every
// includer; prefer qualifying names explicitly.
using namespace mysqlpp;
// MySQL-backed implementation of the Meta interface: table/table-file
// metadata CRUD, plus archive/cleanup bookkeeping, served through a
// MySQLConnectionPool.
class MySQLMetaImpl : public Meta {
public:
// options_: connection + path settings; mode: server run mode constant.
MySQLMetaImpl(const DBMetaOptions& options_, const int& mode);
// ----- table-level metadata -----
virtual Status CreateTable(TableSchema& table_schema) override;
virtual Status DescribeTable(TableSchema& group_info_) override;
virtual Status HasTable(const std::string& table_id, bool& has_or_not) override;
virtual Status AllTables(std::vector<TableSchema>& table_schema_array) override;
virtual Status DeleteTable(const std::string& table_id) override;
virtual Status DeleteTableFiles(const std::string& table_id) override;
// ----- table-file-level metadata -----
virtual Status CreateTableFile(TableFileSchema& file_schema) override;
virtual Status DropPartitionsByDates(const std::string& table_id,
const DatesT& dates) override;
virtual Status GetTableFiles(const std::string& table_id,
const std::vector<size_t>& ids,
TableFilesSchema& table_files) override;
virtual Status UpdateTableFile(TableFileSchema& file_schema) override;
virtual Status UpdateTableFiles(TableFilesSchema& files) override;
// ----- file selection for search / merge / index build -----
virtual Status FilesToSearch(const std::string& table_id,
const DatesT& partition,
DatePartionedTableFilesSchema& files) override;
virtual Status FilesToMerge(const std::string& table_id,
DatePartionedTableFilesSchema& files) override;
virtual Status FilesToIndex(TableFilesSchema&) override;
// ----- maintenance -----
virtual Status Archive() override;
virtual Status Size(uint64_t& result) override;
virtual Status CleanUp() override;
virtual Status CleanUpFilesWithTTL(uint16_t seconds) override;
virtual Status DropAll() override;
virtual Status Count(const std::string& table_id, uint64_t& result) override;
virtual ~MySQLMetaImpl();
private:
// Generators for unique file/table identifiers.
Status NextFileId(std::string& file_id);
Status NextTableId(std::string& table_id);
// Discard oldest files until `to_discard_size` bytes are freed.
Status DiscardFiles(long long to_discard_size);
// Path helpers for on-disk table / date-partition layout.
std::string GetTablePath(const std::string& table_id);
std::string GetTableDatePartitionPath(const std::string& table_id, DateT& date);
void GetTableFilePath(TableFileSchema& group_file);
// Establish the connection pool and schema; called from the constructor.
Status Initialize();
const DBMetaOptions options_;
const int mode_;
std::shared_ptr<MySQLConnectionPool> mysql_connection_pool_;
// presumably toggles a safer grab path in the pool — TODO confirm usage.
bool safe_grab = false;
// std::mutex connectionMutex_;
}; // DBMetaImpl
} // namespace meta
} // namespace engine
} // namespace milvus
} // namespace zilliz
......@@ -24,6 +24,12 @@ ArchiveConf::ArchiveConf(const std::string& type, const std::string& criterias)
ParseCritirias(criterias);
}
// Merge the given criteria into the current set: entries sharing a key
// overwrite the stored value, all other stored entries are untouched.
void ArchiveConf::SetCriterias(const ArchiveConf::CriteriaT& criterial) {
    for (const auto& entry : criterial) {
        criterias_[entry.first] = entry.second;
    }
}
void ArchiveConf::ParseCritirias(const std::string& criterias) {
std::stringstream ss(criterias);
std::vector<std::string> tokens;
......
......@@ -19,14 +19,20 @@ static constexpr uint64_t ONE_KB = 1024;
static constexpr uint64_t ONE_MB = ONE_KB*ONE_KB;
static constexpr uint64_t ONE_GB = ONE_KB*ONE_MB;
static const std::string ARCHIVE_CONF_DISK = "disk";
static const std::string ARCHIVE_CONF_DAYS = "days";
static const std::string ARCHIVE_CONF_DEFAULT = ARCHIVE_CONF_DISK + ":512";
struct ArchiveConf {
using CriteriaT = std::map<std::string, int>;
ArchiveConf(const std::string& type, const std::string& criterias = "disk:512");
ArchiveConf(const std::string& type, const std::string& criterias = ARCHIVE_CONF_DEFAULT);
const std::string& GetType() const { return type_; }
const CriteriaT GetCriterias() const { return criterias_; }
void SetCriterias(const ArchiveConf::CriteriaT& criterial);
private:
void ParseCritirias(const std::string& type);
void ParseType(const std::string& criterias);
......@@ -41,13 +47,20 @@ struct DBMetaOptions {
ArchiveConf archive_conf = ArchiveConf("delete");
}; // DBMetaOptions
// Engine-wide runtime options.
struct Options {
// Server deployment mode.
typedef enum {
SINGLE,
CLUSTER,
READ_ONLY
} MODE;
Options();
uint16_t memory_sync_interval = 1; //unit: second
// Number of files that triggers a merge.
uint16_t merge_trigger_number = 2;
size_t index_trigger_size = ONE_GB; //unit: byte
// Metadata backend configuration (path, archive criteria, ...).
DBMetaOptions meta;
// One of MODE; stored as int.
int mode = MODE::SINGLE;
}; // Options
......
......@@ -15,33 +15,46 @@ namespace engine {
class Status {
public:
Status() noexcept : state_(nullptr) {}
~Status() { delete[] state_; }
Status(const Status &rhs);
Status &operator=(const Status &rhs);
Status &
operator=(const Status &rhs);
Status(Status &&rhs) noexcept : state_(rhs.state_) { rhs.state_ = nullptr; }
Status &operator=(Status &&rhs_) noexcept;
static Status OK() { return Status(); }
static Status NotFound(const std::string &msg, const std::string &msg2 = "") {
Status &
operator=(Status &&rhs_) noexcept;
static Status
OK() { return Status(); }
static Status
NotFound(const std::string &msg, const std::string &msg2 = "") {
return Status(kNotFound, msg, msg2);
}
static Status Error(const std::string &msg, const std::string &msg2 = "") {
static Status
Error(const std::string &msg, const std::string &msg2 = "") {
return Status(kError, msg, msg2);
}
static Status InvalidDBPath(const std::string &msg, const std::string &msg2 = "") {
static Status
InvalidDBPath(const std::string &msg, const std::string &msg2 = "") {
return Status(kInvalidDBPath, msg, msg2);
}
static Status GroupError(const std::string &msg, const std::string &msg2 = "") {
static Status
GroupError(const std::string &msg, const std::string &msg2 = "") {
return Status(kGroupError, msg, msg2);
}
static Status DBTransactionError(const std::string &msg, const std::string &msg2 = "") {
static Status
DBTransactionError(const std::string &msg, const std::string &msg2 = "") {
return Status(kDBTransactionError, msg, msg2);
}
static Status AlreadyExist(const std::string &msg, const std::string &msg2 = "") {
static Status
AlreadyExist(const std::string &msg, const std::string &msg2 = "") {
return Status(kAlreadyExist, msg, msg2);
}
......
......@@ -24,14 +24,6 @@ TaskDispatchQueue::Put(const ScheduleContextPtr &context) {
return;
}
if (queue_.size() >= capacity_) {
std::string error_msg =
"blocking queue is full, capacity: " + std::to_string(capacity_) + " queue_size: " +
std::to_string(queue_.size());
SERVER_LOG_ERROR << error_msg;
throw server::ServerException(server::SERVER_BLOCKING_QUEUE_EMPTY, error_msg);
}
TaskDispatchStrategy::Schedule(context, queue_);
empty_.notify_all();
......@@ -42,12 +34,6 @@ TaskDispatchQueue::Take() {
std::unique_lock <std::mutex> lock(mtx);
empty_.wait(lock, [this] { return !queue_.empty(); });
if (queue_.empty()) {
std::string error_msg = "blocking queue empty";
SERVER_LOG_ERROR << error_msg;
throw server::ServerException(server::SERVER_BLOCKING_QUEUE_EMPTY, error_msg);
}
ScheduleTaskPtr front(queue_.front());
queue_.pop_front();
full_.notify_all();
......
......@@ -74,20 +74,26 @@ public:
}
std::string table_id = context->table_id();
for(auto iter = task_list.begin(); iter != task_list.end(); ++iter) {
//put delete task to proper position
//for example: task_list has 10 IndexLoadTask, only the No.5 IndexLoadTask is for table1
//if user want to delete table1, the DeleteTask will be insert into No.6 position
for(std::list<ScheduleTaskPtr>::reverse_iterator iter = task_list.rbegin(); iter != task_list.rend(); ++iter) {
if((*iter)->type() != ScheduleTaskType::kIndexLoad) {
continue;
}
//put delete task to proper position
IndexLoadTaskPtr loader = std::static_pointer_cast<IndexLoadTask>(*iter);
if(loader->file_->table_id_ == table_id) {
task_list.insert(++iter, delete_task);
break;
if(loader->file_->table_id_ != table_id) {
continue;
}
task_list.insert(iter.base(), delete_task);
return true;
}
//no task is searching this table, put DeleteTask to front of list so that the table will be delete asap
task_list.push_front(delete_task);
return true;
}
};
......
/*******************************************************************************
* Copyright 上海赜睿信息科技有限公司(Zilliz) - All Rights Reserved
* Unauthorized copying of this file, via any medium is strictly prohibited.
* Proprietary and confidential.
******************************************************************************/
#pragma once
#include <boost/serialization/access.hpp>
#include <string>
#include <map>
// Snapshot of the machine's GPU fingerprint: device count plus a map of
// device index -> encrypted (SHA256) UUID. Boost-serializable.
class GPUInfoFile {
public:
GPUInfoFile() = default;
GPUInfoFile(const int &device_count, const std::map<int, std::string> &uuid_encryption_map)
: device_count_(device_count), uuid_encryption_map_(uuid_encryption_map) {}
int get_device_count() {
return device_count_;
}
std::map<int, std::string> &get_uuid_encryption_map() {
return uuid_encryption_map_;
}
public:
friend class boost::serialization::access;
// Serializes both fields; `version` is unused.
template<typename Archive>
void serialize(Archive &ar, const unsigned int version) {
ar & device_count_;
ar & uuid_encryption_map_;
}
public:
// Public data members (also exposed via the getters above).
int device_count_ = 0;
std::map<int, std::string> uuid_encryption_map_;
};
// Owning wrapper used as the boost-serialization root for GPUInfoFile.
// Takes ownership of the pointer passed to set_gpu_info_file and deletes
// it in the destructor.
class SerializedGPUInfoFile {
public:
~SerializedGPUInfoFile() {
if (gpu_info_file_ != nullptr) {
delete (gpu_info_file_);
gpu_info_file_ = nullptr;
}
}
// Transfers ownership of gpu_info_file to this object.
void
set_gpu_info_file(GPUInfoFile *gpu_info_file) {
gpu_info_file_ = gpu_info_file;
}
// Borrowed pointer; ownership stays with this object.
GPUInfoFile *get_gpu_info_file() {
return gpu_info_file_;
}
private:
friend class boost::serialization::access;
// Serializes through the owned pointer; `version` is unused.
template<typename Archive>
void serialize(Archive &ar, const unsigned int version) {
ar & gpu_info_file_;
}
private:
GPUInfoFile *gpu_info_file_ = nullptr;
};
///*******************************************************************************
// * Copyright 上海赜睿信息科技有限公司(Zilliz) - All Rights Reserved
// * Unauthorized copying of this file, via any medium is strictly prohibited.
// * Proprietary and confidential.
// ******************************************************************************/
//#pragma once
//
//#include <boost/serialization/access.hpp>
//#include <string>
//#include <map>
//
//
//class GPUInfoFile {
// public:
// GPUInfoFile() = default;
//
// GPUInfoFile(const int &device_count, const std::map<int, std::string> &uuid_encryption_map)
// : device_count_(device_count), uuid_encryption_map_(uuid_encryption_map) {}
//
// int get_device_count() {
// return device_count_;
// }
// std::map<int, std::string> &get_uuid_encryption_map() {
// return uuid_encryption_map_;
// }
//
//
// public:
// friend class boost::serialization::access;
//
// template<typename Archive>
// void serialize(Archive &ar, const unsigned int version) {
// ar & device_count_;
// ar & uuid_encryption_map_;
// }
//
// public:
// int device_count_ = 0;
// std::map<int, std::string> uuid_encryption_map_;
//};
//
//class SerializedGPUInfoFile {
// public:
// ~SerializedGPUInfoFile() {
// if (gpu_info_file_ != nullptr) {
// delete (gpu_info_file_);
// gpu_info_file_ = nullptr;
// }
// }
//
// void
// set_gpu_info_file(GPUInfoFile *gpu_info_file) {
// gpu_info_file_ = gpu_info_file;
// }
//
// GPUInfoFile *get_gpu_info_file() {
// return gpu_info_file_;
// }
// private:
// friend class boost::serialization::access;
//
// template<typename Archive>
// void serialize(Archive &ar, const unsigned int version) {
// ar & gpu_info_file_;
// }
//
// private:
// GPUInfoFile *gpu_info_file_ = nullptr;
//};
#include "utils/Log.h"
#include "LicenseLibrary.h"
#include "utils/Error.h"
#include <iostream>
#include <getopt.h>
#include <memory.h>
// Not provide path: current work path will be used and system.info.
using namespace zilliz::milvus;
// Print the command-line usage banner for this tool to stdout.
void
print_usage(const std::string &app_name) {
    printf("\n Usage: %s [OPTIONS]\n\n", app_name.c_str());
    // Option table emitted as one concatenated literal; output is identical
    // to issuing one printf per line.
    printf(" Options:\n"
           " -h --help Print this help\n"
           " -s --sysinfo filename Generate system info file as given name\n"
           "\n");
}
// Entry point of the system-info generator.
//
// Usage: no arguments (writes ./system.info) or `-s/--sysinfo <file>`.
// Collects the GPU fingerprint (device count + SHA256 of each device UUID)
// via LicenseLibrary and serializes it to the output file.
// Returns 0 on success; -1 on any library failure (kept as-is for
// compatibility with existing callers/scripts).
int main(int argc, char *argv[]) {
    std::string app_name = argv[0];
    if (argc != 1 && argc != 3) {
        print_usage(app_name);
        return EXIT_FAILURE;
    }
    static struct option long_options[] = {{"system_info", required_argument, 0, 's'},
                                           {"help", no_argument, 0, 'h'},
                                           {NULL, 0, 0, 0}};
    int value = 0;
    int option_index = 0;
    std::string system_info_filename = "./system.info";
    while ((value = getopt_long(argc, argv, "s:h", long_options, &option_index)) != -1) {
        switch (value) {
            case 's': {
                // std::string copies optarg directly; the original
                // strdup/free round-trip was unnecessary.
                system_info_filename = optarg;
                // printf("Generate system info file: %s\n", system_info_filename.c_str());
                break;
            }
            case 'h':print_usage(app_name);
                return EXIT_SUCCESS;
            case '?':print_usage(app_name);
                return EXIT_FAILURE;
            default:print_usage(app_name);
                break;
        }
    }
    int device_count = 0;
    server::ServerError err = server::LicenseLibrary::GetDeviceCount(device_count);
    if (err != server::SERVER_SUCCESS) return -1;
    // 1. Get All GPU UUID
    std::vector<std::string> uuid_array;
    err = server::LicenseLibrary::GetUUID(device_count, uuid_array);
    if (err != server::SERVER_SUCCESS) return -1;
    // 2. Get UUID SHA256
    std::vector<std::string> uuid_sha256_array;
    err = server::LicenseLibrary::GetUUIDSHA256(device_count, uuid_array, uuid_sha256_array);
    if (err != server::SERVER_SUCCESS) return -1;
    // 3. Generate GPU ID map with GPU UUID
    std::map<int, std::string> uuid_encrption_map;
    for (int i = 0; i < device_count; ++i) {
        uuid_encrption_map[i] = uuid_sha256_array[i];
    }
    // 4. Generate GPU_info File
    err = server::LicenseLibrary::GPUinfoFileSerialization(system_info_filename,
                                                           device_count,
                                                           uuid_encrption_map);
    if (err != server::SERVER_SUCCESS) return -1;
    printf("Generate GPU_info File Success\n");
    return 0;
}
\ No newline at end of file
//
//#include "utils/Log.h"
//#include "LicenseLibrary.h"
//#include "utils/Error.h"
//
//#include <iostream>
//#include <getopt.h>
//#include <memory.h>
//// Not provide path: current work path will be used and system.info.
//using namespace zilliz::milvus;
//
//void
//print_usage(const std::string &app_name) {
// printf("\n Usage: %s [OPTIONS]\n\n", app_name.c_str());
// printf(" Options:\n");
// printf(" -h --help Print this help\n");
// printf(" -s --sysinfo filename Generate system info file as given name\n");
// printf("\n");
//}
//
//int main(int argc, char *argv[]) {
// std::string app_name = argv[0];
// if (argc != 1 && argc != 3) {
// print_usage(app_name);
// return EXIT_FAILURE;
// }
//
// static struct option long_options[] = {{"system_info", required_argument, 0, 's'},
// {"help", no_argument, 0, 'h'},
// {NULL, 0, 0, 0}};
// int value = 0;
// int option_index = 0;
// std::string system_info_filename = "./system.info";
// while ((value = getopt_long(argc, argv, "s:h", long_options, &option_index)) != -1) {
// switch (value) {
// case 's': {
// char *system_info_filename_ptr = strdup(optarg);
// system_info_filename = system_info_filename_ptr;
// free(system_info_filename_ptr);
//// printf("Generate system info file: %s\n", system_info_filename.c_str());
// break;
// }
// case 'h':print_usage(app_name);
// return EXIT_SUCCESS;
// case '?':print_usage(app_name);
// return EXIT_FAILURE;
// default:print_usage(app_name);
// break;
// }
// }
//
// int device_count = 0;
// server::ServerError err = server::LicenseLibrary::GetDeviceCount(device_count);
// if (err != server::SERVER_SUCCESS) return -1;
//
// // 1. Get All GPU UUID
// std::vector<std::string> uuid_array;
// err = server::LicenseLibrary::GetUUID(device_count, uuid_array);
// if (err != server::SERVER_SUCCESS) return -1;
//
// // 2. Get UUID SHA256
// std::vector<std::string> uuid_sha256_array;
// err = server::LicenseLibrary::GetUUIDSHA256(device_count, uuid_array, uuid_sha256_array);
// if (err != server::SERVER_SUCCESS) return -1;
//
// // 3. Generate GPU ID map with GPU UUID
// std::map<int, std::string> uuid_encrption_map;
// for (int i = 0; i < device_count; ++i) {
// uuid_encrption_map[i] = uuid_sha256_array[i];
// }
//
//
// // 4. Generate GPU_info File
// err = server::LicenseLibrary::GPUinfoFileSerialization(system_info_filename,
// device_count,
// uuid_encrption_map);
// if (err != server::SERVER_SUCCESS) return -1;
//
// printf("Generate GPU_info File Success\n");
//
//
// return 0;
//}
\ No newline at end of file
#include "LicenseCheck.h"
#include <iostream>
#include <thread>
#include <boost/archive/binary_oarchive.hpp>
#include <boost/archive/binary_iarchive.hpp>
//#include <boost/foreach.hpp>
//#include <boost/serialization/vector.hpp>
#include <boost/filesystem/path.hpp>
#include <boost/serialization/map.hpp>
#include <boost/filesystem/operations.hpp>
#include <boost/thread.hpp>
#include <boost/date_time/posix_time/posix_time.hpp>
namespace zilliz {
namespace milvus {
namespace server {
// Nothing to initialize; the io_service / thread start lazily in
// StartCountingDown().
LicenseCheck::LicenseCheck() {
}
// Stop the background re-check thread (if running) before destruction.
LicenseCheck::~LicenseCheck() {
StopCountingDown();
}
// Validate the license file against this machine's GPU fingerprint and the
// current system time. Returns SERVER_SUCCESS when all checks pass,
// SERVER_UNEXPECTED_ERROR otherwise (a numbered message is printed so the
// failing step can be identified: 01 deserialization, 02 device count,
// 03 UUID hash mismatch, 04 time window).
ServerError
LicenseCheck::LegalityCheck(const std::string &license_file_path) {
// Fingerprint the local machine: device count and SHA256 of each GPU UUID.
int device_count;
LicenseLibrary::GetDeviceCount(device_count);
std::vector<std::string> uuid_array;
LicenseLibrary::GetUUID(device_count, uuid_array);
std::vector<std::string> sha_array;
LicenseLibrary::GetUUIDSHA256(device_count, uuid_array, sha_array);
// Load the same data, plus the validity window, from the license file.
int output_device_count;
std::map<int, std::string> uuid_encryption_map;
time_t starting_time;
time_t end_time;
ServerError err = LicenseLibrary::LicenseFileDeserialization(license_file_path,
output_device_count,
uuid_encryption_map,
starting_time,
end_time);
if(err !=SERVER_SUCCESS)
{
std::cout << "License check error: 01" << std::endl;
return SERVER_UNEXPECTED_ERROR;
}
time_t system_time;
LicenseLibrary::GetSystemTime(system_time);
// Device count must match exactly.
if (device_count != output_device_count) {
std::cout << "License check error: 02" << std::endl;
return SERVER_UNEXPECTED_ERROR;
}
// Every local UUID hash must match the licensed hash for that index.
for (int i = 0; i < device_count; ++i) {
if (sha_array[i] != uuid_encryption_map[i]) {
std::cout << "License check error: 03" << std::endl;
return SERVER_UNEXPECTED_ERROR;
}
}
// Current time must fall inside the licensed window.
if (system_time < starting_time || system_time > end_time) {
std::cout << "License check error: 04" << std::endl;
return SERVER_UNEXPECTED_ERROR;
}
std::cout << "Legality Check Success" << std::endl;
return SERVER_SUCCESS;
}
// Part 2: Timing check license
// Timer callback: re-run the legality check, then re-arm the timer one hour
// out so the check repeats for the process lifetime. Terminates the process
// (exit(1)) if the license no longer validates.
ServerError
LicenseCheck::AlterFile(const std::string &license_file_path,
const boost::system::error_code &ec,
boost::asio::deadline_timer *pt) {
ServerError err = LicenseCheck::LegalityCheck(license_file_path);
if(err!=SERVER_SUCCESS) {
std::cout << "license file check error" << std::endl;
exit(1);
}
std::cout << "---runing---" << std::endl;
// Advance the expiry relative to the previous deadline (not "now") so the
// checks stay on a fixed hourly cadence, then schedule this callback again.
pt->expires_at(pt->expires_at() + boost::posix_time::hours(1));
pt->async_wait(boost::bind(LicenseCheck::AlterFile, license_file_path, boost::asio::placeholders::error, pt));
return SERVER_SUCCESS;
}
// Launch (once) a background thread that checks the license file every hour
// via AlterFile. Exits the process if the file does not exist. Safe to call
// repeatedly; only the first call starts the thread.
ServerError
LicenseCheck::StartCountingDown(const std::string &license_file_path) {
    if (!LicenseLibrary::IsFileExistent(license_file_path)) {
        std::cout << "license file not exist" << std::endl;
        exit(1);
    }
    //create a thread to run AlterFile
    if(counting_thread_ == nullptr) {
        // BUGFIX: the lambda previously captured everything by reference
        // ([&]), so the thread read the caller's `license_file_path` string
        // after StartCountingDown returned — a dangling reference. Capture
        // the path by value; `this` is still needed for io_service_.
        counting_thread_ = std::make_shared<std::thread>([this, license_file_path]() {
            boost::asio::deadline_timer t(io_service_, boost::posix_time::hours(1));
            t.async_wait(boost::bind(LicenseCheck::AlterFile, license_file_path, boost::asio::placeholders::error, &t));
            io_service_.run();//this thread will block here
        });
    }
    return SERVER_SUCCESS;
}
// Stop the periodic check: halt the io_service first so the worker thread's
// run() returns, then join and release the thread. Order matters — joining
// before stopping would deadlock on the blocked run() call.
ServerError
LicenseCheck::StopCountingDown() {
if(!io_service_.stopped()) {
io_service_.stop();
}
if(counting_thread_ != nullptr) {
counting_thread_->join();
counting_thread_ = nullptr;
}
return SERVER_SUCCESS;
}
}
}
}
\ No newline at end of file
//#include "LicenseCheck.h"
//#include <iostream>
//#include <thread>
//
//#include <boost/archive/binary_oarchive.hpp>
//#include <boost/archive/binary_iarchive.hpp>
////#include <boost/foreach.hpp>
////#include <boost/serialization/vector.hpp>
//#include <boost/filesystem/path.hpp>
//#include <boost/serialization/map.hpp>
//#include <boost/filesystem/operations.hpp>
//#include <boost/thread.hpp>
//#include <boost/date_time/posix_time/posix_time.hpp>
//
//
//namespace zilliz {
//namespace milvus {
//namespace server {
//
//LicenseCheck::LicenseCheck() {
//
//}
//
//LicenseCheck::~LicenseCheck() {
// StopCountingDown();
//}
//
//ServerError
//LicenseCheck::LegalityCheck(const std::string &license_file_path) {
//
// int device_count;
// LicenseLibrary::GetDeviceCount(device_count);
// std::vector<std::string> uuid_array;
// LicenseLibrary::GetUUID(device_count, uuid_array);
//
// std::vector<std::string> sha_array;
// LicenseLibrary::GetUUIDSHA256(device_count, uuid_array, sha_array);
//
// int output_device_count;
// std::map<int, std::string> uuid_encryption_map;
// time_t starting_time;
// time_t end_time;
// ServerError err = LicenseLibrary::LicenseFileDeserialization(license_file_path,
// output_device_count,
// uuid_encryption_map,
// starting_time,
// end_time);
// if(err !=SERVER_SUCCESS)
// {
// std::cout << "License check error: 01" << std::endl;
// return SERVER_UNEXPECTED_ERROR;
// }
// time_t system_time;
// LicenseLibrary::GetSystemTime(system_time);
//
// if (device_count != output_device_count) {
// std::cout << "License check error: 02" << std::endl;
// return SERVER_UNEXPECTED_ERROR;
// }
// for (int i = 0; i < device_count; ++i) {
// if (sha_array[i] != uuid_encryption_map[i]) {
// std::cout << "License check error: 03" << std::endl;
// return SERVER_UNEXPECTED_ERROR;
// }
// }
// if (system_time < starting_time || system_time > end_time) {
// std::cout << "License check error: 04" << std::endl;
// return SERVER_UNEXPECTED_ERROR;
// }
// std::cout << "Legality Check Success" << std::endl;
// return SERVER_SUCCESS;
//}
//
//// Part 2: Timing check license
//
//ServerError
//LicenseCheck::AlterFile(const std::string &license_file_path,
// const boost::system::error_code &ec,
// boost::asio::deadline_timer *pt) {
//
// ServerError err = LicenseCheck::LegalityCheck(license_file_path);
// if(err!=SERVER_SUCCESS) {
// std::cout << "license file check error" << std::endl;
// exit(1);
// }
//
// std::cout << "---runing---" << std::endl;
// pt->expires_at(pt->expires_at() + boost::posix_time::hours(1));
// pt->async_wait(boost::bind(LicenseCheck::AlterFile, license_file_path, boost::asio::placeholders::error, pt));
//
// return SERVER_SUCCESS;
//
//}
//
//ServerError
//LicenseCheck::StartCountingDown(const std::string &license_file_path) {
//
// if (!LicenseLibrary::IsFileExistent(license_file_path)) {
// std::cout << "license file not exist" << std::endl;
// exit(1);
// }
//
// //create a thread to run AlterFile
// if(counting_thread_ == nullptr) {
// counting_thread_ = std::make_shared<std::thread>([&]() {
// boost::asio::deadline_timer t(io_service_, boost::posix_time::hours(1));
// t.async_wait(boost::bind(LicenseCheck::AlterFile, license_file_path, boost::asio::placeholders::error, &t));
// io_service_.run();//this thread will block here
// });
// }
//
// return SERVER_SUCCESS;
//}
//
//ServerError
//LicenseCheck::StopCountingDown() {
// if(!io_service_.stopped()) {
// io_service_.stop();
// }
//
// if(counting_thread_ != nullptr) {
// counting_thread_->join();
// counting_thread_ = nullptr;
// }
//
// return SERVER_SUCCESS;
//}
//
//}
//}
//}
\ No newline at end of file
#pragma once
#include "utils/Error.h"
#include "LicenseLibrary.h"
#include <boost/asio.hpp>
#include <thread>
#include <memory>
namespace zilliz {
namespace milvus {
namespace server {
// Singleton that validates the license file against the local GPU
// fingerprint and re-checks it hourly on a background thread.
class LicenseCheck {
private:
LicenseCheck();
~LicenseCheck();
public:
// Meyers-singleton accessor.
static LicenseCheck &
GetInstance() {
static LicenseCheck instance;
return instance;
};
// One-shot validation of the license file; see the .cpp for the checks.
static ServerError
LegalityCheck(const std::string &license_file_path);
// Start / stop the hourly background re-check thread.
ServerError
StartCountingDown(const std::string &license_file_path);
ServerError
StopCountingDown();
private:
// Timer callback that re-runs LegalityCheck and re-arms the timer.
static ServerError
AlterFile(const std::string &license_file_path,
const boost::system::error_code &ec,
boost::asio::deadline_timer *pt);
private:
boost::asio::io_service io_service_;
std::shared_ptr<std::thread> counting_thread_;
};
}
}
}
//#pragma once
//
//#include "utils/Error.h"
//#include "LicenseLibrary.h"
//
//#include <boost/asio.hpp>
//
//#include <thread>
//#include <memory>
//
//namespace zilliz {
//namespace milvus {
//namespace server {
//
//class LicenseCheck {
//private:
// LicenseCheck();
// ~LicenseCheck();
//
//public:
// static LicenseCheck &
// GetInstance() {
// static LicenseCheck instance;
// return instance;
// };
//
// static ServerError
// LegalityCheck(const std::string &license_file_path);
//
// ServerError
// StartCountingDown(const std::string &license_file_path);
//
// ServerError
// StopCountingDown();
//
//private:
// static ServerError
// AlterFile(const std::string &license_file_path,
// const boost::system::error_code &ec,
// boost::asio::deadline_timer *pt);
//
//private:
// boost::asio::io_service io_service_;
// std::shared_ptr<std::thread> counting_thread_;
//
//};
//
//}
//}
//}
//
//
/*******************************************************************************
* Copyright 上海赜睿信息科技有限公司(Zilliz) - All Rights Reserved
* Unauthorized copying of this file, via any medium is strictly prohibited.
* Proprietary and confidential.
******************************************************************************/
#pragma once
#include <boost/serialization/access.hpp>
#include <string>
#include <map>
// License payload: GPU fingerprint (device count + index -> SHA256 UUID map)
// plus the validity window [starting_time_, end_time_]. Boost-serializable.
class LicenseFile {
public:
LicenseFile() = default;
LicenseFile(const int &device_count,
const std::map<int, std::string> &uuid_encryption_map,
const time_t &starting_time,
const time_t &end_time)
: device_count_(device_count),
uuid_encryption_map_(uuid_encryption_map),
starting_time_(starting_time),
end_time_(end_time) {}
int get_device_count() {
return device_count_;
}
std::map<int, std::string> &get_uuid_encryption_map() {
return uuid_encryption_map_;
}
time_t get_starting_time() {
return starting_time_;
}
time_t get_end_time() {
return end_time_;
}
public:
friend class boost::serialization::access;
// Serializes all four fields; `version` is unused.
template<typename Archive>
void serialize(Archive &ar, const unsigned int version) {
ar & device_count_;
ar & uuid_encryption_map_;
ar & starting_time_;
ar & end_time_;
}
public:
// Public data members (also exposed via the getters above).
int device_count_ = 0;
std::map<int, std::string> uuid_encryption_map_;
time_t starting_time_ = 0;
time_t end_time_ = 0;
};
// Owning wrapper used as the boost-serialization root for LicenseFile.
// Takes ownership of the pointer passed to set_license_file and deletes it
// in the destructor.
class SerializedLicenseFile {
public:
~SerializedLicenseFile() {
if (license_file_ != nullptr) {
delete (license_file_);
license_file_ = nullptr;
}
}
// Transfers ownership of license_file to this object.
void
set_license_file(LicenseFile *license_file) {
license_file_ = license_file;
}
// Borrowed pointer; ownership stays with this object.
LicenseFile *get_license_file() {
return license_file_;
}
private:
friend class boost::serialization::access;
// Serializes through the owned pointer; `version` is unused.
template<typename Archive>
void serialize(Archive &ar, const unsigned int version) {
ar & license_file_;
}
private:
LicenseFile *license_file_ = nullptr;
};
///*******************************************************************************
// * Copyright 上海赜睿信息科技有限公司(Zilliz) - All Rights Reserved
// * Unauthorized copying of this file, via any medium is strictly prohibited.
// * Proprietary and confidential.
// ******************************************************************************/
//#pragma once
//
//
//#include <boost/serialization/access.hpp>
//#include <string>
//#include <map>
//
//
//class LicenseFile {
// public:
// LicenseFile() = default;
//
// LicenseFile(const int &device_count,
// const std::map<int, std::string> &uuid_encryption_map,
// const time_t &starting_time,
// const time_t &end_time)
// : device_count_(device_count),
// uuid_encryption_map_(uuid_encryption_map),
// starting_time_(starting_time),
// end_time_(end_time) {}
//
// int get_device_count() {
// return device_count_;
// }
// std::map<int, std::string> &get_uuid_encryption_map() {
// return uuid_encryption_map_;
// }
// time_t get_starting_time() {
// return starting_time_;
// }
// time_t get_end_time() {
// return end_time_;
// }
//
// public:
// friend class boost::serialization::access;
//
// template<typename Archive>
// void serialize(Archive &ar, const unsigned int version) {
// ar & device_count_;
// ar & uuid_encryption_map_;
// ar & starting_time_;
// ar & end_time_;
// }
//
// public:
// int device_count_ = 0;
// std::map<int, std::string> uuid_encryption_map_;
// time_t starting_time_ = 0;
// time_t end_time_ = 0;
//};
//
//class SerializedLicenseFile {
// public:
// ~SerializedLicenseFile() {
// if (license_file_ != nullptr) {
// delete (license_file_);
// license_file_ = nullptr;
// }
// }
//
// void
// set_license_file(LicenseFile *license_file) {
// license_file_ = license_file;
// }
//
// LicenseFile *get_license_file() {
// return license_file_;
// }
// private:
// friend class boost::serialization::access;
//
// template<typename Archive>
// void serialize(Archive &ar, const unsigned int version) {
// ar & license_file_;
// }
//
// private:
// LicenseFile *license_file_ = nullptr;
//};
//
#include <iostream>
#include <getopt.h>
#include <memory.h>
#include "utils/Log.h"
#include "license/LicenseLibrary.h"
#include "utils/Error.h"
using namespace zilliz::milvus;
// Not provide path: current work path will be used and system.info.
// Print the command-line usage banner for the license generator to stdout.
void
print_usage(const std::string &app_name) {
    printf("\n Usage: %s [OPTIONS]\n\n", app_name.c_str());
    // Option table emitted as one concatenated literal; output is identical
    // to issuing one printf per line.
    printf(" Options:\n"
           " -h --help Print this help\n"
           " -s --sysinfo filename sysinfo file location\n"
           " -l --license filename Generate license file as given name\n"
           " -b --starting time Set start time (format: YYYY-MM-DD)\n"
           " -e --end time Set end time (format: YYYY-MM-DD)\n"
           "\n");
}
/**
 * Entry point for the license-file generator.
 *
 * Reads a previously serialized GPU system-info file, combines it with a
 * validity window ([start, end] dates), and writes an encrypted license file.
 *
 * Required options: -s <sysinfo file>, -b <start date>, -e <end date>.
 * Optional: -l <license output path> (defaults to ./system.license),
 *           -h (print help and exit).
 *
 * Returns EXIT_SUCCESS/0 on success; 1 on missing required options;
 * -1 on any LicenseLibrary failure; EXIT_FAILURE on bad arguments.
 */
int main(int argc, char *argv[]) {
    std::string app_name = argv[0];
    static struct option long_options[] = {{"system_info", required_argument, 0, 's'},
                                           {"license", optional_argument, 0, 'l'},
                                           {"help", no_argument, 0, 'h'},
                                           {"starting_time", required_argument, 0, 'b'},
                                           {"end_time", required_argument, 0, 'e'},
                                           {NULL, 0, 0, 0}};
    server::ServerError err;
    int value = 0;
    std::string system_info_filename = "./system.info";
    std::string license_filename = "./system.license";
    char *string_starting_time = NULL;
    char *string_end_time = NULL;
    time_t starting_time = 0;
    time_t end_time = 0;
    // Presence flags for the mandatory options; 1 = not yet supplied,
    // cleared to 0 when the corresponding option is parsed.
    int flag_s = 1;
    int flag_b = 1;
    int flag_e = 1;
    while ((value = getopt_long(argc, argv, "hl:s:b:e:", long_options, NULL)) != -1) {
        switch (value) {
            case 's': {
                flag_s = 0;
                system_info_filename = (std::string) (optarg);
                break;
            }
            case 'b': {
                flag_b = 0;
                string_starting_time = optarg;
                break;
            }
            case 'e': {
                flag_e = 0;
                string_end_time = optarg;
                break;
            }
            case 'l': {
                license_filename = (std::string) (optarg);
                break;
            }
            case 'h':print_usage(app_name);
                return EXIT_SUCCESS;
            case '?':print_usage(app_name);
                return EXIT_FAILURE;
            default:print_usage(app_name);
                break;
        }
    }
    // Reject invocations missing any of the three mandatory options.
    if (flag_s) {
        printf("Error: sysinfo file location must be entered\n");
        return 1;
    }
    if (flag_b) {
        printf("Error: start time must be entered\n");
        return 1;
    }
    if (flag_e) {
        printf("Error: end time must be entered\n");
        return 1;
    }
    // Parse the YYYY-MM-DD strings into time_t values.
    err = server::LicenseLibrary::GetDateTime(string_starting_time, starting_time);
    if (err != server::SERVER_SUCCESS) return -1;
    err = server::LicenseLibrary::GetDateTime(string_end_time, end_time);
    if (err != server::SERVER_SUCCESS) return -1;
    // Load the GPU count and per-device UUID fingerprints from the sysinfo file.
    int output_info_device_count = 0;
    std::map<int, std::string> output_info_uuid_encrption_map;
    err = server::LicenseLibrary::GPUinfoFileDeserialization(system_info_filename,
                                                             output_info_device_count,
                                                             output_info_uuid_encrption_map);
    if (err != server::SERVER_SUCCESS) return -1;
    // Serialize the license: device fingerprints plus the validity window.
    err = server::LicenseLibrary::LicenseFileSerialization(license_filename,
                                                           output_info_device_count,
                                                           output_info_uuid_encrption_map,
                                                           starting_time,
                                                           end_time);
    if (err != server::SERVER_SUCCESS) return -1;
    printf("Generate License File Success\n");
    return 0;
}
// if (flag_s) {
// printf("Error: sysinfo file location must be entered\n");
// return 1;
// }
// if (flag_b) {
// printf("Error: start time must be entered\n");
// return 1;
// }
// if (flag_e) {
// printf("Error: end time must be entered\n");
// return 1;
// }
//
// err = server::LicenseLibrary::GetDateTime(string_starting_time, starting_time);
// if (err != server::SERVER_SUCCESS) return -1;
//
// err = server::LicenseLibrary::GetDateTime(string_end_time, end_time);
// if (err != server::SERVER_SUCCESS) return -1;
//
//
// int output_info_device_count = 0;
// std::map<int, std::string> output_info_uuid_encrption_map;
//
//
// err = server::LicenseLibrary::GPUinfoFileDeserialization(system_info_filename,
// output_info_device_count,
// output_info_uuid_encrption_map);
// if (err != server::SERVER_SUCCESS) return -1;
//
//
// err = server::LicenseLibrary::LicenseFileSerialization(license_filename,
// output_info_device_count,
// output_info_uuid_encrption_map,
// starting_time,
// end_time);
// if (err != server::SERVER_SUCCESS) return -1;
//
//
// printf("Generate License File Success\n");
//
// return 0;
//}
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
......@@ -59,8 +59,6 @@ class SystemInfo {
double MemoryPercent();
double CPUPercent();
std::pair<unsigned long long , unsigned long long > Octets();
// std::unordered_map<int,std::vector<double>> GetGPUMemPercent() {};
// std::vector<std::string> split(std::string input) {};
std::vector<unsigned int> GPUPercent();
std::vector<unsigned long long> GPUMemoryUsed();
......
......@@ -32,4 +32,4 @@ target_link_libraries(milvus_sdk
add_subdirectory(examples)
install(TARGETS milvus_sdk DESTINATION bin)
install(TARGETS milvus_sdk DESTINATION lib)
此差异已折叠。
......@@ -23,6 +23,8 @@ public:
virtual Status CreateTable(const TableSchema &param) override;
virtual bool HasTable(const std::string &table_name) override;
virtual Status DeleteTable(const std::string &table_name) override;
virtual Status AddVector(const std::string &table_name,
......
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册