Commit 90516aeb authored by kun yu

Merge remote-tracking branch 'upstream/branch-0.4.0' into branch-0.4.0


Former-commit-id: 5474859a3d793694efca59e69eb33bedc2588baf
[submodule "cpp/thirdparty/knowhere"]
path = cpp/thirdparty/knowhere
url = git@192.168.1.105:xiaojun.lin/knowhere.git
branch = develop
url = git@192.168.1.105:megasearch/knowhere.git
branch = branch-0.4.0
......@@ -5,7 +5,7 @@ try {
dir ("milvus-helm") {
checkout([$class: 'GitSCM', branches: [[name: "${SEMVER}"]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_USER}", url: "git@192.168.1.105:megasearch/milvus-helm.git", name: 'origin', refspec: "+refs/heads/${SEMVER}:refs/remotes/origin/${SEMVER}"]]])
dir ("milvus/milvus-cluster") {
sh "helm install --wait --timeout 300 --set roServers.image.tag=${DOCKER_VERSION} --set woServers.image.tag=${DOCKER_VERSION} --set expose.type=clusterIP -f ci/values.yaml --name ${env.JOB_NAME}-${env.BUILD_NUMBER}-cluster --namespace milvus-cluster --version 0.3.1 . "
sh "helm install --wait --timeout 300 --set roServers.image.tag=${DOCKER_VERSION} --set woServers.image.tag=${DOCKER_VERSION} --set expose.type=clusterIP -f ci/values.yaml --name ${env.JOB_NAME}-${env.BUILD_NUMBER}-cluster --namespace milvus-cluster --version 0.4.0 . "
}
}
/*
......
......@@ -5,7 +5,7 @@ try {
dir ("milvus-helm") {
checkout([$class: 'GitSCM', branches: [[name: "${SEMVER}"]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_USER}", url: "git@192.168.1.105:megasearch/milvus-helm.git", name: 'origin', refspec: "+refs/heads/${SEMVER}:refs/remotes/origin/${SEMVER}"]]])
dir ("milvus/milvus-gpu") {
sh "helm install --wait --timeout 300 --set engine.image.tag=${DOCKER_VERSION} --set expose.type=clusterIP --name ${env.JOB_NAME}-${env.BUILD_NUMBER} -f ci/values.yaml --namespace milvus-1 --version 0.3.1 ."
sh "helm install --wait --timeout 300 --set engine.image.tag=${DOCKER_VERSION} --set expose.type=clusterIP --name ${env.JOB_NAME}-${env.BUILD_NUMBER} -f ci/values.yaml --namespace milvus-1 --version 0.4.0 ."
}
}
} catch (exc) {
......
......@@ -2,6 +2,16 @@
Please mark all change in change log and use the ticket from JIRA.
# Milvus 0.4.0 (2019-07-28)
## Bug
## Improvement
## New Feature
## Task
- MS-297 - disable mysql unit test
# Milvus 0.3.1 (2019-07-10)
......@@ -50,6 +60,7 @@ Please mark all change in change log and use the ticket from JIRA.
## Task
- MS-125 - Create 0.3.1 release branch
- MS-306 - Optimize build efficiency
# Milvus 0.3.0 (2019-06-30)
......
......@@ -9,8 +9,10 @@ BUILD_COVERAGE="OFF"
DB_PATH="/opt/milvus"
PROFILING="OFF"
BUILD_FAISS_WITH_MKL="OFF"
USE_JFROG_CACHE="OFF"
KNOWHERE_OPTS=""
while getopts "p:d:t:uhlrcgm" arg
while getopts "p:d:t:uhlrcgmj" arg
do
case $arg in
t)
......@@ -44,6 +46,10 @@ do
m)
BUILD_FAISS_WITH_MKL="ON"
;;
j)
USE_JFROG_CACHE="ON"
KNOWHERE_OPTS="${KNOWHERE_OPTS} -j"
;;
h) # help
echo "
......@@ -57,9 +63,10 @@ parameter:
-c: code coverage(default: OFF)
-g: profiling(default: OFF)
-m: build faiss with MKL(default: OFF)
-j: use jfrog cache build directory
usage:
./build.sh -t \${BUILD_TYPE} [-u] [-h] [-g] [-r] [-c] [-m]
./build.sh -t \${BUILD_TYPE} [-u] [-h] [-g] [-r] [-c] [-m] [-j]
"
exit 0
;;
......@@ -78,7 +85,7 @@ fi
# Build Knowhere
KNOWHERE_BUILD_DIR="`pwd`/thirdparty/knowhere_build"
pushd `pwd`/thirdparty/knowhere
./build.sh -t Release -p ${KNOWHERE_BUILD_DIR}
./build.sh -t Release -p ${KNOWHERE_BUILD_DIR} ${KNOWHERE_OPTS}
popd
cd cmake_build
......@@ -97,6 +104,7 @@ if [[ ${MAKE_CLEAN} == "ON" ]]; then
-DBUILD_FAISS_WITH_MKL=${BUILD_FAISS_WITH_MKL} \
-DMILVUS_ENABLE_THRIFT=${MILVUS_ENABLE_THRIFT} \
-DKNOWHERE_BUILD_DIR=${KNOWHERE_BUILD_DIR} \
-DUSE_JFROG_CACHE=${USE_JFROG_CACHE} \
$@ ../"
echo ${CMAKE_CMD}
......
# Define a function that extracts a cached package
function(ExternalProject_Use_Cache project_name package_file install_path)
message(STATUS "Will use cached package file: ${package_file}")
ExternalProject_Add(${project_name}
DOWNLOAD_COMMAND ${CMAKE_COMMAND} -E echo
"No download step needed (using cached package)"
CONFIGURE_COMMAND ${CMAKE_COMMAND} -E echo
"No configure step needed (using cached package)"
BUILD_COMMAND ${CMAKE_COMMAND} -E echo
"No build step needed (using cached package)"
INSTALL_COMMAND ${CMAKE_COMMAND} -E echo
"No install step needed (using cached package)"
)
# We want our tar files to contain the Install/<package> prefix (not for any
# very special reason, only for consistency and so that we can identify them
# in the extraction logs) which means that we must extract them in the
# binary (top-level build) directory to have them installed in the right
# place for subsequent ExternalProjects to pick them up. It seems that the
# only way to control the working directory is with Add_Step!
ExternalProject_Add_Step(${project_name} extract
ALWAYS 1
COMMAND
${CMAKE_COMMAND} -E echo
"Extracting ${package_file} to ${install_path}"
COMMAND
${CMAKE_COMMAND} -E tar xzvf ${package_file} ${install_path}
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
)
ExternalProject_Add_StepTargets(${project_name} extract)
endfunction()
# Define a function that creates a new cached package
function(ExternalProject_Create_Cache project_name package_file install_path cache_username cache_password cache_path)
if(EXISTS ${package_file})
message(STATUS "Removing existing package file: ${package_file}")
file(REMOVE ${package_file})
endif()
message(STATUS "Will create cached package file: ${package_file}")
ExternalProject_Add_Step(${project_name} package
DEPENDEES install
BYPRODUCTS ${package_file}
COMMAND ${CMAKE_COMMAND} -E echo "Updating cached package file: ${package_file}"
COMMAND ${CMAKE_COMMAND} -E tar czvf ${package_file} ${install_path}
COMMAND ${CMAKE_COMMAND} -E echo "Uploading package file ${package_file} to ${cache_path}"
COMMAND curl -u${cache_username}:${cache_password} -T ${package_file} ${cache_path}
)
ExternalProject_Add_StepTargets(${project_name} package)
endfunction()
function(ADD_THIRDPARTY_LIB LIB_NAME)
set(options)
set(one_value_args SHARED_LIB STATIC_LIB)
......@@ -92,4 +147,4 @@ function(ADD_THIRDPARTY_LIB LIB_NAME)
else()
message(FATAL_ERROR "No static or shared library provided for ${LIB_NAME}")
endif()
endfunction()
\ No newline at end of file
endfunction()
This diff is collapsed.
......@@ -369,6 +369,7 @@ Status DBImpl::MergeFiles(const std::string& table_id, const meta::DateT& date,
meta::TableFileSchema table_file;
table_file.table_id_ = table_id;
table_file.date_ = date;
table_file.file_type_ = meta::TableFileSchema::NEW_MERGE;
Status status = meta_ptr_->CreateTableFile(table_file);
if (!status.ok()) {
......@@ -529,7 +530,7 @@ Status DBImpl::BuildIndex(const meta::TableFileSchema& file) {
meta::TableFileSchema table_file;
table_file.table_id_ = file.table_id_;
table_file.date_ = file.date_;
table_file.file_type_ = meta::TableFileSchema::INDEX; // for multi-db-path, distribute index files evenly across the paths
table_file.file_type_ = meta::TableFileSchema::NEW_INDEX; // for multi-db-path, distribute index files evenly across the paths
Status status = meta_ptr_->CreateTableFile(table_file);
if (!status.ok()) {
ENGINE_LOG_ERROR << "Failed to create table: " << status.ToString();
......
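The two DBImpl changes above stop tagging in-progress merge and index targets with the generic NEW (or final INDEX) type: they now start life as NEW_MERGE and NEW_INDEX, so a crash mid-merge or mid-build leaves behind files that CleanUp can recognize and drop. A minimal sketch of that lifecycle, assuming simplified types and hypothetical helper names (not the actual Milvus code):

```cpp
// Sketch only: illustrates the file-type lifecycle introduced here.
// TableFileSchema is simplified; CreateMergeTarget/OnMergeFinished are
// hypothetical helpers, not part of the Milvus code base.
#include <iostream>

// Mirrors TableFileSchema::FILE_TYPE after this commit.
enum FileType { NEW, RAW, TO_INDEX, INDEX, TO_DELETE, NEW_MERGE, NEW_INDEX };

struct TableFile { int file_type_ = NEW; };

TableFile CreateMergeTarget() {
    TableFile f;
    f.file_type_ = NEW_MERGE;  // previously plain NEW
    return f;
}

void OnMergeFinished(TableFile& f) {
    f.file_type_ = RAW;  // promoted only after a successful merge
}

int main() {
    TableFile f = CreateMergeTarget();
    // If the process crashed here, CleanUp() could safely delete the file:
    // its type is still NEW_MERGE, never a committed RAW/INDEX type.
    OnMergeFinished(f);
    std::cout << "final type: " << f.file_type_ << "\n";  // 1 == RAW
}
```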
......@@ -302,19 +302,49 @@ Status DBMetaImpl::DescribeTable(TableSchema &table_schema) {
Status DBMetaImpl::HasNonIndexFiles(const std::string& table_id, bool& has) {
has = false;
try {
auto selected = ConnectorPtr->select(columns(&TableFileSchema::id_),
auto selected = ConnectorPtr->select(columns(&TableFileSchema::id_,
&TableFileSchema::file_type_),
where((c(&TableFileSchema::file_type_) == (int) TableFileSchema::RAW
or
c(&TableFileSchema::file_type_) == (int) TableFileSchema::NEW
or
c(&TableFileSchema::file_type_) == (int) TableFileSchema::NEW_MERGE
or
c(&TableFileSchema::file_type_) == (int) TableFileSchema::NEW_INDEX
or
c(&TableFileSchema::file_type_) == (int) TableFileSchema::TO_INDEX)
and c(&TableFileSchema::table_id_) == table_id
));
if (selected.size() >= 1) {
has = true;
} else {
has = false;
int raw_count = 0, new_count = 0, new_merge_count = 0, new_index_count = 0, to_index_count = 0;
for (auto &file : selected) {
switch (std::get<1>(file)) {
case (int) TableFileSchema::RAW:
raw_count++;
break;
case (int) TableFileSchema::NEW:
new_count++;
break;
case (int) TableFileSchema::NEW_MERGE:
new_merge_count++;
break;
case (int) TableFileSchema::NEW_INDEX:
new_index_count++;
break;
case (int) TableFileSchema::TO_INDEX:
to_index_count++;
break;
default:
break;
}
}
ENGINE_LOG_DEBUG << "Table " << table_id << " currently has raw files:" << raw_count
<< " new files:" << new_count << " new_merge files:" << new_merge_count
<< " new_index files:" << new_index_count << " to_index files:" << to_index_count;
}
} catch (std::exception &e) {
......@@ -389,7 +419,6 @@ Status DBMetaImpl::CreateTableFile(TableFileSchema &file_schema) {
MetricCollector metric;
NextFileId(file_schema.file_id_);
file_schema.file_type_ = TableFileSchema::NEW;
file_schema.dimension_ = table_schema.dimension_;
file_schema.size_ = 0;
file_schema.created_on_ = utils::GetMicroSecTimeStamp();
......@@ -1031,7 +1060,11 @@ Status DBMetaImpl::CleanUp() {
std::lock_guard<std::mutex> meta_lock(meta_mutex_);
auto files = ConnectorPtr->select(columns(&TableFileSchema::id_),
where(c(&TableFileSchema::file_type_) == (int) TableFileSchema::NEW));
where(c(&TableFileSchema::file_type_) == (int) TableFileSchema::NEW
or
c(&TableFileSchema::file_type_) == (int) TableFileSchema::NEW_INDEX
or
c(&TableFileSchema::file_type_) == (int) TableFileSchema::NEW_MERGE));
auto commited = ConnectorPtr->transaction([&]() mutable {
for (auto &file : files) {
......
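The rewritten HasNonIndexFiles also logs a per-type breakdown of the selected rows. The switch-based tally above can be expressed more compactly with a map; a self-contained sketch of the same counting logic, using a local stand-in for the sqlite_orm result set:

```cpp
// Sketch of the per-type tally behind the new debug log; equivalent to the
// switch statement above. Type constants mirror TableFileSchema.
#include <iostream>
#include <map>
#include <vector>

enum FileType { NEW, RAW, TO_INDEX, INDEX, TO_DELETE, NEW_MERGE, NEW_INDEX };

int main() {
    // Stand-in for the file_type_ column of the selected rows.
    std::vector<int> selected = {RAW, RAW, NEW_MERGE, TO_INDEX};

    std::map<int, int> counts;
    for (int type : selected) ++counts[type];  // buckets default to 0

    std::cout << "raw files:" << counts[RAW]
              << " new files:" << counts[NEW]
              << " new_merge files:" << counts[NEW_MERGE]
              << " new_index files:" << counts[NEW_INDEX]
              << " to_index files:" << counts[TO_INDEX] << "\n";
}
```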
......@@ -43,6 +43,8 @@ struct TableFileSchema {
TO_INDEX,
INDEX,
TO_DELETE,
NEW_MERGE,
NEW_INDEX,
} FILE_TYPE;
size_t id_ = 0;
......
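Note that NEW_MERGE and NEW_INDEX are appended after TO_DELETE rather than inserted next to NEW: file_type_ is persisted as a plain int in the SQLite and MySQL meta tables, so appending keeps every previously stored value meaningful. A sketch of the resulting enum follows; only the tail is visible in this hunk, so the leading members and the per-value comments are inferred from how the types are used elsewhere in this commit:

```cpp
// Assumed full shape of the enum (only the tail appears in the hunk above);
// the per-value comments are inferred, not taken from the source.
typedef enum {
    NEW,        // 0: file created but not yet committed
    RAW,        // 1: committed raw data file
    TO_INDEX,   // 2: raw file queued for an index build
    INDEX,      // 3: completed index file
    TO_DELETE,  // 4: marked for removal
    NEW_MERGE,  // 5: merge target still being written (added in 0.4.0)
    NEW_INDEX,  // 6: index file still being built (added in 0.4.0)
} FILE_TYPE;
```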
......@@ -404,6 +404,8 @@ Status MySQLMetaImpl::HasNonIndexFiles(const std::string &table_id, bool &has) {
"WHERE table_id = " << quote << table_id << " AND " <<
"(file_type = " << std::to_string(TableFileSchema::RAW) << " OR " <<
"file_type = " << std::to_string(TableFileSchema::NEW) << " OR " <<
"file_type = " << std::to_string(TableFileSchema::NEW_MERGE) << " OR " <<
"file_type = " << std::to_string(TableFileSchema::NEW_INDEX) << " OR " <<
"file_type = " << std::to_string(TableFileSchema::TO_INDEX) << ")) " <<
"AS " << quote << "check" << ";";
......@@ -706,7 +708,6 @@ Status MySQLMetaImpl::CreateTableFile(TableFileSchema &file_schema) {
MetricCollector metric;
NextFileId(file_schema.file_id_);
file_schema.file_type_ = TableFileSchema::NEW;
file_schema.dimension_ = table_schema.dimension_;
file_schema.size_ = 0;
file_schema.created_on_ = utils::GetMicroSecTimeStamp();
......@@ -1812,7 +1813,10 @@ Status MySQLMetaImpl::CleanUp() {
if (!res.empty()) {
ENGINE_LOG_DEBUG << "Remove table file type as NEW";
cleanUpQuery << "DELETE FROM TableFiles WHERE file_type = " << std::to_string(TableFileSchema::NEW) << ";";
cleanUpQuery << "DELETE FROM TableFiles WHERE file_type IN ("
<< std::to_string(TableFileSchema::NEW) << ","
<< std::to_string(TableFileSchema::NEW_MERGE) << ","
<< std::to_string(TableFileSchema::NEW_INDEX) << ");";
ENGINE_LOG_DEBUG << "MySQLMetaImpl::CleanUp: " << cleanUpQuery.str();
......
......@@ -34,7 +34,7 @@ std::string GetTableFileParentFolder(const DBMetaOptions& options, const meta::T
std::string target_path = options.path;
uint64_t index = 0;
if(meta::TableFileSchema::INDEX == table_file.file_type_) {
if(meta::TableFileSchema::NEW_INDEX == table_file.file_type_) {
// index file is large file and to be persisted permanently
// we need to distribute index files to each db_path averagely
// round robin according to a file counter
......
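Utils.cpp picks the parent folder before the file is written, and at that point an index file now carries NEW_INDEX rather than INDEX, hence the updated check. A self-contained sketch of the round-robin distribution the comment describes, with assumed names (not the Milvus implementation):

```cpp
// Sketch: spread permanent index files evenly across the configured db
// paths via a shared counter, as the comment above describes.
#include <atomic>
#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

std::string PickIndexPath(const std::vector<std::string>& paths) {
    static std::atomic<std::uint64_t> counter{0};
    return paths[counter++ % paths.size()];
}

int main() {
    const std::vector<std::string> paths = {"/data1/milvus", "/data2/milvus"};
    for (int i = 0; i < 4; ++i) {
        std::cout << PickIndexPath(paths) << "\n";  // alternates between the two
    }
}
```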
......@@ -64,17 +64,6 @@ set(knowhere_libs
cublas
)
target_link_libraries(db_test
${knowhere_libs}
${db_libs}
${unittest_libs}
sqlite
boost_system_static
boost_filesystem_static
lz4
mysqlpp
)
if(${BUILD_FAISS_WITH_MKL} STREQUAL "ON")
set(db_libs ${db_libs} ${MKL_LIBS} ${MKL_LIBS})
else()
......@@ -83,7 +72,7 @@ else()
openblas)
endif()
target_link_libraries(db_test ${db_libs} ${unittest_libs})
target_link_libraries(db_test ${db_libs} ${knowhere_libs} ${unittest_libs})
install(TARGETS db_test DESTINATION bin)
......@@ -46,7 +46,7 @@ namespace {
}
TEST_F(MySQLDBTest, DB_TEST) {
TEST_F(DISABLED_MySQLDBTest, DB_TEST) {
auto options = GetOptions();
auto db_ = engine::DBFactory::Build(options);
......@@ -139,7 +139,7 @@ TEST_F(MySQLDBTest, DB_TEST) {
delete dummyDB;
};
TEST_F(MySQLDBTest, SEARCH_TEST) {
TEST_F(DISABLED_MySQLDBTest, SEARCH_TEST) {
auto options = GetOptions();
auto db_ = engine::DBFactory::Build(options);
......@@ -202,7 +202,7 @@ TEST_F(MySQLDBTest, SEARCH_TEST) {
// TODO(linxj): add groundTruth assert
};
TEST_F(MySQLDBTest, ARHIVE_DISK_CHECK) {
TEST_F(DISABLED_MySQLDBTest, ARHIVE_DISK_CHECK) {
auto options = GetOptions();
options.meta.archive_conf = engine::ArchiveConf("delete", "disk:1");
......@@ -258,7 +258,7 @@ TEST_F(MySQLDBTest, ARHIVE_DISK_CHECK) {
delete dummyDB;
};
TEST_F(MySQLDBTest, DELETE_TEST) {
TEST_F(DISABLED_MySQLDBTest, DELETE_TEST) {
auto options = GetOptions();
options.meta.archive_conf = engine::ArchiveConf("delete", "disk:1");
......
......@@ -21,7 +21,7 @@
using namespace zilliz::milvus::engine;
TEST_F(MySQLTest, TABLE_TEST) {
TEST_F(DISABLED_MySQLTest, TABLE_TEST) {
DBMetaOptions options;
try {
options = getDBMetaOptions();
......@@ -63,7 +63,7 @@ TEST_F(MySQLTest, TABLE_TEST) {
ASSERT_TRUE(status.ok());
}
TEST_F(MySQLTest, TABLE_FILE_TEST) {
TEST_F(DISABLED_MySQLTest, TABLE_FILE_TEST) {
DBMetaOptions options;
try {
options = getDBMetaOptions();
......@@ -136,7 +136,7 @@ TEST_F(MySQLTest, TABLE_FILE_TEST) {
ASSERT_TRUE(status.ok());
}
TEST_F(MySQLTest, ARCHIVE_TEST_DAYS) {
TEST_F(DISABLED_MySQLTest, ARCHIVE_TEST_DAYS) {
srand(time(0));
DBMetaOptions options;
try {
......@@ -198,7 +198,7 @@ TEST_F(MySQLTest, ARCHIVE_TEST_DAYS) {
ASSERT_TRUE(status.ok());
}
TEST_F(MySQLTest, ARCHIVE_TEST_DISK) {
TEST_F(DISABLED_MySQLTest, ARCHIVE_TEST_DISK) {
DBMetaOptions options;
try {
options = getDBMetaOptions();
......@@ -252,7 +252,7 @@ TEST_F(MySQLTest, ARCHIVE_TEST_DISK) {
ASSERT_TRUE(status.ok());
}
TEST_F(MySQLTest, TABLE_FILES_TEST) {
TEST_F(DISABLED_MySQLTest, TABLE_FILES_TEST) {
DBMetaOptions options;
try {
options = getDBMetaOptions();
......
......@@ -85,7 +85,7 @@ void MetaTest::TearDown() {
impl_->DropAll();
}
zilliz::milvus::engine::DBMetaOptions MySQLTest::getDBMetaOptions() {
zilliz::milvus::engine::DBMetaOptions DISABLED_MySQLTest::getDBMetaOptions() {
// std::string path = "/tmp/milvus_test";
// engine::DBMetaOptions options = engine::DBMetaOptionsFactory::Build(path);
zilliz::milvus::engine::DBMetaOptions options;
......@@ -99,7 +99,7 @@ zilliz::milvus::engine::DBMetaOptions MySQLTest::getDBMetaOptions() {
return options;
}
zilliz::milvus::engine::Options MySQLDBTest::GetOptions() {
zilliz::milvus::engine::Options DISABLED_MySQLDBTest::GetOptions() {
auto options = engine::OptionsFactory::Build();
options.meta.path = "/tmp/milvus_test";
options.meta.backend_uri = DBTestEnvironment::getURI();
......
......@@ -77,13 +77,13 @@ class MetaTest : public DBTest {
virtual void TearDown() override;
};
class MySQLTest : public ::testing::Test {
class DISABLED_MySQLTest : public ::testing::Test {
protected:
// std::shared_ptr<zilliz::milvus::engine::meta::MySQLMetaImpl> impl_;
zilliz::milvus::engine::DBMetaOptions getDBMetaOptions();
};
class MySQLDBTest : public ::testing::Test {
class DISABLED_MySQLDBTest : public ::testing::Test {
protected:
zilliz::milvus::engine::Options GetOptions();
};
......
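Renaming the fixtures is how these MySQL-backed tests are switched off: googletest treats any fixture or test name beginning with DISABLED_ as disabled, so every TEST_F above still compiles but is skipped in normal runs. A minimal illustration with a hypothetical fixture:

```cpp
#include <gtest/gtest.h>

// googletest skips any test whose fixture name or test name starts with
// DISABLED_, so renaming the fixture disables every TEST_F that uses it.
class DISABLED_ExampleMySQLTest : public ::testing::Test {};

TEST_F(DISABLED_ExampleMySQLTest, Skipped) {
    FAIL() << "not run by a default ./db_test invocation";
}

// The tests can still be forced to run with:
//   ./db_test --gtest_also_run_disabled_tests
```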