Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
BaiXuePrincess
milvus
提交
75a9b4fb
milvus
项目概览
BaiXuePrincess
/
milvus
与 Fork 源项目一致
从无法访问的项目Fork
通知
7
Star
4
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
milvus
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
体验新版 GitCode,发现更多精彩内容 >>
提交
75a9b4fb
编写于
6月 20, 2019
作者:
Z
zhiru
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
finished MySQLMetaImpl; need to test with DBImpl
Former-commit-id: f3a9848f5f2d7a8b15945ae824827371316d0b79
上级
47537d07
变更
7
隐藏空白更改
内联
并排
Showing
7 changed file
with
1433 addition
and
735 deletion
+1433
-735
cpp/src/db/DBMetaImpl.cpp
cpp/src/db/DBMetaImpl.cpp
+1
-1
cpp/src/db/MetaTypes.h
cpp/src/db/MetaTypes.h
+1
-1
cpp/src/db/MySQLMetaImpl.cpp
cpp/src/db/MySQLMetaImpl.cpp
+1004
-722
cpp/src/db/MySQLMetaImpl.h
cpp/src/db/MySQLMetaImpl.h
+1
-2
cpp/unittest/CMakeLists.txt
cpp/unittest/CMakeLists.txt
+2
-2
cpp/unittest/db/MySQLMetaImpl_test.cpp
cpp/unittest/db/MySQLMetaImpl_test.cpp
+422
-5
cpp/unittest/db/utils.h
cpp/unittest/db/utils.h
+2
-2
未找到文件。
cpp/src/db/DBMetaImpl.cpp
浏览文件 @
75a9b4fb
...
@@ -164,7 +164,7 @@ Status DBMetaImpl::DropPartitionsByDates(const std::string &table_id,
...
@@ -164,7 +164,7 @@ Status DBMetaImpl::DropPartitionsByDates(const std::string &table_id,
Status
DBMetaImpl
::
CreateTable
(
TableSchema
&
table_schema
)
{
Status
DBMetaImpl
::
CreateTable
(
TableSchema
&
table_schema
)
{
server
::
Metrics
::
GetInstance
().
MetaAccessTotalIncrement
();
server
::
Metrics
::
GetInstance
().
MetaAccessTotalIncrement
();
if
(
table_schema
.
table_id_
==
""
)
{
if
(
table_schema
.
table_id_
.
empty
()
)
{
NextTableId
(
table_schema
.
table_id_
);
NextTableId
(
table_schema
.
table_id_
);
}
}
table_schema
.
files_cnt_
=
0
;
table_schema
.
files_cnt_
=
0
;
...
...
cpp/src/db/MetaTypes.h
浏览文件 @
75a9b4fb
...
@@ -24,7 +24,7 @@ struct TableSchema {
...
@@ -24,7 +24,7 @@ struct TableSchema {
size_t
id_
;
size_t
id_
;
std
::
string
table_id_
;
std
::
string
table_id_
;
size_t
files_cnt_
=
0
;
size_t
files_cnt_
=
0
;
uint16_t
dimension_
;
uint16_t
dimension_
=
0
;
std
::
string
location_
;
std
::
string
location_
;
long
created_on_
;
long
created_on_
;
int
engine_type_
=
(
int
)
EngineType
::
FAISS_IDMAP
;
int
engine_type_
=
(
int
)
EngineType
::
FAISS_IDMAP
;
...
...
cpp/src/db/MySQLMetaImpl.cpp
浏览文件 @
75a9b4fb
...
@@ -29,8 +29,7 @@ namespace meta {
...
@@ -29,8 +29,7 @@ namespace meta {
using
namespace
mysqlpp
;
using
namespace
mysqlpp
;
// static std::unique_ptr<Connection> connectionPtr(new Connection());
static
std
::
unique_ptr
<
Connection
>
connectionPtr
(
new
Connection
());
static
Connection
*
connectionPtr
=
new
Connection
();
namespace
{
namespace
{
...
@@ -79,23 +78,19 @@ namespace meta {
...
@@ -79,23 +78,19 @@ namespace meta {
MySQLMetaImpl
::
MySQLMetaImpl
(
const
DBMetaOptions
&
options_
)
MySQLMetaImpl
::
MySQLMetaImpl
(
const
DBMetaOptions
&
options_
)
:
options_
(
options_
)
{
:
options_
(
options_
)
{
//
Initialize();
Initialize
();
}
}
Status
MySQLMetaImpl
::
Initialize
()
{
Status
MySQLMetaImpl
::
Initialize
()
{
// if (!boost::filesystem::is_directory(options_.path)) {
// auto ret = boost::filesystem::create_directory(options_.path);
std
::
string
path
=
options_
.
path
;
// if (!ret) {
if
(
!
boost
::
filesystem
::
is_directory
(
path
))
{
// ENGINE_LOG_ERROR << "Create directory " << options_.path << " Error";
auto
ret
=
boost
::
filesystem
::
create_directory
(
path
);
// }
if
(
!
ret
)
{
// assert(ret);
ENGINE_LOG_ERROR
<<
"Create directory "
<<
path
<<
" Error"
;
// }
}
assert
(
ret
);
// ConnectorPtr = std::make_unique<ConnectorT>(StoragePrototype(options_.path + "/meta.sqlite"));
}
//
// ConnectorPtr->sync_schema();
// ConnectorPtr->open_forever(); // thread safe option
// ConnectorPtr->pragma.journal_mode(journal_mode::WAL); // WAL => write ahead log
std
::
string
uri
=
options_
.
backend_uri
;
std
::
string
uri
=
options_
.
backend_uri
;
...
@@ -129,7 +124,7 @@ namespace meta {
...
@@ -129,7 +124,7 @@ namespace meta {
}
}
const
char
*
dbName
=
pieces_match
[
6
].
str
().
c_str
();
const
char
*
dbName
=
pieces_match
[
6
].
str
().
c_str
();
//std::cout << dbName << " " << serverAddress << " " << username << " " << password << " " << port << std::endl;
//std::cout << dbName << " " << serverAddress << " " << username << " " << password << " " << port << std::endl;
connectionPtr
->
set_option
(
new
MultiStatementsOption
(
true
));
//
connectionPtr->set_option(new MultiStatementsOption(true));
try
{
try
{
if
(
!
connectionPtr
->
connect
(
dbName
,
serverAddress
,
username
,
password
,
port
))
{
if
(
!
connectionPtr
->
connect
(
dbName
,
serverAddress
,
username
,
password
,
port
))
{
...
@@ -137,42 +132,54 @@ namespace meta {
...
@@ -137,42 +132,54 @@ namespace meta {
}
}
CleanUp
();
CleanUp
();
Query
InitializeQuery
=
connectionPtr
->
query
();
Query
InitializeQuery
=
connectionPtr
->
query
();
InitializeQuery
<<
"DROP TABLE IF EXISTS meta, metaFile;"
;
// InitializeQuery << "DROP TABLE IF EXISTS meta, metaFile;";
InitializeQuery
<<
"CREATE TABLE meta ("
<<
InitializeQuery
<<
"CREATE TABLE IF NOT EXISTS meta ("
<<
"id BIGINT AUTO INCREMENT PRIMARY KEY, "
<<
"id BIGINT PRIMARY KEY AUTO_INCREMENT, "
<<
"table_id VARCHAR(255) UNIQUE, "
<<
"table_id VARCHAR(255) UNIQUE NOT NULL, "
<<
"dimension SMALLINT, "
<<
"dimension SMALLINT NOT NULL, "
<<
"created_on BIGINT, "
<<
"created_on BIGINT NOT NULL, "
<<
"files_cnt BIGINT DEFAULT 0, "
<<
"files_cnt BIGINT DEFAULT 0 NOT NULL, "
<<
"engine_type INT DEFAULT 1, "
<<
"engine_type INT DEFAULT 1 NOT NULL, "
<<
"store_raw_data BOOL DEFAULT false);"
;
"store_raw_data BOOL DEFAULT false NOT NULL);"
;
InitializeQuery
<<
"CREATE TABLE metaFile ("
<<
if
(
!
InitializeQuery
.
exec
())
{
"id BIGINT AUTO INCREMENT PRIMARY KEY, "
<<
return
Status
::
DBTransactionError
(
"Initialization Error"
,
InitializeQuery
.
error
());
"table_id VARCHAR(255), "
<<
"engine_type INT DEFAULT 1, "
<<
"file_id VARCHAR(255), "
<<
"file_type INT DEFAULT 0, "
<<
"size BIGINT DEFAULT 0, "
<<
"updated_time BIGINT, "
<<
"created_on BIGINT, "
<<
"date INT DEFAULT -1);"
;
if
(
InitializeQuery
.
exec
())
{
return
Status
::
OK
();
}
else
{
return
Status
::
DBTransactionError
(
"Initialization Error: "
,
InitializeQuery
.
error
());
}
}
InitializeQuery
<<
"CREATE TABLE IF NOT EXISTS metaFile ("
<<
"id BIGINT PRIMARY KEY AUTO_INCREMENT, "
<<
"table_id VARCHAR(255) NOT NULL, "
<<
"engine_type INT DEFAULT 1 NOT NULL, "
<<
"file_id VARCHAR(255) NOT NULL, "
<<
"file_type INT DEFAULT 0 NOT NULL, "
<<
"size BIGINT DEFAULT 0 NOT NULL, "
<<
"updated_time BIGINT NOT NULL, "
<<
"created_on BIGINT NOT NULL, "
<<
"date INT DEFAULT -1 NOT NULL);"
;
if
(
!
InitializeQuery
.
exec
())
{
return
Status
::
DBTransactionError
(
"Initialization Error"
,
InitializeQuery
.
error
());
}
return
Status
::
OK
();
// if (InitializeQuery.exec()) {
// std::cout << "XXXXXXXXXXXXXXXXXXXXXXXXX" << std::endl;
// while (InitializeQuery.more_results()) {
// InitializeQuery.store_next();
// }
// return Status::OK();
// } else {
// return Status::DBTransactionError("Initialization Error", InitializeQuery.error());
// }
}
catch
(
const
ConnectionFailed
&
er
)
{
}
catch
(
const
ConnectionFailed
&
er
)
{
return
Status
::
DBTransactionError
(
"Failed to connect to
MySQL server:
"
,
er
.
what
());
return
Status
::
DBTransactionError
(
"Failed to connect to
database server
"
,
er
.
what
());
}
catch
(
const
BadQuery
&
er
)
{
}
catch
(
const
BadQuery
&
er
)
{
// Handle any query errors
// Handle any query errors
return
Status
::
DBTransactionError
(
"QUERY ERROR DURING INITIALIZATION
:
"
,
er
.
what
());
return
Status
::
DBTransactionError
(
"QUERY ERROR DURING INITIALIZATION"
,
er
.
what
());
}
catch
(
const
Exception
&
er
)
{
}
catch
(
const
Exception
&
er
)
{
// Catch-all for any other MySQL++ exceptions
// Catch-all for any other MySQL++ exceptions
return
Status
::
DBTransactionError
(
"GENERAL ERROR DURING INITIALIZATION
:
"
,
er
.
what
());
return
Status
::
DBTransactionError
(
"GENERAL ERROR DURING INITIALIZATION"
,
er
.
what
());
}
}
}
}
else
{
else
{
...
@@ -183,272 +190,404 @@ namespace meta {
...
@@ -183,272 +190,404 @@ namespace meta {
// PXU TODO: Temp solution. Will fix later
// PXU TODO: Temp solution. Will fix later
Status
MySQLMetaImpl
::
DropPartitionsByDates
(
const
std
::
string
&
table_id
,
Status
MySQLMetaImpl
::
DropPartitionsByDates
(
const
std
::
string
&
table_id
,
const
DatesT
&
dates
)
{
const
DatesT
&
dates
)
{
// if (dates.size() == 0) {
if
(
dates
.
size
()
==
0
)
{
// return Status::OK();
return
Status
::
OK
();
// }
}
//
// TableSchema table_schema;
TableSchema
table_schema
;
// table_schema.table_id_ = table_id;
table_schema
.
table_id_
=
table_id
;
// auto status = DescribeTable(table_schema);
auto
status
=
DescribeTable
(
table_schema
);
// if (!status.ok()) {
if
(
!
status
.
ok
())
{
// return status;
return
status
;
// }
}
//
// auto yesterday = GetDateWithDelta(-1);
auto
yesterday
=
GetDateWithDelta
(
-
1
);
//
// for (auto &date : dates) {
for
(
auto
&
date
:
dates
)
{
// if (date >= yesterday) {
if
(
date
>=
yesterday
)
{
// return Status::Error("Could not delete partitions with 2 days");
return
Status
::
Error
(
"Could not delete partitions within 2 days"
);
// }
}
// }
}
//
// try {
try
{
// ConnectorPtr->update_all(
// set(
Query
dropPartitionsByDatesQuery
=
connectionPtr
->
query
();
// c(&TableFileSchema::file_type_) = (int) TableFileSchema::TO_DELETE
// ),
std
::
stringstream
dateListSS
;
// where(
for
(
auto
&
date
:
dates
)
{
// c(&TableFileSchema::table_id_) == table_id and
dateListSS
<<
std
::
to_string
(
date
)
<<
", "
;
// in(&TableFileSchema::date_, dates)
}
// ));
std
::
string
dateListStr
=
dateListSS
.
str
();
// } catch (std::exception &e) {
dateListStr
=
dateListStr
.
substr
(
0
,
dateListStr
.
size
()
-
2
);
//remove the last ", "
// HandleException(e);
// }
dropPartitionsByDatesQuery
<<
"UPDATE metaFile "
<<
"SET file_type = "
<<
std
::
to_string
(
TableFileSchema
::
TO_DELETE
)
<<
" "
<<
"WHERE table_id = "
<<
quote
<<
table_id
<<
" AND "
<<
"date in ("
<<
dateListStr
<<
");"
;
if
(
!
dropPartitionsByDatesQuery
.
exec
())
{
return
Status
::
DBTransactionError
(
"QUERY ERROR WHEN DROPPING PARTITIONS BY DATES"
,
dropPartitionsByDatesQuery
.
error
());
}
}
catch
(
const
BadQuery
&
er
)
{
// Handle any query errors
return
Status
::
DBTransactionError
(
"QUERY ERROR WHEN DROPPING PARTITIONS BY DATES"
,
er
.
what
());
}
catch
(
const
Exception
&
er
)
{
// Catch-all for any other MySQL++ exceptions
return
Status
::
DBTransactionError
(
"GENERAL ERROR WHEN DROPPING PARTITIONS BY DATES"
,
er
.
what
());
}
return
Status
::
OK
();
return
Status
::
OK
();
}
}
Status
MySQLMetaImpl
::
CreateTable
(
TableSchema
&
table_schema
)
{
Status
MySQLMetaImpl
::
CreateTable
(
TableSchema
&
table_schema
)
{
// server::Metrics::GetInstance().MetaAccessTotalIncrement();
server
::
Metrics
::
GetInstance
().
MetaAccessTotalIncrement
();
// if (table_schema.table_id_.empty()) {
if
(
table_schema
.
table_id_
.
empty
())
{
// NextTableId(table_schema.table_id_);
NextTableId
(
table_schema
.
table_id_
);
// }
}
// table_schema.files_cnt_ = 0;
table_schema
.
files_cnt_
=
0
;
// table_schema.id_ = -1;
table_schema
.
id_
=
-
1
;
// table_schema.created_on_ = utils::GetMicroSecTimeStamp();
table_schema
.
created_on_
=
utils
::
GetMicroSecTimeStamp
();
// auto start_time = METRICS_NOW_TIME;
auto
start_time
=
METRICS_NOW_TIME
;
// {
{
// try {
try
{
// Query addTableQuery = connectionPtr->query();
Query
createTableQuery
=
connectionPtr
->
query
();
//
std
::
string
id
=
"NULL"
;
//auto-increment
// } catch (...) {
std
::
string
table_id
=
table_schema
.
table_id_
;
// return Status::DBTransactionError("Add Table Error");
std
::
string
dimension
=
std
::
to_string
(
table_schema
.
dimension_
);
// }
std
::
string
created_on
=
std
::
to_string
(
table_schema
.
created_on_
);
// }
std
::
string
files_cnt
=
"0"
;
// auto end_time = METRICS_NOW_TIME;
std
::
string
engine_type
=
std
::
to_string
(
table_schema
.
engine_type_
);
// auto total_time = METRICS_MICROSECONDS(start_time, end_time);
std
::
string
store_raw_data
=
table_schema
.
store_raw_data_
?
"true"
:
"false"
;
// server::Metrics::GetInstance().MetaAccessDurationSecondsHistogramObserve(total_time);
createTableQuery
<<
"INSERT INTO meta VALUES"
<<
//
"("
<<
id
<<
", "
<<
quote
<<
table_id
<<
", "
<<
dimension
<<
", "
<<
// auto table_path = GetTablePath(table_schema.table_id_);
created_on
<<
", "
<<
files_cnt
<<
", "
<<
engine_type
<<
", "
<<
store_raw_data
// table_schema.location_ = table_path;
<<
");"
;
// if (!boost::filesystem::is_directory(table_path)) {
if
(
SimpleResult
res
=
createTableQuery
.
execute
())
{
// auto ret = boost::filesystem::create_directories(table_path);
table_schema
.
id_
=
res
.
insert_id
();
//Might need to use SELECT LAST_INSERT_ID()?
// if (!ret) {
// std::cout << table_schema.id_ << std::endl;
// ENGINE_LOG_ERROR << "Create directory " << table_path << " Error";
}
// }
else
{
// assert(ret);
return
Status
::
DBTransactionError
(
"Add Table Error"
,
createTableQuery
.
error
());
// }
}
}
catch
(
const
BadQuery
&
er
)
{
// Handle any query errors
return
Status
::
DBTransactionError
(
"QUERY ERROR WHEN ADDING TABLE"
,
er
.
what
());
}
catch
(
const
Exception
&
er
)
{
// Catch-all for any other MySQL++ exceptions
return
Status
::
DBTransactionError
(
"GENERAL ERROR WHEN ADDING TABLE"
,
er
.
what
());
}
}
auto
end_time
=
METRICS_NOW_TIME
;
auto
total_time
=
METRICS_MICROSECONDS
(
start_time
,
end_time
);
server
::
Metrics
::
GetInstance
().
MetaAccessDurationSecondsHistogramObserve
(
total_time
);
auto
table_path
=
GetTablePath
(
table_schema
.
table_id_
);
table_schema
.
location_
=
table_path
;
if
(
!
boost
::
filesystem
::
is_directory
(
table_path
))
{
auto
ret
=
boost
::
filesystem
::
create_directories
(
table_path
);
if
(
!
ret
)
{
ENGINE_LOG_ERROR
<<
"Create directory "
<<
table_path
<<
" Error"
;
}
assert
(
ret
);
}
return
Status
::
OK
();
return
Status
::
OK
();
}
}
Status
MySQLMetaImpl
::
DeleteTable
(
const
std
::
string
&
table_id
)
{
Status
MySQLMetaImpl
::
DeleteTable
(
const
std
::
string
&
table_id
)
{
// try {
try
{
// //drop the table from meta
//drop the table from meta
// auto tables = ConnectorPtr->select(columns(&TableSchema::id_),
Query
deleteTableQuery
=
connectionPtr
->
query
();
// where(c(&TableSchema::table_id_) == table_id));
deleteTableQuery
<<
"DELETE FROM meta WHERE table_id = "
<<
quote
<<
table_id
<<
";"
;
// for (auto &table : tables) {
if
(
deleteTableQuery
.
exec
())
{
// ConnectorPtr->remove<TableSchema>(std::get<0>(table));
return
Status
::
OK
();
// }
}
// } catch (std::exception &e) {
else
{
// HandleException(e);
return
Status
::
DBTransactionError
(
"Delete Table Error"
,
deleteTableQuery
.
error
());
// }
}
}
catch
(
const
BadQuery
&
er
)
{
return
Status
::
OK
();
// Handle any query errors
return
Status
::
DBTransactionError
(
"QUERY ERROR WHEN DELETING TABLE"
,
er
.
what
());
}
catch
(
const
Exception
&
er
)
{
// Catch-all for any other MySQL++ exceptions
return
Status
::
DBTransactionError
(
"GENERAL ERROR WHEN DELETING TABLE"
,
er
.
what
());
}
}
}
Status
MySQLMetaImpl
::
DescribeTable
(
TableSchema
&
table_schema
)
{
Status
MySQLMetaImpl
::
DescribeTable
(
TableSchema
&
table_schema
)
{
// try {
try
{
// server::Metrics::GetInstance().MetaAccessTotalIncrement();
server
::
Metrics
::
GetInstance
().
MetaAccessTotalIncrement
();
// auto start_time = METRICS_NOW_TIME;
auto
start_time
=
METRICS_NOW_TIME
;
// auto groups = ConnectorPtr->select(columns(&TableSchema::id_,
// &TableSchema::table_id_,
Query
describeTableQuery
=
connectionPtr
->
query
();
// &TableSchema::files_cnt_,
describeTableQuery
<<
"SELECT id, table_id, dimension, files_cnt, engine_type, store_raw_data "
<<
// &TableSchema::dimension_,
"FROM meta "
<<
// &TableSchema::engine_type_,
"WHERE table_id = "
<<
quote
<<
table_schema
.
table_id_
<<
";"
;
// &TableSchema::store_raw_data_),
StoreQueryResult
res
=
describeTableQuery
.
store
();
// where(c(&TableSchema::table_id_) == table_schema.table_id_));
// auto end_time = METRICS_NOW_TIME;
auto
end_time
=
METRICS_NOW_TIME
;
// auto total_time = METRICS_MICROSECONDS(start_time, end_time);
auto
total_time
=
METRICS_MICROSECONDS
(
start_time
,
end_time
);
// server::Metrics::GetInstance().MetaAccessDurationSecondsHistogramObserve(total_time);
server
::
Metrics
::
GetInstance
().
MetaAccessDurationSecondsHistogramObserve
(
total_time
);
// assert(groups.size() <= 1);
// if (groups.size() == 1) {
assert
(
res
&&
res
.
num_rows
()
<=
1
);
// table_schema.id_ = std::get<0>(groups[0]);
if
(
res
.
num_rows
()
==
1
)
{
// table_schema.files_cnt_ = std::get<2>(groups[0]);
const
Row
&
resRow
=
res
[
0
];
// table_schema.dimension_ = std::get<3>(groups[0]);
// table_schema.engine_type_ = std::get<4>(groups[0]);
// std::string id;
// table_schema.store_raw_data_ = std::get<5>(groups[0]);
// resRow["id"].to_string(id);
// } else {
// table_schema.id_ = std::stoul(id);
// return Status::NotFound("Table " + table_schema.table_id_ + " not found");
table_schema
.
id_
=
resRow
[
"id"
];
//implicit conversion
// }
//
std
::
string
table_id
;
// auto table_path = GetTablePath(table_schema.table_id_);
resRow
[
"table_id"
].
to_string
(
table_id
);
// table_schema.location_ = table_path;
table_schema
.
table_id_
=
table_id
;
//
// } catch (std::exception &e) {
// std::string created_on;
// HandleException(e);
// resRow["created_on"].to_string(created_on);
// }
// table_schema.created_on_ = std::stol(created_on);
table_schema
.
dimension_
=
resRow
[
"dimension"
];
// std::string files_cnt;
// resRow["files_cnt"].to_string(files_cnt);
// table_schema.files_cnt_ = std::stoul(files_cnt);
table_schema
.
files_cnt_
=
resRow
[
"files_cnt"
];
// std::string engine_type;
// resRow["engine_type"].to_string(engine_type);
// table_schema.engine_type_ = std::stoi(engine_type);
table_schema
.
engine_type_
=
resRow
[
"engine_type"
];
table_schema
.
store_raw_data_
=
(
resRow
[
"store_raw_data"
].
compare
(
"true"
)
==
0
);
}
else
{
return
Status
::
NotFound
(
"Table "
+
table_schema
.
table_id_
+
" not found"
);
}
auto
table_path
=
GetTablePath
(
table_schema
.
table_id_
);
table_schema
.
location_
=
table_path
;
}
catch
(
const
BadQuery
&
er
)
{
// Handle any query errors
return
Status
::
DBTransactionError
(
"QUERY ERROR WHEN DESCRIBING TABLE"
,
er
.
what
());
}
catch
(
const
Exception
&
er
)
{
// Catch-all for any other MySQL++ exceptions
return
Status
::
DBTransactionError
(
"GENERAL ERROR WHEN DESCRIBING TABLE"
,
er
.
what
());
}
return
Status
::
OK
();
return
Status
::
OK
();
}
}
Status
MySQLMetaImpl
::
HasTable
(
const
std
::
string
&
table_id
,
bool
&
has_or_not
)
{
Status
MySQLMetaImpl
::
HasTable
(
const
std
::
string
&
table_id
,
bool
&
has_or_not
)
{
// try {
try
{
// server::Metrics::GetInstance().MetaAccessTotalIncrement();
server
::
Metrics
::
GetInstance
().
MetaAccessTotalIncrement
();
// auto start_time = METRICS_NOW_TIME;
auto
start_time
=
METRICS_NOW_TIME
;
//
// auto tables = ConnectorPtr->select(columns(&TableSchema::id_),
Query
hasTableQuery
=
connectionPtr
->
query
();
// where(c(&TableSchema::table_id_) == table_id));
//since table_id is a unique column we just need to check whether it exists or not
// auto end_time = METRICS_NOW_TIME;
hasTableQuery
<<
"SELECT EXISTS (SELECT 1 FROM meta WHERE table_id = "
<<
quote
<<
table_id
<<
") "
// auto total_time = METRICS_MICROSECONDS(start_time, end_time);
<<
"AS "
<<
quote
<<
"check"
<<
";"
;
// server::Metrics::GetInstance().MetaAccessDurationSecondsHistogramObserve(total_time);
StoreQueryResult
res
=
hasTableQuery
.
store
();
// assert(tables.size() <= 1);
// if (tables.size() == 1) {
auto
end_time
=
METRICS_NOW_TIME
;
// has_or_not = true;
auto
total_time
=
METRICS_MICROSECONDS
(
start_time
,
end_time
);
// } else {
server
::
Metrics
::
GetInstance
().
MetaAccessDurationSecondsHistogramObserve
(
total_time
);
// has_or_not = false;
// }
assert
(
res
&&
res
.
num_rows
()
==
1
);
// } catch (std::exception &e) {
int
check
=
res
[
0
][
"check"
];
// HandleException(e);
has_or_not
=
(
check
==
1
);
// }
}
catch
(
const
BadQuery
&
er
)
{
// Handle any query errors
return
Status
::
DBTransactionError
(
"QUERY ERROR WHEN CHECKING IF TABLE EXISTS"
,
er
.
what
());
}
catch
(
const
Exception
&
er
)
{
// Catch-all for any other MySQL++ exceptions
return
Status
::
DBTransactionError
(
"GENERAL ERROR WHEN CHECKING IF TABLE EXISTS"
,
er
.
what
());
}
return
Status
::
OK
();
return
Status
::
OK
();
}
}
Status
MySQLMetaImpl
::
AllTables
(
std
::
vector
<
TableSchema
>&
table_schema_array
)
{
Status
MySQLMetaImpl
::
AllTables
(
std
::
vector
<
TableSchema
>&
table_schema_array
)
{
// try {
try
{
// server::Metrics::GetInstance().MetaAccessTotalIncrement();
server
::
Metrics
::
GetInstance
().
MetaAccessTotalIncrement
();
// auto start_time = METRICS_NOW_TIME;
auto
start_time
=
METRICS_NOW_TIME
;
// auto selected = ConnectorPtr->select(columns(&TableSchema::id_,
// &TableSchema::table_id_,
Query
allTablesQuery
=
connectionPtr
->
query
();
// &TableSchema::files_cnt_,
allTablesQuery
<<
"SELECT id, table_id, dimension, files_cnt, engine_type, store_raw_data "
<<
// &TableSchema::dimension_,
"FROM meta;"
;
// &TableSchema::engine_type_,
StoreQueryResult
res
=
allTablesQuery
.
store
();
// &TableSchema::store_raw_data_));
// auto end_time = METRICS_NOW_TIME;
auto
end_time
=
METRICS_NOW_TIME
;
// auto total_time = METRICS_MICROSECONDS(start_time, end_time);
auto
total_time
=
METRICS_MICROSECONDS
(
start_time
,
end_time
);
// server::Metrics::GetInstance().MetaAccessDurationSecondsHistogramObserve(total_time);
server
::
Metrics
::
GetInstance
().
MetaAccessDurationSecondsHistogramObserve
(
total_time
);
// for (auto &table : selected) {
// TableSchema schema;
for
(
auto
&
resRow
:
res
)
{
// schema.id_ = std::get<0>(table);
TableSchema
table_schema
;
// schema.table_id_ = std::get<1>(table);
// schema.files_cnt_ = std::get<2>(table);
table_schema
.
id_
=
resRow
[
"id"
];
//implicit conversion
// schema.dimension_ = std::get<3>(table);
// schema.engine_type_ = std::get<4>(table);
std
::
string
table_id
;
// schema.store_raw_data_ = std::get<5>(table);
resRow
[
"table_id"
].
to_string
(
table_id
);
//
table_schema
.
table_id_
=
table_id
;
// table_schema_array.emplace_back(schema);
// }
table_schema
.
dimension_
=
resRow
[
"dimension"
];
// } catch (std::exception &e) {
// HandleException(e);
table_schema
.
files_cnt_
=
resRow
[
"files_cnt"
];
// }
table_schema
.
engine_type_
=
resRow
[
"engine_type"
];
table_schema
.
store_raw_data_
=
(
resRow
[
"store_raw_data"
].
compare
(
"true"
)
==
0
);
table_schema_array
.
emplace_back
(
table_schema
);
}
}
catch
(
const
BadQuery
&
er
)
{
// Handle any query errors
return
Status
::
DBTransactionError
(
"QUERY ERROR WHEN DESCRIBING ALL TABLES"
,
er
.
what
());
}
catch
(
const
Exception
&
er
)
{
// Catch-all for any other MySQL++ exceptions
return
Status
::
DBTransactionError
(
"GENERAL ERROR WHEN DESCRIBING ALL TABLES"
,
er
.
what
());
}
return
Status
::
OK
();
return
Status
::
OK
();
}
}
Status
MySQLMetaImpl
::
CreateTableFile
(
TableFileSchema
&
file_schema
)
{
Status
MySQLMetaImpl
::
CreateTableFile
(
TableFileSchema
&
file_schema
)
{
// if (file_schema.date_ == EmptyDate) {
if
(
file_schema
.
date_
==
EmptyDate
)
{
// file_schema.date_ = Meta::GetDate();
file_schema
.
date_
=
Meta
::
GetDate
();
// }
}
// TableSchema table_schema;
TableSchema
table_schema
;
// table_schema.table_id_ = file_schema.table_id_;
table_schema
.
table_id_
=
file_schema
.
table_id_
;
// auto status = DescribeTable(table_schema);
auto
status
=
DescribeTable
(
table_schema
);
// if (!status.ok()) {
if
(
!
status
.
ok
())
{
// return status;
return
status
;
// }
}
//
// NextFileId(file_schema.file_id_);
NextFileId
(
file_schema
.
file_id_
);
// file_schema.file_type_ = TableFileSchema::NEW;
file_schema
.
file_type_
=
TableFileSchema
::
NEW
;
// file_schema.dimension_ = table_schema.dimension_;
file_schema
.
dimension_
=
table_schema
.
dimension_
;
// file_schema.size_ = 0;
file_schema
.
size_
=
0
;
// file_schema.created_on_ = utils::GetMicroSecTimeStamp();
file_schema
.
created_on_
=
utils
::
GetMicroSecTimeStamp
();
// file_schema.updated_time_ = file_schema.created_on_;
file_schema
.
updated_time_
=
file_schema
.
created_on_
;
// file_schema.engine_type_ = table_schema.engine_type_;
file_schema
.
engine_type_
=
table_schema
.
engine_type_
;
// GetTableFilePath(file_schema);
GetTableFilePath
(
file_schema
);
//
// {
{
// try {
try
{
// server::Metrics::GetInstance().MetaAccessTotalIncrement();
server
::
Metrics
::
GetInstance
().
MetaAccessTotalIncrement
();
// auto start_time = METRICS_NOW_TIME;
auto
start_time
=
METRICS_NOW_TIME
;
// auto id = ConnectorPtr->insert(file_schema);
// file_schema.id_ = id;
Query
createTableFileQuery
=
connectionPtr
->
query
();
// auto end_time = METRICS_NOW_TIME;
std
::
string
id
=
"NULL"
;
//auto-increment
// auto total_time = METRICS_MICROSECONDS(start_time, end_time);
std
::
string
table_id
=
file_schema
.
table_id_
;
// server::Metrics::GetInstance().MetaAccessDurationSecondsHistogramObserve(total_time);
std
::
string
engine_type
=
std
::
to_string
(
file_schema
.
engine_type_
);
// } catch (...) {
std
::
string
file_id
=
file_schema
.
file_id_
;
// return Status::DBTransactionError("Add file Error");
std
::
string
file_type
=
std
::
to_string
(
file_schema
.
file_type_
);
// }
std
::
string
size
=
std
::
to_string
(
file_schema
.
size_
);
// }
std
::
string
updated_time
=
std
::
to_string
(
file_schema
.
updated_time_
);
//
std
::
string
created_on
=
std
::
to_string
(
file_schema
.
created_on_
);
// auto partition_path = GetTableDatePartitionPath(file_schema.table_id_, file_schema.date_);
std
::
string
date
=
std
::
to_string
(
file_schema
.
date_
);
//
// if (!boost::filesystem::is_directory(partition_path)) {
createTableFileQuery
<<
"INSERT INTO metaFile VALUES"
<<
// auto ret = boost::filesystem::create_directory(partition_path);
"("
<<
id
<<
", "
<<
quote
<<
table_id
<<
", "
<<
engine_type
<<
", "
<<
// if (!ret) {
quote
<<
file_id
<<
", "
<<
file_type
<<
", "
<<
size
<<
", "
<<
// ENGINE_LOG_ERROR << "Create directory " << partition_path << " Error";
updated_time
<<
", "
<<
created_on
<<
", "
<<
date
<<
");"
;
// }
// assert(ret);
if
(
SimpleResult
res
=
createTableFileQuery
.
execute
())
{
// }
file_schema
.
id_
=
res
.
insert_id
();
//Might need to use SELECT LAST_INSERT_ID()?
}
else
{
return
Status
::
DBTransactionError
(
"Add file Error"
,
createTableFileQuery
.
error
());
}
auto
end_time
=
METRICS_NOW_TIME
;
auto
total_time
=
METRICS_MICROSECONDS
(
start_time
,
end_time
);
server
::
Metrics
::
GetInstance
().
MetaAccessDurationSecondsHistogramObserve
(
total_time
);
}
catch
(
const
BadQuery
&
er
)
{
// Handle any query errors
return
Status
::
DBTransactionError
(
"QUERY ERROR WHEN ADDING TABLE FILE"
,
er
.
what
());
}
catch
(
const
Exception
&
er
)
{
// Catch-all for any other MySQL++ exceptions
return
Status
::
DBTransactionError
(
"GENERAL ERROR WHEN ADDING TABLE FILE"
,
er
.
what
());
}
}
auto
partition_path
=
GetTableDatePartitionPath
(
file_schema
.
table_id_
,
file_schema
.
date_
);
if
(
!
boost
::
filesystem
::
is_directory
(
partition_path
))
{
auto
ret
=
boost
::
filesystem
::
create_directory
(
partition_path
);
if
(
!
ret
)
{
ENGINE_LOG_ERROR
<<
"Create directory "
<<
partition_path
<<
" Error"
;
}
assert
(
ret
);
}
return
Status
::
OK
();
return
Status
::
OK
();
}
}
Status
MySQLMetaImpl
::
FilesToIndex
(
TableFilesSchema
&
files
)
{
Status
MySQLMetaImpl
::
FilesToIndex
(
TableFilesSchema
&
files
)
{
// files.clear();
files
.
clear
();
//
// try {
try
{
// server::Metrics::GetInstance().MetaAccessTotalIncrement();
server
::
Metrics
::
GetInstance
().
MetaAccessTotalIncrement
();
// auto start_time = METRICS_NOW_TIME;
auto
start_time
=
METRICS_NOW_TIME
;
// auto selected = ConnectorPtr->select(columns(&TableFileSchema::id_,
// &TableFileSchema::table_id_,
Query
filesToIndexQuery
=
connectionPtr
->
query
();
// &TableFileSchema::file_id_,
filesToIndexQuery
<<
"SELECT id, table_id, engine_type, file_id, file_type, size, date "
<<
// &TableFileSchema::file_type_,
"FROM metaFile "
<<
// &TableFileSchema::size_,
"WHERE file_type = "
<<
std
::
to_string
(
TableFileSchema
::
TO_INDEX
)
<<
";"
;
// &TableFileSchema::date_,
StoreQueryResult
res
=
filesToIndexQuery
.
store
();
// &TableFileSchema::engine_type_),
// where(c(&TableFileSchema::file_type_)
auto
end_time
=
METRICS_NOW_TIME
;
// == (int) TableFileSchema::TO_INDEX));
auto
total_time
=
METRICS_MICROSECONDS
(
start_time
,
end_time
);
// auto end_time = METRICS_NOW_TIME;
server
::
Metrics
::
GetInstance
().
MetaAccessDurationSecondsHistogramObserve
(
total_time
);
// auto total_time = METRICS_MICROSECONDS(start_time, end_time);
// server::Metrics::GetInstance().MetaAccessDurationSecondsHistogramObserve(total_time);
std
::
map
<
std
::
string
,
TableSchema
>
groups
;
//
TableFileSchema
table_file
;
// std::map<std::string, TableSchema> groups;
for
(
auto
&
resRow
:
res
)
{
// TableFileSchema table_file;
//
table_file
.
id_
=
resRow
[
"id"
];
//implicit conversion
// for (auto &file : selected) {
// table_file.id_ = std::get<0>(file);
std
::
string
table_id
;
// table_file.table_id_ = std::get<1>(file);
resRow
[
"table_id"
].
to_string
(
table_id
);
// table_file.file_id_ = std::get<2>(file);
table_file
.
table_id_
=
table_id
;
// table_file.file_type_ = std::get<3>(file);
// table_file.size_ = std::get<4>(file);
table_file
.
engine_type_
=
resRow
[
"engine_type"
];
// table_file.date_ = std::get<5>(file);
// table_file.engine_type_ = std::get<6>(file);
std
::
string
file_id
;
//
resRow
[
"file_id"
].
to_string
(
file_id
);
// GetTableFilePath(table_file);
table_file
.
file_id_
=
file_id
;
// auto groupItr = groups.find(table_file.table_id_);
// if (groupItr == groups.end()) {
table_file
.
file_type_
=
resRow
[
"file_type"
];
// TableSchema table_schema;
// table_schema.table_id_ = table_file.table_id_;
table_file
.
size_
=
resRow
[
"size"
];
// auto status = DescribeTable(table_schema);
// if (!status.ok()) {
table_file
.
date_
=
resRow
[
"date"
];
// return status;
// }
auto
groupItr
=
groups
.
find
(
table_file
.
table_id_
);
// groups[table_file.table_id_] = table_schema;
if
(
groupItr
==
groups
.
end
())
{
// }
TableSchema
table_schema
;
// table_file.dimension_ = groups[table_file.table_id_].dimension_;
table_schema
.
table_id_
=
table_file
.
table_id_
;
// files.push_back(table_file);
auto
status
=
DescribeTable
(
table_schema
);
// }
if
(
!
status
.
ok
())
{
// } catch (std::exception &e) {
return
status
;
// HandleException(e);
}
// }
groups
[
table_file
.
table_id_
]
=
table_schema
;
// std::cout << table_schema.dimension_ << std::endl;
}
table_file
.
dimension_
=
groups
[
table_file
.
table_id_
].
dimension_
;
GetTableFilePath
(
table_file
);
files
.
push_back
(
table_file
);
}
}
catch
(
const
BadQuery
&
er
)
{
// Handle any query errors
return
Status
::
DBTransactionError
(
"QUERY ERROR WHEN FINDING TABLE FILES TO INDEX"
,
er
.
what
());
}
catch
(
const
Exception
&
er
)
{
// Catch-all for any other MySQL++ exceptions
return
Status
::
DBTransactionError
(
"GENERAL ERROR WHEN FINDING TABLE FILES TO INDEX"
,
er
.
what
());
}
return
Status
::
OK
();
return
Status
::
OK
();
}
}
...
@@ -456,440 +595,521 @@ namespace meta {
...
@@ -456,440 +595,521 @@ namespace meta {
// Collect the searchable files (RAW, TO_INDEX, INDEX) of a table, grouped by date.
// If 'partition' is empty every date is considered; otherwise only the listed dates.
// Returns OK on success, the DescribeTable error if the table is unknown, or a
// DBTransactionError wrapping any MySQL++ failure.
Status MySQLMetaImpl::FilesToSearch(const std::string &table_id,
                                    const DatesT &partition,
                                    DatePartionedTableFilesSchema &files) {
    files.clear();

    try {
        server::Metrics::GetInstance().MetaAccessTotalIncrement();
        auto start_time = METRICS_NOW_TIME;

        StoreQueryResult res;

        if (partition.empty()) {
            // No date filter: every non-deleted searchable file of the table qualifies.
            Query filesToSearchQuery = connectionPtr->query();
            filesToSearchQuery << "SELECT id, table_id, engine_type, file_id, file_type, size, date " <<
                                  "FROM metaFile " <<
                                  "WHERE table_id = " << quote << table_id << " AND " <<
                                  "(file_type = " << std::to_string(TableFileSchema::RAW) << " OR " <<
                                  "file_type = " << std::to_string(TableFileSchema::TO_INDEX) << " OR " <<
                                  "file_type = " << std::to_string(TableFileSchema::INDEX) << ");";
            res = filesToSearchQuery.store();

            auto end_time = METRICS_NOW_TIME;
            auto total_time = METRICS_MICROSECONDS(start_time, end_time);
            server::Metrics::GetInstance().MetaAccessDurationSecondsHistogramObserve(total_time);
        }
        else {
            Query filesToSearchQuery = connectionPtr->query();

            // Build "d1, d2, ..." for the SQL IN clause. Dates are integers, so no quoting
            // is needed; the loop is only entered when 'partition' is non-empty, so the
            // trailing-", "-strip below is safe.
            std::stringstream partitionListSS;
            for (auto &date : partition) {
                partitionListSS << std::to_string(date) << ", ";
            }
            std::string partitionListStr = partitionListSS.str();
            partitionListStr = partitionListStr.substr(0, partitionListStr.size() - 2); //remove the last ", "

            filesToSearchQuery << "SELECT id, table_id, engine_type, file_id, file_type, size, date " <<
                                  "FROM metaFile " <<
                                  "WHERE table_id = " << quote << table_id << " AND " <<
                                  "date IN (" << partitionListStr << ") AND " <<
                                  "(file_type = " << std::to_string(TableFileSchema::RAW) << " OR " <<
                                  "file_type = " << std::to_string(TableFileSchema::TO_INDEX) << " OR " <<
                                  "file_type = " << std::to_string(TableFileSchema::INDEX) << ");";
            res = filesToSearchQuery.store();

            auto end_time = METRICS_NOW_TIME;
            auto total_time = METRICS_MICROSECONDS(start_time, end_time);
            server::Metrics::GetInstance().MetaAccessDurationSecondsHistogramObserve(total_time);
        }

        // The table's dimension is attached to every file record below.
        TableSchema table_schema;
        table_schema.table_id_ = table_id;
        auto status = DescribeTable(table_schema);
        if (!status.ok()) {
            return status;
        }

        // Note: 'table_file' is reused across iterations; every field read below is
        // overwritten each pass, so no stale state leaks between rows.
        TableFileSchema table_file;
        for (auto &resRow : res) {

            table_file.id_ = resRow["id"]; //implicit conversion

            // mysqlpp String -> std::string requires an explicit to_string() call.
            std::string table_id_str;
            resRow["table_id"].to_string(table_id_str);
            table_file.table_id_ = table_id_str;

            table_file.engine_type_ = resRow["engine_type"];

            std::string file_id;
            resRow["file_id"].to_string(file_id);
            table_file.file_id_ = file_id;

            table_file.file_type_ = resRow["file_type"];

            table_file.size_ = resRow["size"];

            table_file.date_ = resRow["date"];

            table_file.dimension_ = table_schema.dimension_;

            // Fills table_file.location_ from its ids.
            GetTableFilePath(table_file);

            // Group the file under its date bucket, creating the bucket on first use.
            auto dateItr = files.find(table_file.date_);
            if (dateItr == files.end()) {
                files[table_file.date_] = TableFilesSchema();
            }

            files[table_file.date_].push_back(table_file);
        }
    } catch (const BadQuery &er) {
        // Handle any query errors
        return Status::DBTransactionError("QUERY ERROR WHEN FINDING TABLE FILES TO SEARCH", er.what());
    } catch (const Exception &er) {
        // Catch-all for any other MySQL++ exceptions
        return Status::DBTransactionError("GENERAL ERROR WHEN FINDING TABLE FILES TO SEARCH", er.what());
    }

    return Status::OK();
}
// Collect the mergeable (RAW) files of a table, bucketed by date.
// Returns OK on success, the DescribeTable error if the table is unknown,
// or a DBTransactionError wrapping any MySQL++ failure.
Status MySQLMetaImpl::FilesToMerge(const std::string &table_id,
                                   DatePartionedTableFilesSchema &files) {
    files.clear();

    try {
        server::Metrics::GetInstance().MetaAccessTotalIncrement();
        auto start_time = METRICS_NOW_TIME;

        // Only RAW files are merge candidates.
        Query mergeQuery = connectionPtr->query();
        mergeQuery << "SELECT id, table_id, file_id, file_type, size, date " <<
                      "FROM metaFile " <<
                      "WHERE table_id = " << quote << table_id << " AND " <<
                      "file_type = " << std::to_string(TableFileSchema::RAW) << ";";
        StoreQueryResult res = mergeQuery.store();

        auto end_time = METRICS_NOW_TIME;
        auto total_time = METRICS_MICROSECONDS(start_time, end_time);
        server::Metrics::GetInstance().MetaAccessDurationSecondsHistogramObserve(total_time);

        // Every candidate carries the table's dimension.
        TableSchema table_schema;
        table_schema.table_id_ = table_id;
        auto status = DescribeTable(table_schema);
        if (!status.ok()) {
            return status;
        }

        TableFileSchema candidate;
        for (auto &row : res) {
            candidate.id_ = row["id"]; //implicit conversion

            // mysqlpp String -> std::string needs an explicit to_string() call.
            std::string tid;
            row["table_id"].to_string(tid);
            candidate.table_id_ = tid;

            std::string fid;
            row["file_id"].to_string(fid);
            candidate.file_id_ = fid;

            candidate.file_type_ = row["file_type"];
            candidate.size_ = row["size"];
            candidate.date_ = row["date"];
            candidate.dimension_ = table_schema.dimension_;

            // Fills candidate.location_ from its ids.
            GetTableFilePath(candidate);

            // Bucket by date, creating the bucket on first use.
            if (files.find(candidate.date_) == files.end()) {
                files[candidate.date_] = TableFilesSchema();
            }
            files[candidate.date_].push_back(candidate);
        }
    } catch (const BadQuery &er) {
        // Handle any query errors
        return Status::DBTransactionError("QUERY ERROR WHEN FINDING TABLE FILES TO MERGE", er.what());
    } catch (const Exception &er) {
        // Catch-all for any other MySQL++ exceptions
        return Status::DBTransactionError("GENERAL ERROR WHEN FINDING TABLE FILES TO MERGE", er.what());
    }

    return Status::OK();
}
//ZR: TODO: this function is pending to be removed, so not gonna implemented for now
// Intentional stub: the sqlite-based implementation (select files not marked
// TO_DELETE, optionally filtered by 'partition', collect them into 'files' and
// erase their meta rows) was not ported to MySQL because the interface is
// scheduled for removal. Always reports success without touching 'files'.
Status MySQLMetaImpl::FilesToDelete(const std::string &table_id,
                                    const DatesT &partition,
                                    DatePartionedTableFilesSchema &files) {
    return Status::OK();
}
// Look up a single file record by (file_id_, table_id_) taken from 'file_schema'
// and fill in the remaining fields from the meta table.
// Returns NotFound when no matching row exists, or a DBTransactionError on any
// MySQL++ failure.
Status MySQLMetaImpl::GetTableFile(TableFileSchema &file_schema) {

    try {
        Query lookup = connectionPtr->query();
        lookup << "SELECT id, table_id, file_id, file_type, size, date " <<
                  "FROM metaFile " <<
                  "WHERE file_id = " << quote << file_schema.file_id_ << " AND " <<
                  "table_id = " << quote << file_schema.table_id_ << ";";
        StoreQueryResult res = lookup.store();

        // (file_id, table_id) is expected to be unique.
        assert(res && res.num_rows() <= 1);

        if (res.num_rows() != 1) {
            return Status::NotFound("Table:" + file_schema.table_id_ +
                                    " File:" + file_schema.file_id_ + " not found");
        }

        const Row &row = res[0];

        file_schema.id_ = row["id"]; //implicit conversion

        // mysqlpp String -> std::string needs an explicit to_string() call.
        std::string tid;
        row["table_id"].to_string(tid);
        file_schema.table_id_ = tid;

        std::string fid;
        row["file_id"].to_string(fid);
        file_schema.file_id_ = fid;

        file_schema.file_type_ = row["file_type"];
        file_schema.size_ = row["size"];
        file_schema.date_ = row["date"];
    } catch (const BadQuery &er) {
        // Handle any query errors
        return Status::DBTransactionError("QUERY ERROR WHEN RETRIEVING TABLE FILE", er.what());
    } catch (const Exception &er) {
        // Catch-all for any other MySQL++ exceptions
        return Status::DBTransactionError("GENERAL ERROR WHEN RETRIEVING TABLE FILE", er.what());
    }

    return Status::OK();
}
// PXU TODO: Support Swap
// Apply the configured archive criteria:
//   "days": mark every file older than the limit (and not already TO_DELETE)
//           as TO_DELETE;
//   "disk": when total meta size exceeds the limit, discard the overshoot via
//           DiscardFiles().
// Returns OK, or a DBTransactionError on any MySQL++ failure.
Status MySQLMetaImpl::Archive() {
    auto &criterias = options_.archive_conf.GetCriterias();
    if (criterias.empty()) {
        return Status::OK();
    }

    for (auto &kv : criterias) {
        auto &criteria = kv.first;
        auto &limit = kv.second;

        if (criteria == "days") {
            size_t usecs = limit * D_SEC * US_PS;
            long now = utils::GetMicroSecTimeStamp();

            try {
                Query archiveQuery = connectionPtr->query();
                archiveQuery << "UPDATE metaFile " <<
                                "SET file_type = " << std::to_string(TableFileSchema::TO_DELETE) << " " <<
                                "WHERE created_on < " << std::to_string(now - usecs) << " AND " <<
                                "file_type <> " << std::to_string(TableFileSchema::TO_DELETE) << ";";

                if (!archiveQuery.exec()) {
                    return Status::DBTransactionError("QUERY ERROR DURING ARCHIVE", archiveQuery.error());
                }
            } catch (const BadQuery &er) {
                // Handle any query errors
                return Status::DBTransactionError("QUERY ERROR WHEN DURING ARCHIVE", er.what());
            } catch (const Exception &er) {
                // Catch-all for any other MySQL++ exceptions
                return Status::DBTransactionError("GENERAL ERROR WHEN DURING ARCHIVE", er.what());
            }
        }

        if (criteria == "disk") {
            uint64_t sum = 0;
            Size(sum);

            // FIX: 'sum - limit * G' is an unsigned subtraction; when current usage is
            // below the limit it used to wrap around (and its conversion to 'long' in
            // DiscardFiles is implementation-defined). Only discard the actual excess.
            uint64_t limit_bytes = static_cast<uint64_t>(limit) * G;
            if (sum > limit_bytes) {
                DiscardFiles(static_cast<long>(sum - limit_bytes));
            }
        }
    }

    return Status::OK();
}
// Compute the total size (in bytes) of all meta files not marked TO_DELETE.
// 'result' is set to 0 when the meta table holds no matching rows.
// Returns OK, or a DBTransactionError on any MySQL++ failure.
Status MySQLMetaImpl::Size(uint64_t &result) {
    result = 0;
    try {
        Query getSizeQuery = connectionPtr->query();
        getSizeQuery << "SELECT SUM(size) AS sum " <<
                        "FROM metaFile " <<
                        "WHERE file_type <> " << std::to_string(TableFileSchema::TO_DELETE) << ";";
        StoreQueryResult res = getSizeQuery.store();

        // An aggregate query always yields exactly one row.
        assert(res && res.num_rows() == 1);

        // FIX: SQL SUM() over zero rows yields NULL, not 0; converting a NULL field
        // to uint64_t is not meaningful. Treat NULL as "no files" -> 0.
        if (res[0]["sum"].is_null()) {
            result = 0;
        } else {
            result = res[0]["sum"];
        }
    } catch (const BadQuery &er) {
        // Handle any query errors
        return Status::DBTransactionError("QUERY ERROR WHEN RETRIEVING SIZE", er.what());
    } catch (const Exception &er) {
        // Catch-all for any other MySQL++ exceptions
        return Status::DBTransactionError("GENERAL ERROR WHEN RETRIEVING SIZE", er.what());
    }

    return Status::OK();
}
// Mark files as TO_DELETE, oldest id first, until at least 'to_discard_size'
// bytes have been covered. Works in batches of 10 and recurses until the quota
// is met or no candidates remain.
// Returns OK, or a DBTransactionError on any MySQL++ failure.
Status MySQLMetaImpl::DiscardFiles(long to_discard_size) {
    LOG(DEBUG) << "About to discard size=" << to_discard_size;
    if (to_discard_size <= 0) {
        return Status::OK();
    }
    try {
        // Fetch the next batch of candidates (oldest ids first).
        Query discardFilesQuery = connectionPtr->query();
        discardFilesQuery << "SELECT id, size " <<
                             "FROM metaFile " <<
                             "WHERE file_type <> " << std::to_string(TableFileSchema::TO_DELETE) << " " <<
                             "ORDER BY id ASC " <<
                             "LIMIT 10;";
        StoreQueryResult res = discardFilesQuery.store();

        assert(res);
        if (res.num_rows() == 0) {
            // Nothing left to discard.
            return Status::OK();
        }

        TableFileSchema table_file;
        std::stringstream idsToDiscardSS;
        for (auto &resRow : res) {
            if (to_discard_size <= 0) {
                break;
            }
            table_file.id_ = resRow["id"]; //implicit conversion
            table_file.size_ = resRow["size"];
            idsToDiscardSS << "id = " << std::to_string(table_file.id_) << " OR ";
            // FIX: previously logged table_file.file_id_, which is never populated in
            // this function (only id/size are selected) and so printed a stale/empty
            // value; log the id that was actually read.
            ENGINE_LOG_DEBUG << "Discard table_file.id=" << table_file.id_
                             << " table_file.size=" << table_file.size_;
            to_discard_size -= table_file.size_;
        }

        // The loop above ran at least once (res is non-empty and the quota was > 0
        // on entry), so stripping the trailing " OR " is safe.
        std::string idsToDiscardStr = idsToDiscardSS.str();
        idsToDiscardStr = idsToDiscardStr.substr(0, idsToDiscardStr.size() - 4); //remove the last " OR "

        discardFilesQuery << "UPDATE metaFile " <<
                             "SET file_type = " << std::to_string(TableFileSchema::TO_DELETE) << " " <<
                             "WHERE " << idsToDiscardStr << ";";

        if (discardFilesQuery.exec()) {
            // Recurse for the remainder of the quota; terminates when the quota is
            // met or the SELECT above returns no rows.
            return DiscardFiles(to_discard_size);
        }
        else {
            return Status::DBTransactionError("QUERY ERROR WHEN DISCARDING FILES", discardFilesQuery.error());
        }
    } catch (const BadQuery &er) {
        // Handle any query errors
        return Status::DBTransactionError("QUERY ERROR WHEN DISCARDING FILES", er.what());
    } catch (const Exception &er) {
        // Catch-all for any other MySQL++ exceptions
        return Status::DBTransactionError("GENERAL ERROR WHEN DISCARDING FILES", er.what());
    }
}
//ZR: this function assumes all fields in file_schema have value
// Persist every column of one existing file record (matched by its id) and
// refresh its updated_time_ to "now".
// Returns OK, or a DBTransactionError on any MySQL++ failure; failures also log
// the (table_id, file_id) pair for diagnosis.
Status MySQLMetaImpl::UpdateTableFile(TableFileSchema &file_schema) {
    file_schema.updated_time_ = utils::GetMicroSecTimeStamp();
    try {
        server::Metrics::GetInstance().MetaAccessTotalIncrement();
        auto start_time = METRICS_NOW_TIME;

        Query updateTableFileQuery = connectionPtr->query();

        // Stringify all columns up front; numeric fields need no SQL quoting,
        // string fields (table_id, file_id) are quoted in the statement below.
        std::string id = std::to_string(file_schema.id_);
        std::string table_id = file_schema.table_id_;
        std::string engine_type = std::to_string(file_schema.engine_type_);
        std::string file_id = file_schema.file_id_;
        std::string file_type = std::to_string(file_schema.file_type_);
        std::string size = std::to_string(file_schema.size_);
        std::string updated_time = std::to_string(file_schema.updated_time_);
        std::string created_on = std::to_string(file_schema.created_on_);
        std::string date = std::to_string(file_schema.date_);

        updateTableFileQuery << "UPDATE metaFile " <<
                                "SET table_id = " << quote << table_id << ", " <<
                                "engine_type = " << engine_type << ", " <<
                                "file_id = " << quote << file_id << ", " <<
                                "file_type = " << file_type << ", " <<
                                "size = " << size << ", " <<
                                "updated_time = " << updated_time << ", " <<
                                "created_on = " << created_on << ", " <<
                                "date = " << date << " " <<
                                "WHERE id = " << id << ";";

        // std::cout << updateTableFileQuery.str() << std::endl;

        if (!updateTableFileQuery.exec()) {
            ENGINE_LOG_DEBUG << "table_id= " << file_schema.table_id_ << " file_id=" << file_schema.file_id_;
            return Status::DBTransactionError("QUERY ERROR WHEN UPDATING TABLE FILE", updateTableFileQuery.error());
        }

        // Metrics are recorded only on the success path.
        auto end_time = METRICS_NOW_TIME;
        auto total_time = METRICS_MICROSECONDS(start_time, end_time);
        server::Metrics::GetInstance().MetaAccessDurationSecondsHistogramObserve(total_time);
    } catch (const BadQuery &er) {
        // Handle any query errors
        ENGINE_LOG_DEBUG << "table_id= " << file_schema.table_id_ << " file_id=" << file_schema.file_id_;
        return Status::DBTransactionError("QUERY ERROR WHEN UPDATING TABLE FILE", er.what());
    } catch (const Exception &er) {
        // Catch-all for any other MySQL++ exceptions
        ENGINE_LOG_DEBUG << "table_id= " << file_schema.table_id_ << " file_id=" << file_schema.file_id_;
        return Status::DBTransactionError("GENERAL ERROR WHEN UPDATING TABLE FILE", er.what());
    }
    return Status::OK();
}
// Batch variant of UpdateTableFile: persist every record in 'files' (matched by
// id), refreshing each record's updated_time_ to "now". All UPDATE statements
// are buffered into one query and executed together.
// NOTE(review): executing several ';'-separated statements in one exec()
// presumably relies on the connection allowing multi-statements — verify the
// connection options.
// Returns OK, or a DBTransactionError on any MySQL++ failure.
Status MySQLMetaImpl::UpdateTableFiles(TableFilesSchema &files) {
    try {
        server::Metrics::GetInstance().MetaAccessTotalIncrement();
        auto start_time = METRICS_NOW_TIME;

        Query updateTableFilesQuery = connectionPtr->query();

        for (auto &file_schema : files) {
            // FIX: refresh the per-file update timestamp, matching UpdateTableFile();
            // previously the stale caller-supplied value was written back.
            file_schema.updated_time_ = utils::GetMicroSecTimeStamp();

            // Stringify all columns; string fields are quoted in the statement below.
            std::string id = std::to_string(file_schema.id_);
            std::string table_id = file_schema.table_id_;
            std::string engine_type = std::to_string(file_schema.engine_type_);
            std::string file_id = file_schema.file_id_;
            std::string file_type = std::to_string(file_schema.file_type_);
            std::string size = std::to_string(file_schema.size_);
            std::string updated_time = std::to_string(file_schema.updated_time_);
            std::string created_on = std::to_string(file_schema.created_on_);
            std::string date = std::to_string(file_schema.date_);

            updateTableFilesQuery << "UPDATE metaFile " <<
                                     "SET table_id = " << quote << table_id << ", " <<
                                     "engine_type = " << engine_type << ", " <<
                                     "file_id = " << quote << file_id << ", " <<
                                     "file_type = " << file_type << ", " <<
                                     "size = " << size << ", " <<
                                     "updated_time = " << updated_time << ", " <<
                                     "created_on = " << created_on << ", " <<
                                     "date = " << date << " " <<
                                     "WHERE id = " << id << ";";
        }

        if (!updateTableFilesQuery.exec()) {
            return Status::DBTransactionError("QUERY ERROR WHEN UPDATING TABLE FILES", updateTableFilesQuery.error());
        }
    } catch (const BadQuery &er) {
        // Handle any query errors
        return Status::DBTransactionError("QUERY ERROR WHEN UPDATING TABLE FILES", er.what());
    } catch (const Exception &er) {
        // Catch-all for any other MySQL++ exceptions
        return Status::DBTransactionError("GENERAL ERROR WHEN UPDATING TABLE FILES", er.what());
    }
    return Status::OK();
}
// Remove TO_DELETE file records whose updated_time falls inside the given
// TTL window, deleting the on-disk file before erasing the meta row.
// NOTE(review): the window condition 'updated_time > now - ttl' matches the
// original ORM code, but selecting *recently* updated files looks inverted —
// confirm intent before changing it.
// Returns OK, or a DBTransactionError on any MySQL++ failure.
Status MySQLMetaImpl::CleanUpFilesWithTTL(uint16_t seconds) {
    auto now = utils::GetMicroSecTimeStamp();
    try {
        Query cleanUpFilesWithTTLQuery = connectionPtr->query();
        cleanUpFilesWithTTLQuery << "SELECT id, table_id, file_id, file_type, size, date " <<
                                    "FROM metaFile " <<
                                    "WHERE file_type = " << std::to_string(TableFileSchema::TO_DELETE) << " AND " <<
                                    "updated_time > " << std::to_string(now - seconds * US_PS) << ";";
        StoreQueryResult res = cleanUpFilesWithTTLQuery.store();

        assert(res);

        // FIX: with no matching rows the old code built "DELETE FROM metaFile WHERE ;"
        // (empty id list) — a malformed statement. Bail out early instead.
        if (res.num_rows() == 0) {
            return Status::OK();
        }

        TableFileSchema table_file;
        std::vector<std::string> idsToDelete;

        for (auto &resRow : res) {
            table_file.id_ = resRow["id"]; //implicit conversion

            // mysqlpp String -> std::string needs an explicit to_string() call.
            std::string table_id;
            resRow["table_id"].to_string(table_id);
            table_file.table_id_ = table_id;

            std::string file_id;
            resRow["file_id"].to_string(file_id);
            table_file.file_id_ = file_id;

            table_file.file_type_ = resRow["file_type"];
            table_file.size_ = resRow["size"];
            table_file.date_ = resRow["date"];

            // Resolve table_file.location_ so the physical file can be removed.
            GetTableFilePath(table_file);

            if (table_file.file_type_ == TableFileSchema::TO_DELETE) {
                boost::filesystem::remove(table_file.location_);
            }

            idsToDelete.emplace_back(std::to_string(table_file.id_));
        }

        // Build "id = a OR id = b OR ..." for a single DELETE of all collected rows.
        std::stringstream idsToDeleteSS;
        for (auto &id : idsToDelete) {
            idsToDeleteSS << "id = " << id << " OR ";
        }
        std::string idsToDeleteStr = idsToDeleteSS.str();
        idsToDeleteStr = idsToDeleteStr.substr(0, idsToDeleteStr.size() - 4); //remove the last " OR "

        cleanUpFilesWithTTLQuery << "DELETE FROM metaFile WHERE " << idsToDeleteStr << ";";

        if (!cleanUpFilesWithTTLQuery.exec()) {
            return Status::DBTransactionError("CleanUpFilesWithTTL Error", cleanUpFilesWithTTLQuery.error());
        }
    } catch (const BadQuery &er) {
        // Handle any query errors
        return Status::DBTransactionError("QUERY ERROR WHEN CLEANING UP FILES WITH TTL", er.what());
    } catch (const Exception &er) {
        // Catch-all for any other MySQL++ exceptions
        return Status::DBTransactionError("GENERAL ERROR WHEN CLEANING UP FILES WITH TTL", er.what());
    }

    return Status::OK();
}
// Purge leftover file records in transient states (TO_DELETE or NEW): remove
// the on-disk file for TO_DELETE entries, then erase all collected meta rows.
// Returns OK, or a DBTransactionError on any MySQL++ failure.
Status MySQLMetaImpl::CleanUp() {
    try {
        Query cleanUpQuery = connectionPtr->query();
        cleanUpQuery << "SELECT id, table_id, file_id, file_type, size, date " <<
                        "FROM metaFile " <<
                        "WHERE file_type = " << std::to_string(TableFileSchema::TO_DELETE) << " OR " <<
                        "file_type = " << std::to_string(TableFileSchema::NEW) << ";";
        StoreQueryResult res = cleanUpQuery.store();

        assert(res);

        // FIX: with no matching rows the old code built "DELETE FROM metaFile WHERE ;"
        // (empty id list) — a malformed statement. Bail out early instead.
        if (res.num_rows() == 0) {
            return Status::OK();
        }

        TableFileSchema table_file;
        std::vector<std::string> idsToDelete;

        for (auto &resRow : res) {
            table_file.id_ = resRow["id"]; //implicit conversion

            // mysqlpp String -> std::string needs an explicit to_string() call.
            std::string table_id;
            resRow["table_id"].to_string(table_id);
            table_file.table_id_ = table_id;

            std::string file_id;
            resRow["file_id"].to_string(file_id);
            table_file.file_id_ = file_id;

            table_file.file_type_ = resRow["file_type"];
            table_file.size_ = resRow["size"];
            table_file.date_ = resRow["date"];

            // Resolve table_file.location_ so the physical file can be removed.
            GetTableFilePath(table_file);

            // Only TO_DELETE entries have a physical file to remove; NEW entries
            // never got one written.
            if (table_file.file_type_ == TableFileSchema::TO_DELETE) {
                boost::filesystem::remove(table_file.location_);
            }

            idsToDelete.emplace_back(std::to_string(table_file.id_));
        }

        // Build "id = a OR id = b OR ..." for a single DELETE of all collected rows.
        std::stringstream idsToDeleteSS;
        for (auto &id : idsToDelete) {
            idsToDeleteSS << "id = " << id << " OR ";
        }
        std::string idsToDeleteStr = idsToDeleteSS.str();
        idsToDeleteStr = idsToDeleteStr.substr(0, idsToDeleteStr.size() - 4); //remove the last " OR "

        cleanUpQuery << "DELETE FROM metaFile WHERE " << idsToDeleteStr << ";";

        if (!cleanUpQuery.exec()) {
            return Status::DBTransactionError("Clean up Error", cleanUpQuery.error());
        }
    } catch (const BadQuery &er) {
        // Handle any query errors
        return Status::DBTransactionError("QUERY ERROR WHEN CLEANING UP FILES", er.what());
    } catch (const Exception &er) {
        // Catch-all for any other MySQL++ exceptions
        return Status::DBTransactionError("GENERAL ERROR WHEN CLEANING UP FILES", er.what());
    }

    return Status::OK();
}
// Report the number of vectors stored for `table_id`: sums the byte sizes of
// all usable files (RAW, TO_INDEX, INDEX) and converts bytes -> vector count
// by dividing by dimension * sizeof(float).
// NOTE(review): assumes `size` in metaFile is a byte count of float vectors —
// confirm against CreateTableFile/UpdateTableFile writers.
Status MySQLMetaImpl::Count(const std::string &table_id, uint64_t &result) {
    try {
        server::Metrics::GetInstance().MetaAccessTotalIncrement();
        auto start_time = METRICS_NOW_TIME;

        Query countQuery = connectionPtr->query();
        countQuery << "SELECT size " <<
                      "FROM metaFile " <<
                      "WHERE table_id = " << quote << table_id << " AND " <<
                      "(file_type = " << std::to_string(TableFileSchema::RAW) << " OR " <<
                      "file_type = " << std::to_string(TableFileSchema::TO_INDEX) << " OR " <<
                      "file_type = " << std::to_string(TableFileSchema::INDEX) << ");";
        StoreQueryResult res = countQuery.store();

        auto end_time = METRICS_NOW_TIME;
        auto total_time = METRICS_MICROSECONDS(start_time, end_time);
        server::Metrics::GetInstance().MetaAccessDurationSecondsHistogramObserve(total_time);

        TableSchema table_schema;
        table_schema.table_id_ = table_id;
        auto status = DescribeTable(table_schema);

        if (!status.ok()) {
            return status;
        }

        result = 0;
        for (auto &resRow : res) {
            size_t size = resRow["size"];
            result += size;
        }

        // BUGFIX: the original relied solely on assert(dimension_ != 0), which
        // compiles away under NDEBUG and left a division by zero in release
        // builds. Fail with an explicit status instead.
        if (table_schema.dimension_ == 0) {
            return Status::DBTransactionError("COUNT ERROR", "table dimension is zero");
        }

        result /= table_schema.dimension_;
        result /= sizeof(float);
    } catch (const BadQuery &er) {
        // Handle any query errors
        return Status::DBTransactionError("QUERY ERROR WHEN RETRIEVING COUNT", er.what());
    } catch (const Exception &er) {
        // Catch-all for any other MySQL++ exceptions
        return Status::DBTransactionError("GENERAL ERROR WHEN RETRIEVING COUNT", er.what());
    }
    return Status::OK();
}
// Drop both metadata tables (`meta`, `metaFile`) from the MySQL backend.
// The old SQLite-era behavior of removing options_.path from disk is retired
// (it survives only as commented-out code in this revision).
Status MySQLMetaImpl::DropAll() {
    // if (boost::filesystem::is_directory(options_.path)) {
    //     boost::filesystem::remove_all(options_.path);
    // }
    try {
        Query dropTableQuery = connectionPtr->query();
        dropTableQuery << "DROP TABLE IF EXISTS meta, metaFile;";

        if (!dropTableQuery.exec()) {
            return Status::DBTransactionError("DROP TABLE ERROR", dropTableQuery.error());
        }
        return Status::OK();
    } catch (const BadQuery &er) {
        // Handle any query errors
        return Status::DBTransactionError("QUERY ERROR WHEN DROPPING TABLE", er.what());
    } catch (const Exception &er) {
        // Catch-all for any other MySQL++ exceptions
        return Status::DBTransactionError("GENERAL ERROR WHEN DROPPING TABLE", er.what());
    }
}
MySQLMetaImpl
::~
MySQLMetaImpl
()
{
MySQLMetaImpl
::~
MySQLMetaImpl
()
{
...
...
cpp/src/db/MySQLMetaImpl.h
浏览文件 @
75a9b4fb
...
@@ -19,8 +19,6 @@ namespace meta {
...
@@ -19,8 +19,6 @@ namespace meta {
public:
public:
MySQLMetaImpl
(
const
DBMetaOptions
&
options_
);
MySQLMetaImpl
(
const
DBMetaOptions
&
options_
);
Status
Initialize
();
virtual
Status
CreateTable
(
TableSchema
&
table_schema
)
override
;
virtual
Status
CreateTable
(
TableSchema
&
table_schema
)
override
;
virtual
Status
DeleteTable
(
const
std
::
string
&
table_id
)
override
;
virtual
Status
DeleteTable
(
const
std
::
string
&
table_id
)
override
;
virtual
Status
DescribeTable
(
TableSchema
&
group_info_
)
override
;
virtual
Status
DescribeTable
(
TableSchema
&
group_info_
)
override
;
...
@@ -71,6 +69,7 @@ namespace meta {
...
@@ -71,6 +69,7 @@ namespace meta {
std
::
string
GetTablePath
(
const
std
::
string
&
table_id
);
std
::
string
GetTablePath
(
const
std
::
string
&
table_id
);
std
::
string
GetTableDatePartitionPath
(
const
std
::
string
&
table_id
,
DateT
&
date
);
std
::
string
GetTableDatePartitionPath
(
const
std
::
string
&
table_id
,
DateT
&
date
);
void
GetTableFilePath
(
TableFileSchema
&
group_file
);
void
GetTableFilePath
(
TableFileSchema
&
group_file
);
Status
Initialize
();
const
DBMetaOptions
options_
;
const
DBMetaOptions
options_
;
};
// DBMetaImpl
};
// DBMetaImpl
...
...
cpp/unittest/CMakeLists.txt
浏览文件 @
75a9b4fb
...
@@ -6,10 +6,10 @@
...
@@ -6,10 +6,10 @@
link_directories
(
link_directories
(
"
${
CMAKE_BINARY_DIR
}
/lib"
"
${
CMAKE_BINARY_DIR
}
/lib"
#"${VECWISE_THIRD_PARTY_BUILD}/lib"
#"${VECWISE_THIRD_PARTY_BUILD}/lib"
"
${
GTEST_PREFIX
}
/lib/"
#
"${GTEST_PREFIX}/lib/"
)
)
message
(
STATUS
"GTEST LIB:
${
GTEST_PREFIX
}
/lib"
)
#
message(STATUS "GTEST LIB: ${GTEST_PREFIX}/lib")
set
(
unittest_srcs
set
(
unittest_srcs
${
CMAKE_CURRENT_SOURCE_DIR
}
/vecwise_test.cpp
)
${
CMAKE_CURRENT_SOURCE_DIR
}
/vecwise_test.cpp
)
...
...
cpp/unittest/db/MySQLMetaImpl_test.cpp
浏览文件 @
75a9b4fb
...
@@ -15,14 +15,431 @@
...
@@ -15,14 +15,431 @@
#include "db/Utils.h"
#include "db/Utils.h"
#include "db/MetaConsts.h"
#include "db/MetaConsts.h"
#include "mysql++/mysql++.h"
#include <iostream>
using
namespace
zilliz
::
milvus
::
engine
;
using
namespace
zilliz
::
milvus
::
engine
;
TEST_F
(
MySQLTest
,
InitializeTest
)
{
//TEST_F(MySQLTest, InitializeTest) {
// DBMetaOptions options;
// //dialect+driver://username:password@host:port/database
// options.backend_uri = "mysql://root:1234@:/test";
// meta::MySQLMetaImpl impl(options);
// auto status = impl.Initialize();
// std::cout << status.ToString() << std::endl;
// ASSERT_TRUE(status.ok());
//}
// End-to-end smoke test of MySQLMetaImpl: table CRUD, file CRUD, the
// FilesTo{Index,Search,Merge} queries, Size/Count aggregation, and DropAll.
// Requires a reachable MySQL instance at the backend_uri below.
TEST_F(MySQLTest, core) {
    DBMetaOptions options;
    //dialect+driver://username:password@host:port/database
    options.backend_uri = "mysql://root:1234@:/test";
    options.path = "/tmp/vecwise_test";

    meta::MySQLMetaImpl impl(options);

    meta::TableSchema schema1;
    schema1.table_id_ = "test1";
    schema1.dimension_ = 123;

    auto status = impl.CreateTable(schema1);
    ASSERT_TRUE(status.ok());

    meta::TableSchema schema2;
    schema2.table_id_ = "test2";
    schema2.dimension_ = 321;
    status = impl.CreateTable(schema2);
    ASSERT_TRUE(status.ok());

    // Creating a table with a duplicate table_id must fail.
    status = impl.CreateTable(schema2);
    ASSERT_FALSE(status.ok());

    status = impl.DeleteTable(schema2.table_id_);
    ASSERT_TRUE(status.ok());

    // DescribeTable must round-trip the fields written by CreateTable.
    size_t id1 = schema1.id_;
    long created_on1 = schema1.created_on_;
    status = impl.DescribeTable(schema1);
    ASSERT_TRUE(status.ok());
    ASSERT_EQ(schema1.id_, id1);
    ASSERT_EQ(schema1.table_id_, "test1");
    ASSERT_EQ(schema1.created_on_, created_on1);
    ASSERT_EQ(schema1.files_cnt_, 0);
    ASSERT_EQ(schema1.engine_type_, 1);
    ASSERT_EQ(schema1.store_raw_data_, false);

    bool check;
    status = impl.HasTable("test1", check);
    ASSERT_TRUE(status.ok());
    ASSERT_EQ(check, true);

    // Only "test1" should remain after deleting "test2".
    std::vector<meta::TableSchema> table_schema_array;
    status = impl.AllTables(table_schema_array);
    ASSERT_TRUE(status.ok());
    ASSERT_EQ(table_schema_array.size(), 1);
    meta::TableSchema resultSchema = table_schema_array[0];
    ASSERT_EQ(resultSchema.id_, id1);
    ASSERT_EQ(resultSchema.table_id_, "test1");
    ASSERT_EQ(resultSchema.dimension_, 123);
    ASSERT_EQ(resultSchema.files_cnt_, 0);
    ASSERT_EQ(resultSchema.engine_type_, 1);
    ASSERT_EQ(resultSchema.store_raw_data_, false);

    meta::TableFileSchema tableFileSchema;
    tableFileSchema.table_id_ = "test1";

    status = impl.CreateTableFile(tableFileSchema);
    ASSERT_TRUE(status.ok());

    tableFileSchema.file_type_ = meta::TableFileSchema::TO_INDEX;
    status = impl.UpdateTableFile(tableFileSchema);
    ASSERT_TRUE(status.ok());

    meta::TableFilesSchema filesToIndex;
    status = impl.FilesToIndex(filesToIndex);
    ASSERT_TRUE(status.ok());
    ASSERT_EQ(filesToIndex.size(), 1);
    meta::TableFileSchema fileToIndex = filesToIndex[0];
    ASSERT_EQ(fileToIndex.table_id_, "test1");
    ASSERT_EQ(fileToIndex.dimension_, 123);

    meta::DatesT partition;
    partition.push_back(tableFileSchema.date_);
    meta::DatePartionedTableFilesSchema filesToSearch;
    status = impl.FilesToSearch(tableFileSchema.table_id_, partition, filesToSearch);
    ASSERT_TRUE(status.ok());
    ASSERT_EQ(filesToSearch.size(), 1);
    ASSERT_EQ(filesToSearch[tableFileSchema.date_].size(), 1);
    meta::TableFileSchema fileToSearch = filesToSearch[tableFileSchema.date_][0];
    ASSERT_EQ(fileToSearch.table_id_, "test1");
    ASSERT_EQ(fileToSearch.dimension_, 123);

    tableFileSchema.file_type_ = meta::TableFileSchema::RAW;
    status = impl.UpdateTableFile(tableFileSchema);
    ASSERT_TRUE(status.ok());

    meta::DatePartionedTableFilesSchema filesToMerge;
    status = impl.FilesToMerge(tableFileSchema.table_id_, filesToMerge);
    ASSERT_TRUE(status.ok());
    ASSERT_EQ(filesToMerge.size(), 1);
    ASSERT_EQ(filesToMerge[tableFileSchema.date_].size(), 1);
    meta::TableFileSchema fileToMerge = filesToMerge[tableFileSchema.date_][0];
    ASSERT_EQ(fileToMerge.table_id_, "test1");
    ASSERT_EQ(fileToMerge.dimension_, 123);

    meta::TableFileSchema resultTableFileSchema;
    resultTableFileSchema.table_id_ = tableFileSchema.table_id_;
    resultTableFileSchema.file_id_ = tableFileSchema.file_id_;
    status = impl.GetTableFile(resultTableFileSchema);
    ASSERT_TRUE(status.ok());
    ASSERT_EQ(resultTableFileSchema.id_, tableFileSchema.id_);
    ASSERT_EQ(resultTableFileSchema.table_id_, tableFileSchema.table_id_);
    ASSERT_EQ(resultTableFileSchema.file_id_, tableFileSchema.file_id_);
    ASSERT_EQ(resultTableFileSchema.file_type_, tableFileSchema.file_type_);
    ASSERT_EQ(resultTableFileSchema.size_, tableFileSchema.size_);
    ASSERT_EQ(resultTableFileSchema.date_, tableFileSchema.date_);

    tableFileSchema.size_ = 234;
    status = impl.CreateTable(schema2);
    ASSERT_TRUE(status.ok());
    meta::TableFileSchema tableFileSchema2;
    tableFileSchema2.table_id_ = "test2";
    tableFileSchema2.size_ = 345;
    status = impl.CreateTableFile(tableFileSchema2);
    ASSERT_TRUE(status.ok());

    meta::TableFilesSchema filesToUpdate;
    filesToUpdate.emplace_back(tableFileSchema);
    filesToUpdate.emplace_back(tableFileSchema2);
    // BUGFIX: the original populated filesToUpdate but then called
    // UpdateTableFile(tableFileSchema), leaving the batch vector unused and
    // the batch API untested. Exercise the intended batch update instead.
    status = impl.UpdateTableFiles(filesToUpdate);
    ASSERT_TRUE(status.ok());

    // Size() should report the sum of both files' sizes.
    uint64_t resultSize;
    status = impl.Size(resultSize);
    ASSERT_TRUE(status.ok());
    ASSERT_EQ(resultSize, tableFileSchema.size_ + tableFileSchema2.size_);

    uint64_t countResult;
    status = impl.Count(tableFileSchema.table_id_, countResult);
    ASSERT_TRUE(status.ok());

    status = impl.DropAll();
    ASSERT_TRUE(status.ok());
}
// Table ("group") lifecycle: create, describe round-trip, describe of a
// missing table fails, duplicate create fails.
TEST_F(MySQLTest, GROUP_TEST) {
    DBMetaOptions options;
    options.backend_uri = "mysql://root:1234@:/test";
    options.path = "/tmp/vecwise_test";
    meta::MySQLMetaImpl impl(options);

    auto table_id = "meta_test_group";

    meta::TableSchema group;
    group.table_id_ = table_id;
    auto status = impl.CreateTable(group);
    ASSERT_TRUE(status.ok());

    // DescribeTable must restore the id assigned at creation time.
    auto gid = group.id_;
    group.id_ = -1;
    status = impl.DescribeTable(group);
    ASSERT_TRUE(status.ok());
    ASSERT_EQ(group.id_, gid);
    ASSERT_EQ(group.table_id_, table_id);

    // Describing an unknown table must fail.
    group.table_id_ = "not_found";
    status = impl.DescribeTable(group);
    ASSERT_TRUE(!status.ok());

    // Re-creating an existing table must fail.
    group.table_id_ = table_id;
    status = impl.CreateTable(group);
    ASSERT_TRUE(!status.ok());

    status = impl.DropAll();
    ASSERT_TRUE(status.ok());
}
// File lifecycle: CreateTableFile yields NEW, UpdateTableFile persists a type
// change, and DropPartitionsByDates marks files of matching dates TO_DELETE.
TEST_F(MySQLTest, table_file_TEST) {
    DBMetaOptions options;
    options.backend_uri = "mysql://root:1234@:/test";
    options.path = "/tmp/vecwise_test";
    meta::MySQLMetaImpl impl(options);

    auto table_id = "meta_test_group";

    meta::TableSchema group;
    group.table_id_ = table_id;
    auto status = impl.CreateTable(group);

    meta::TableFileSchema table_file;
    table_file.table_id_ = group.table_id_;
    status = impl.CreateTableFile(table_file);
    ASSERT_TRUE(status.ok());
    ASSERT_EQ(table_file.file_type_, meta::TableFileSchema::NEW);

    auto file_id = table_file.file_id_;

    auto new_file_type = meta::TableFileSchema::INDEX;
    table_file.file_type_ = new_file_type;

    status = impl.UpdateTableFile(table_file);
    ASSERT_TRUE(status.ok());
    ASSERT_EQ(table_file.file_type_, new_file_type);

    // Dropping today's partition is rejected.
    meta::DatesT dates;
    dates.push_back(meta::Meta::GetDate());
    status = impl.DropPartitionsByDates(table_file.table_id_, dates);
    ASSERT_FALSE(status.ok());

    // Dropping a range of past dates is allowed.
    dates.clear();
    for (auto i = 2; i < 10; ++i) {
        dates.push_back(meta::Meta::GetDateWithDelta(-1 * i));
    }
    status = impl.DropPartitionsByDates(table_file.table_id_, dates);
    ASSERT_TRUE(status.ok());

    // Move the file into a past partition; it is not yet marked for deletion.
    table_file.date_ = meta::Meta::GetDateWithDelta(-2);
    status = impl.UpdateTableFile(table_file);
    ASSERT_TRUE(status.ok());
    ASSERT_EQ(table_file.date_, meta::Meta::GetDateWithDelta(-2));
    ASSERT_FALSE(table_file.file_type_ == meta::TableFileSchema::TO_DELETE);

    // Dropping exactly that partition flips the file to TO_DELETE.
    dates.clear();
    dates.push_back(table_file.date_);
    status = impl.DropPartitionsByDates(table_file.table_id_, dates);
    ASSERT_TRUE(status.ok());
    status = impl.GetTableFile(table_file);
    ASSERT_TRUE(status.ok());
    ASSERT_TRUE(table_file.file_type_ == meta::TableFileSchema::TO_DELETE);

    status = impl.DropAll();
    ASSERT_TRUE(status.ok());
}
// Age-based archiving: files older than `days_num` days must be flipped to
// TO_DELETE by Archive(); younger files stay NEW.
TEST_F(MySQLTest, ARCHIVE_TEST_DAYS) {
    srand(time(0));
    DBMetaOptions options;
    options.path = "/tmp/vecwise_test";
    // BUGFIX: the original used rand() % 100, which can yield 0 and then make
    // `rand() % (days_num * 2)` below a modulo-by-zero (undefined behavior).
    // Keep days_num strictly positive.
    int days_num = rand() % 100 + 1;
    std::stringstream ss;
    ss << "days:" << days_num;
    options.archive_conf = ArchiveConf("delete", ss.str());
    options.backend_uri = "mysql://root:1234@:/test";

    meta::MySQLMetaImpl impl(options);
    auto table_id = "meta_test_group";
    meta::TableSchema group;
    group.table_id_ = table_id;
    auto status = impl.CreateTable(group);

    meta::TableFilesSchema files;
    meta::TableFileSchema table_file;
    table_file.table_id_ = group.table_id_;

    auto cnt = 100;
    long ts = utils::GetMicroSecTimeStamp();
    std::vector<int> days;

    // Create files whose created_on_ timestamps are scattered around the
    // archive threshold (half younger, half older on average).
    for (auto i = 0; i < cnt; ++i) {
        status = impl.CreateTableFile(table_file);
        table_file.file_type_ = meta::TableFileSchema::NEW;
        int day = rand() % (days_num * 2);
        table_file.created_on_ = ts - day * meta::D_SEC * meta::US_PS - 10000;
        status = impl.UpdateTableFile(table_file);
        files.push_back(table_file);
        days.push_back(day);
    }

    impl.Archive();

    int i = 0;
    for (auto file : files) {
        status = impl.GetTableFile(file);
        ASSERT_TRUE(status.ok());
        if (days[i] < days_num) {
            ASSERT_EQ(file.file_type_, meta::TableFileSchema::NEW);
        } else {
            ASSERT_EQ(file.file_type_, meta::TableFileSchema::TO_DELETE);
        }
        i++;
    }

    status = impl.DropAll();
    ASSERT_TRUE(status.ok());
}
// Disk-quota archiving: with a "disk:11" (GB) limit and ten 2 GB files,
// Archive() must mark the oldest files TO_DELETE until usage fits the quota
// (here: the first five files).
// NOTE(review): reconstructed from an interleaved diff view — confirm against
// the repository copy of MySQLMetaImpl_test.cpp.
TEST_F(MySQLTest, ARCHIVE_TEST_DISK) {
    DBMetaOptions options;
    options.path = "/tmp/vecwise_test";
    options.archive_conf = ArchiveConf("delete", "disk:11");
    options.backend_uri = "mysql://root:1234@:/test";

    meta::MySQLMetaImpl impl(options);
    auto table_id = "meta_test_group";

    meta::TableSchema group;
    group.table_id_ = table_id;
    auto status = impl.CreateTable(group);

    meta::TableFilesSchema files;
    meta::TableFileSchema table_file;
    table_file.table_id_ = group.table_id_;

    auto cnt = 10;
    auto each_size = 2UL;
    for (auto i = 0; i < cnt; ++i) {
        status = impl.CreateTableFile(table_file);
        table_file.file_type_ = meta::TableFileSchema::NEW;
        table_file.size_ = each_size * meta::G;
        status = impl.UpdateTableFile(table_file);
        files.push_back(table_file);
    }

    impl.Archive();

    int i = 0;
    for (auto file : files) {
        status = impl.GetTableFile(file);
        ASSERT_TRUE(status.ok());
        if (i < 5) {
            ASSERT_TRUE(file.file_type_ == meta::TableFileSchema::TO_DELETE);
        } else {
            ASSERT_EQ(file.file_type_, meta::TableFileSchema::NEW);
        }
        ++i;
    }

    status = impl.DropAll();
    ASSERT_TRUE(status.ok());
}
// Query-path coverage: seed known counts of NEW/RAW/TO_INDEX/INDEX files and
// check that FilesToIndex, FilesToMerge, and FilesToSearch each return the
// expected subset sizes.
TEST_F(MySQLTest, TABLE_FILES_TEST) {
    DBMetaOptions options;
    options.backend_uri = "mysql://root:1234@:/test";
    options.path = "/tmp/vecwise_test";
    meta::MySQLMetaImpl impl(options);

    auto table_id = "meta_test_group";

    meta::TableSchema group;
    group.table_id_ = table_id;
    auto status = impl.CreateTable(group);

    int new_files_cnt = 4;
    int raw_files_cnt = 5;
    int to_index_files_cnt = 6;
    int index_files_cnt = 7;

    meta::TableFileSchema table_file;
    table_file.table_id_ = group.table_id_;

    // Seed each file_type population.
    for (auto i = 0; i < new_files_cnt; ++i) {
        status = impl.CreateTableFile(table_file);
        table_file.file_type_ = meta::TableFileSchema::NEW;
        status = impl.UpdateTableFile(table_file);
    }

    for (auto i = 0; i < raw_files_cnt; ++i) {
        status = impl.CreateTableFile(table_file);
        table_file.file_type_ = meta::TableFileSchema::RAW;
        status = impl.UpdateTableFile(table_file);
    }

    for (auto i = 0; i < to_index_files_cnt; ++i) {
        status = impl.CreateTableFile(table_file);
        table_file.file_type_ = meta::TableFileSchema::TO_INDEX;
        status = impl.UpdateTableFile(table_file);
    }

    for (auto i = 0; i < index_files_cnt; ++i) {
        status = impl.CreateTableFile(table_file);
        table_file.file_type_ = meta::TableFileSchema::INDEX;
        status = impl.UpdateTableFile(table_file);
    }

    // FilesToIndex returns exactly the TO_INDEX files.
    meta::TableFilesSchema files;
    status = impl.FilesToIndex(files);
    ASSERT_TRUE(status.ok());
    ASSERT_EQ(files.size(), to_index_files_cnt);

    // FilesToMerge returns exactly the RAW files for the partition date.
    meta::DatePartionedTableFilesSchema dated_files;
    status = impl.FilesToMerge(group.table_id_, dated_files);
    ASSERT_TRUE(status.ok());
    ASSERT_EQ(dated_files[table_file.date_].size(), raw_files_cnt);

    status = impl.FilesToIndex(files);
    ASSERT_TRUE(status.ok());
    ASSERT_EQ(files.size(), to_index_files_cnt);

    // FilesToSearch returns RAW + TO_INDEX + INDEX files (NEW excluded).
    meta::DatesT dates = {table_file.date_};
    status = impl.FilesToSearch(table_id, dates, dated_files);
    ASSERT_TRUE(status.ok());
    ASSERT_EQ(dated_files[table_file.date_].size(),
              to_index_files_cnt + raw_files_cnt + index_files_cnt);

    status = impl.DropAll();
    ASSERT_TRUE(status.ok());
}
cpp/unittest/db/utils.h
浏览文件 @
75a9b4fb
...
@@ -57,7 +57,7 @@ protected:
...
@@ -57,7 +57,7 @@ protected:
virtual
void
TearDown
()
override
;
virtual
void
TearDown
()
override
;
};
};
class
MySQLTest
:
public
DB
Test
{
class
MySQLTest
:
public
::
testing
::
Test
{
protected:
protected:
//
std::shared_ptr<zilliz::milvus::engine::meta::MySQLMetaImpl> impl_;
//
std::shared_ptr<zilliz::milvus::engine::meta::MySQLMetaImpl> impl_;
};
};
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录