Commit 55e9b227 authored by J jinhai

Merge branch 'branch-0.3.1' into 'branch-0.3.1'

MS-260 refine log

See merge request megasearch/milvus!253

Former-commit-id: 9dd6a019fd738d244dce60f5ab4f8c5ba95d4159
......@@ -34,6 +34,7 @@ Please mark all change in change log and use the ticket from JIRA.
- MS-245 - Improve search result transfer performance
- MS-248 - Support AddVector/SearchVector profiling
- MS-256 - Add more cache config
- MS-260 - Refine log
## New Feature
- MS-180 - Add new mem manager
......
......@@ -67,7 +67,8 @@ void Cache::insert(const std::string& key, const DataObjPtr& data_ptr) {
usage_ += data_ptr->size();
}
SERVER_LOG_DEBUG << "Insert " << key << " size:" << data_ptr->size() << " into cache, usage: " << usage_;
SERVER_LOG_DEBUG << "Insert " << key << " size:" << data_ptr->size()
<< " bytes into cache, usage: " << usage_ << " bytes";
}
if (usage_ > capacity_) {
......@@ -198,8 +199,8 @@ void Cache::print() {
}
SERVER_LOG_DEBUG << "[Cache item count]: " << cache_count;
SERVER_LOG_DEBUG << "[Cache usage(byte)]: " << usage_;
SERVER_LOG_DEBUG << "[Cache capacity(byte)]: " << capacity_;
SERVER_LOG_DEBUG << "[Cache usage]: " << usage_ << " bytes";
SERVER_LOG_DEBUG << "[Cache capacity]: " << capacity_ << " bytes";
}
} // cache
......
......@@ -209,9 +209,10 @@ Status DBImpl::Query(const std::string& table_id, const std::vector<std::string>
Status DBImpl::QueryAsync(const std::string& table_id, const meta::TableFilesSchema& files,
uint64_t k, uint64_t nq, const float* vectors,
const meta::DatesT& dates, QueryResults& results) {
server::TimeRecorder rc("");
//step 1: get files to search
ENGINE_LOG_DEBUG << "Search DateT Size=" << files.size();
ENGINE_LOG_DEBUG << "Engine query begin, index file count:" << files.size() << " date range count:" << dates.size();
SearchContextPtr context = std::make_shared<SearchContext>(k, nq, vectors);
for (auto &file : files) {
TableFileSchemaPtr file_ptr = std::make_shared<meta::TableFileSchema>(file);
......@@ -224,8 +225,31 @@ Status DBImpl::QueryAsync(const std::string& table_id, const meta::TableFilesSch
context->WaitResult();
//step 3: construct results
//step 3: print time cost information
double load_cost = context->LoadCost();
double search_cost = context->SearchCost();
double reduce_cost = context->ReduceCost();
std::string load_info = server::TimeRecorder::GetTimeSpanStr(load_cost);
std::string search_info = server::TimeRecorder::GetTimeSpanStr(search_cost);
std::string reduce_info = server::TimeRecorder::GetTimeSpanStr(reduce_cost);
if(search_cost > 0.0 || reduce_cost > 0.0) {
double total_cost = load_cost + search_cost + reduce_cost;
double load_percent = load_cost/total_cost;
double search_percent = search_cost/total_cost;
double reduce_percent = reduce_cost/total_cost;
ENGINE_LOG_DEBUG << "Engine load index totally cost:" << load_info << " percent: " << load_percent*100 << "%";
ENGINE_LOG_DEBUG << "Engine search index totally cost:" << search_info << " percent: " << search_percent*100 << "%";
ENGINE_LOG_DEBUG << "Engine reduce topk totally cost:" << reduce_info << " percent: " << reduce_percent*100 << "%";
} else {
ENGINE_LOG_DEBUG << "Engine load cost:" << load_info
<< " search cost: " << search_info
<< " reduce cost: " << reduce_info;
}
//step 4: construct results
results = context->GetResult();
rc.ElapseFromBegin("Engine query totally cost");
return Status::OK();
}
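
For reference, a minimal standalone sketch (not part of the patch) of the percentage breakdown that the new logging in DBImpl::QueryAsync performs; the numeric values below are made up, and the variable names simply mirror the patch:

```cpp
#include <iostream>

// Illustrative only: compute what fraction of the total query time was spent
// loading index files, searching, and reducing topk, as the new log lines do.
int main() {
    double load_cost = 1200.0;    // microseconds spent loading index files
    double search_cost = 8500.0;  // microseconds spent searching
    double reduce_cost = 300.0;   // microseconds spent reducing topk

    double total_cost = load_cost + search_cost + reduce_cost;
    if (total_cost > 0.0) {
        std::cout << "load: "   << (load_cost / total_cost) * 100.0   << "%, "
                  << "search: " << (search_cost / total_cost) * 100.0 << "%, "
                  << "reduce: " << (reduce_cost / total_cost) * 100.0 << "%\n";
    }
    return 0;
}
```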
......@@ -359,7 +383,7 @@ Status DBImpl::MergeFiles(const std::string& table_id, const meta::DateT& date,
updated.push_back(table_file);
status = meta_ptr_->UpdateTableFiles(updated);
ENGINE_LOG_DEBUG << "New merged file " << table_file.file_id_ <<
" of size=" << index->PhysicalSize()/(1024*1024) << " M";
" of size " << index->PhysicalSize() << " bytes";
if(options_.insert_cache_immediately_) {
index->Cache();
......@@ -499,7 +523,7 @@ Status DBImpl::BuildIndex(const meta::TableFileSchema& file) {
meta_ptr_->UpdateTableFiles(update_files);
ENGINE_LOG_DEBUG << "New index file " << table_file.file_id_ << " of size "
<< index->PhysicalSize()/(1024*1024) << " M"
<< index->PhysicalSize() << " bytes"
<< " from file " << to_remove.file_id_;
if(options_.insert_cache_immediately_) {
......
......@@ -865,7 +865,7 @@ Status DBMetaImpl::CleanUpFilesWithTTL(uint16_t seconds) {
table_file.date_ = std::get<3>(file);
utils::DeleteTableFilePath(options_, table_file);
ENGINE_LOG_DEBUG << "Removing deleted id =" << table_file.id_ << " location = " << table_file.location_ << std::endl;
ENGINE_LOG_DEBUG << "Removing file id:" << table_file.id_ << " location:" << table_file.location_;
ConnectorPtr->remove<TableFileSchema>(table_file.id_);
}
......
......@@ -149,10 +149,11 @@ Status FaissExecutionEngine::Search(long n,
std::shared_ptr<faiss::IndexIVF> ivf_index = std::dynamic_pointer_cast<faiss::IndexIVF>(pIndex_);
if(ivf_index) {
ENGINE_LOG_DEBUG << "Index type: IVFFLAT nProbe: " << nprobe_;
ENGINE_LOG_DEBUG << "Searching index type: " << build_index_type_ << " nProbe: " << nprobe_;
ivf_index->nprobe = nprobe_;
ivf_index->search(n, data, k, distances, labels);
} else {
ENGINE_LOG_DEBUG << "Searching raw file";
pIndex_->search(n, data, k, distances, labels);
}
......
......@@ -83,9 +83,8 @@ Status MemVectors::Serialize(std::string &table_id) {
auto status = meta_->UpdateTableFile(schema_);
LOG(DEBUG) << "New " << ((schema_.file_type_ == meta::TableFileSchema::RAW) ? "raw" : "to_index")
<< " file " << schema_.file_id_ << " of size " << (double) (active_engine_->Size()) / (double) meta::M
<< " M";
ENGINE_LOG_DEBUG << "New " << ((schema_.file_type_ == meta::TableFileSchema::RAW) ? "raw" : "to_index")
<< " file " << schema_.file_id_ << " of size " << active_engine_->Size() << " bytes";
if(options_.insert_cache_immediately_) {
active_engine_->Cache();
......
......@@ -95,8 +95,8 @@ Status MemTableFile::Serialize() {
auto status = meta_->UpdateTableFile(table_file_schema_);
LOG(DEBUG) << "New " << ((table_file_schema_.file_type_ == meta::TableFileSchema::RAW) ? "raw" : "to_index")
<< " file " << table_file_schema_.file_id_ << " of size " << (double) size / (double) M << " M";
ENGINE_LOG_DEBUG << "New " << ((table_file_schema_.file_type_ == meta::TableFileSchema::RAW) ? "raw" : "to_index")
<< " file " << table_file_schema_.file_id_ << " of size " << size << " bytes";
if(options_.insert_cache_immediately_) {
execution_engine_->Cache();
......
......@@ -1578,8 +1578,7 @@ Status MySQLMetaImpl::CleanUpFilesWithTTL(uint16_t seconds) {
utils::DeleteTableFilePath(options_, table_file);
ENGINE_LOG_DEBUG << "Removing deleted id =" << table_file.id_ << " location = "
<< table_file.location_ << std::endl;
ENGINE_LOG_DEBUG << "Removing file id:" << table_file.id_ << " location:" << table_file.location_;
idsToDelete.emplace_back(std::to_string(table_file.id_));
}
......
......@@ -37,9 +37,19 @@ public:
const ResultSet& GetResult() const { return result_; }
ResultSet& GetResult() { return result_; }
std::string Identity() const { return identity_; }
void IndexSearchDone(size_t index_id);
void WaitResult();
void AccumLoadCost(double span) { time_cost_load_ += span; }
void AccumSearchCost(double span) { time_cost_search_ += span; }
void AccumReduceCost(double span) { time_cost_reduce_ += span; }
double LoadCost() const { return time_cost_load_; }
double SearchCost() const { return time_cost_search_; }
double ReduceCost() const { return time_cost_reduce_; }
private:
uint64_t topk_ = 0;
uint64_t nq_ = 0;
......@@ -52,6 +62,10 @@ private:
std::condition_variable done_cond_;
std::string identity_; //for debug
double time_cost_load_ = 0.0; //time cost for load all index files, unit: us
double time_cost_search_ = 0.0; //time cost for entire search, unit: us
double time_cost_reduce_ = 0.0; //time cost for entire reduce, unit: us
};
using SearchContextPtr = std::shared_ptr<SearchContext>;
......
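
A hedged sketch of how the new accumulators are intended to be used by the scheduler tasks (the IndexLoadTask and SearchTask hunks below do essentially this); project headers and namespace qualifiers are omitted, and the function name is illustrative:

```cpp
// Illustrative only (assumes the project's SearchContext.h and TimeRecorder.h
// are included): a task times each section with TimeRecorder and feeds the
// spans, in microseconds, into the shared SearchContext so that
// DBImpl::QueryAsync can report the load/search/reduce breakdown afterwards.
void AccumulateCostsExample(const SearchContextPtr& context) {
    server::TimeRecorder rc("");

    // ... load the index file here ...
    double span = rc.RecordSection("load index file");
    context->AccumLoadCost(span);

    // ... execute the search here ...
    span = rc.RecordSection("do search");
    context->AccumSearchCost(span);

    // ... reduce the topk result here ...
    span = rc.RecordSection("reduce topk");
    context->AccumReduceCost(span);
}
```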
......@@ -41,20 +41,21 @@ IndexLoadTask::IndexLoadTask()
}
std::shared_ptr<IScheduleTask> IndexLoadTask::Execute() {
ENGINE_LOG_DEBUG << "Loading index(" << file_->id_ << ") from location: " << file_->location_;
server::TimeRecorder rc("Load index");
server::TimeRecorder rc("");
//step 1: load index
ExecutionEnginePtr index_ptr = EngineFactory::Build(file_->dimension_,
file_->location_,
(EngineType)file_->engine_type_);
index_ptr->Load();
rc.Record("load index file to memory");
size_t file_size = index_ptr->PhysicalSize();
ENGINE_LOG_DEBUG << "Index file type " << file_->file_type_ << " Of Size: "
<< file_size/(1024*1024) << " M";
std::string info = "Load file id:" + std::to_string(file_->id_) + " file type:" + std::to_string(file_->file_type_)
+ " size:" + std::to_string(file_size) + " bytes from location: " + file_->location_ + " totally cost";
double span = rc.ElapseFromBegin(info);
for(auto& context : search_contexts_) {
context->AccumLoadCost(span);
}
CollectFileMetrics(file_->file_type_, file_size);
......
......@@ -51,10 +51,10 @@ std::shared_ptr<IScheduleTask> SearchTask::Execute() {
return nullptr;
}
SERVER_LOG_DEBUG << "Searching in index(" << index_id_<< ") with "
SERVER_LOG_DEBUG << "Searching in file id:" << index_id_<< " with "
<< search_contexts_.size() << " tasks";
server::TimeRecorder rc("DoSearch index(" + std::to_string(index_id_) + ")");
server::TimeRecorder rc("DoSearch file id:" + std::to_string(index_id_));
auto start_time = METRICS_NOW_TIME;
......@@ -71,17 +71,19 @@ std::shared_ptr<IScheduleTask> SearchTask::Execute() {
index_engine_->Search(context->nq(), context->vectors(), inner_k, output_distence.data(),
output_ids.data());
rc.Record("do search");
double span = rc.RecordSection("do search for context:" + context->Identity());
context->AccumSearchCost(span);
//step 3: cluster result
SearchContext::ResultSet result_set;
auto spec_k = index_engine_->Count() < context->topk() ? index_engine_->Count() : context->topk();
SearchTask::ClusterResult(output_ids, output_distence, context->nq(), spec_k, result_set);
rc.Record("cluster result");
//step 4: pick up topk result
SearchTask::TopkResult(result_set, inner_k, metric_l2, context->GetResult());
rc.Record("reduce topk");
span = rc.RecordSection("reduce topk for context:" + context->Identity());
context->AccumReduceCost(span);
} catch (std::exception& ex) {
SERVER_LOG_ERROR << "SearchTask encounter exception: " << ex.what();
......@@ -97,7 +99,7 @@ std::shared_ptr<IScheduleTask> SearchTask::Execute() {
auto total_time = METRICS_MICROSECONDS(start_time, end_time);
CollectDurationMetrics(index_type_, total_time);
rc.Elapse("totally cost");
rc.ElapseFromBegin("totally cost");
return nullptr;
}
......
......@@ -194,7 +194,7 @@ ServerError CreateTableTask::OnExecute() {
return SetError(SERVER_UNEXPECTED_ERROR, ex.what());
}
rc.Record("done");
rc.ElapseFromBegin("totally cost");
return SERVER_SUCCESS;
}
......@@ -239,7 +239,7 @@ ServerError DescribeTableTask::OnExecute() {
return SetError(SERVER_UNEXPECTED_ERROR, ex.what());
}
rc.Record("done");
rc.ElapseFromBegin("totally cost");
return SERVER_SUCCESS;
}
......@@ -277,7 +277,7 @@ ServerError BuildIndexTask::OnExecute() {
return SetError(SERVER_BUILD_INDEX_ERROR, "Engine failed: " + stat.ToString());
}
rc.Elapse("totally cost");
rc.ElapseFromBegin("totally cost");
} catch (std::exception& ex) {
return SetError(SERVER_UNEXPECTED_ERROR, ex.what());
}
......@@ -314,7 +314,7 @@ ServerError HasTableTask::OnExecute() {
return SetError(DB_META_TRANSACTION_FAILED, "Engine failed: " + stat.ToString());
}
rc.Elapse("totally cost");
rc.ElapseFromBegin("totally cost");
} catch (std::exception& ex) {
return SetError(SERVER_UNEXPECTED_ERROR, ex.what());
}
......@@ -356,8 +356,6 @@ ServerError DeleteTableTask::OnExecute() {
}
}
rc.Record("check validation");
//step 3: delete table
std::vector<DB_DATE> dates;
stat = DBWrapper::DB()->DeleteTable(table_name_, dates);
......@@ -365,8 +363,7 @@ ServerError DeleteTableTask::OnExecute() {
return SetError(DB_META_TRANSACTION_FAILED, "Engine failed: " + stat.ToString());
}
rc.Record("deleta table");
rc.Elapse("total cost");
rc.ElapseFromBegin("totally cost");
} catch (std::exception& ex) {
return SetError(SERVER_UNEXPECTED_ERROR, ex.what());
}
......@@ -444,7 +441,7 @@ ServerError AddVectorTask::OnExecute() {
}
}
rc.Record("check validation");
rc.RecordSection("check validation");
#ifdef MILVUS_ENABLE_PROFILING
std::string fname = "/tmp/insert_" + std::to_string(this->record_array_.size()) +
......@@ -461,12 +458,11 @@ ServerError AddVectorTask::OnExecute() {
return SetError(error_code, error_msg);
}
rc.Record("prepare vectors data");
rc.RecordSection("prepare vectors data");
//step 4: insert vectors
uint64_t vec_count = (uint64_t)record_array_.size();
stat = DBWrapper::DB()->InsertVectors(table_name_, vec_count, vec_f.data(), record_ids_);
rc.Record("add vectors to engine");
if(!stat.ok()) {
return SetError(SERVER_CACHE_ERROR, "Cache error: " + stat.ToString());
}
......@@ -481,8 +477,8 @@ ServerError AddVectorTask::OnExecute() {
ProfilerStop();
#endif
rc.Record("do insert");
rc.Elapse("total cost");
rc.RecordSection("add vectors to engine");
rc.ElapseFromBegin("totally cost");
} catch (std::exception& ex) {
return SetError(SERVER_UNEXPECTED_ERROR, ex.what());
......@@ -517,7 +513,7 @@ ServerError SearchVectorTaskBase::OnExecute() {
return SetError(res, "Invalid table name: " + table_name_);
}
if(top_k_ <= 0) {
if(top_k_ <= 0 || top_k_ > 1024) {
return SetError(SERVER_INVALID_TOPK, "Invalid topk: " + std::to_string(top_k_));
}
if(record_array_.empty()) {
......@@ -545,7 +541,7 @@ ServerError SearchVectorTaskBase::OnExecute() {
return SetError(error_code, error_msg);
}
rc.Record("check validation");
double span_check = rc.RecordSection("check validation");
#ifdef MILVUS_ENABLE_PROFILING
std::string fname = "/tmp/search_nq_" + std::to_string(this->record_array_.size()) +
......@@ -561,7 +557,7 @@ ServerError SearchVectorTaskBase::OnExecute() {
return SetError(error_code, error_msg);
}
rc.Record("prepare vector data");
double span_prepare = rc.RecordSection("prepare vector data");
//step 4: search vectors
engine::QueryResults results;
......@@ -573,7 +569,7 @@ ServerError SearchVectorTaskBase::OnExecute() {
stat = DBWrapper::DB()->Query(table_name_, file_id_array_, (size_t) top_k_, record_count, vec_f.data(), dates, results);
}
rc.Record("search vectors from engine");
double span_search = rc.RecordSection("search vectors from engine");
if(!stat.ok()) {
return SetError(DB_META_TRANSACTION_FAILED, "Engine failed: " + stat.ToString());
}
......@@ -588,8 +584,6 @@ ServerError SearchVectorTaskBase::OnExecute() {
return SetError(SERVER_ILLEGAL_SEARCH_RESULT, msg);
}
rc.Record("do search");
//step 5: construct result array
ConstructResult(results);
......@@ -597,8 +591,15 @@ ServerError SearchVectorTaskBase::OnExecute() {
ProfilerStop();
#endif
rc.Record("construct result");
rc.Elapse("total cost");
double span_result = rc.RecordSection("construct result");
rc.ElapseFromBegin("totally cost");
//step 6: print time cost percent
double total_cost = span_check + span_prepare + span_search + span_result;
SERVER_LOG_DEBUG << "SearchVectorTask: " << "check validation(" << (span_check/total_cost)*100.0 << "%)"
<< " prepare data(" << (span_prepare/total_cost)*100.0 << "%)"
<< " search(" << (span_search/total_cost)*100.0 << "%)"
<< " construct result(" << (span_result/total_cost)*100.0 << "%)";
} catch (std::exception& ex) {
return SetError(SERVER_UNEXPECTED_ERROR, ex.what());
......@@ -733,7 +734,7 @@ ServerError GetTableRowCountTask::OnExecute() {
row_count_ = (int64_t) row_count;
rc.Elapse("total cost");
rc.ElapseFromBegin("totally cost");
} catch (std::exception& ex) {
return SetError(SERVER_UNEXPECTED_ERROR, ex.what());
......
......@@ -12,131 +12,81 @@ namespace milvus {
namespace server {
TimeRecorder::TimeRecorder(const std::string &header,
TimeRecorder::TimeDisplayUnit unit,
int64_t log_level) :
header_(header),
time_unit_(unit),
log_level_(log_level) {
header_(header),
log_level_(log_level) {
start_ = last_ = stdclock::now();
span_ = 0.0;
}
std::string
TimeRecorder::GetTimeSpanStr(TimeRecorder::TimeDisplayUnit &unit, double span) const {
std::string spanStr;
std::string unitStr;
TimeRecorder::~TimeRecorder() {
}
switch (unit) {
case TimeRecorder::eTimeAutoUnit: {
if (span >= 1000000) {
int64_t t = (int64_t) span;
int64_t hour, minute;
double second;
hour = t / 1000000 / 3600;
t -= hour * 3600 * 1000000;
minute = t / 1000000 / 60;
t -= minute * 60 * 1000000;
second = t * 0.000001;
spanStr += (hour < 10 ? "0" : "") + std::to_string(hour) + ":";
spanStr += (minute < 10 ? "0" : "") + std::to_string(minute) + ":";
spanStr += (second < 10 ? "0" : "") + std::to_string(second);
unitStr = "";
} else if (span >= 1000) {
spanStr = std::to_string(span * 0.001);
unitStr = " ms";
} else {
spanStr = std::to_string(span);
unitStr = " us";
}
}
break;
case TimeRecorder::eTimeHourUnit:
spanStr = std::to_string((span * 0.000001) / 3600);
unitStr = " hour";
break;
case TimeRecorder::eTimeMinuteUnit:
spanStr = std::to_string((span * 0.000001) / 60);
unitStr = " min";
break;
case TimeRecorder::eTimeSecondUnit:
spanStr = std::to_string(span * 0.000001);
unitStr = " sec";
break;
case TimeRecorder::eTimeMilliSecUnit:
spanStr = std::to_string(span * 0.001);
unitStr = " ms";
break;
case TimeRecorder::eTimeMicroSecUnit:
default:
spanStr = std::to_string(span);
unitStr = " us";
break;
}
std::string
TimeRecorder::GetTimeSpanStr(double span) {
std::string str_sec = std::to_string(span * 0.000001) + ((span > 1000000) ? " seconds" : " second");
std::string str_ms = std::to_string(span * 0.001) + " ms";
return spanStr + unitStr;
return str_sec + " [" + str_ms + "]";
}
void
TimeRecorder::PrintTimeRecord(const std::string &msg, double span) {
std::string strLog;
if (!header_.empty()) strLog += header_ + ": ";
strLog += msg;
strLog += " (";
strLog += GetTimeSpanStr(time_unit_, span);
strLog += ")";
std::string str_log;
if (!header_.empty()) str_log += header_ + ": ";
str_log += msg;
str_log += " (";
str_log += TimeRecorder::GetTimeSpanStr(span);
str_log += ")";
switch (log_level_) {
case 0: {
SERVER_LOG_TRACE << strLog;
SERVER_LOG_TRACE << str_log;
break;
}
case 1: {
SERVER_LOG_DEBUG << strLog;
SERVER_LOG_DEBUG << str_log;
break;
}
case 2: {
SERVER_LOG_INFO << strLog;
SERVER_LOG_INFO << str_log;
break;
}
case 3: {
SERVER_LOG_WARNING << strLog;
SERVER_LOG_WARNING << str_log;
break;
}
case 4: {
SERVER_LOG_ERROR << strLog;
SERVER_LOG_ERROR << str_log;
break;
}
case 5: {
SERVER_LOG_FATAL << strLog;
SERVER_LOG_FATAL << str_log;
break;
}
default: {
SERVER_LOG_INFO << strLog;
SERVER_LOG_INFO << str_log;
break;
}
}
}
void
TimeRecorder::Record(const std::string &msg) {
double
TimeRecorder::RecordSection(const std::string &msg) {
stdclock::time_point curr = stdclock::now();
span_ = (std::chrono::duration<double, std::micro>(curr - last_)).count();
double span = (std::chrono::duration<double, std::micro>(curr - last_)).count();
last_ = curr;
PrintTimeRecord(msg, span_);
PrintTimeRecord(msg, span);
return span;
}
void
TimeRecorder::Elapse(const std::string &msg) {
double
TimeRecorder::ElapseFromBegin(const std::string &msg) {
stdclock::time_point curr = stdclock::now();
span_ = (std::chrono::duration<double, std::micro>(curr - start_)).count();
PrintTimeRecord(msg, span_);
}
double span = (std::chrono::duration<double, std::micro>(curr - start_)).count();
double
TimeRecorder::Span() {
return span_;
PrintTimeRecord(msg, span);
return span;
}
}
......
......@@ -17,36 +17,24 @@ class TimeRecorder {
using stdclock = std::chrono::high_resolution_clock;
public:
enum TimeDisplayUnit {
eTimeAutoUnit = 0,
eTimeHourUnit,
eTimeMinuteUnit,
eTimeSecondUnit,
eTimeMilliSecUnit,
eTimeMicroSecUnit,
};
TimeRecorder(const std::string &header,
TimeRecorder::TimeDisplayUnit unit = TimeRecorder::eTimeAutoUnit,
int64_t log_level = 1); //trace = 0, debug = 1, info = 2, warn = 3, error = 4, critical = 5
int64_t log_level = 1);
void Record(const std::string &msg);
~TimeRecorder();//trace = 0, debug = 1, info = 2, warn = 3, error = 4, critical = 5
void Elapse(const std::string &msg);
double RecordSection(const std::string &msg);
double Span();
double ElapseFromBegin(const std::string &msg);
private:
std::string GetTimeSpanStr(TimeRecorder::TimeDisplayUnit &unit, double span) const;
static std::string GetTimeSpanStr(double span);
private:
void PrintTimeRecord(const std::string &msg, double span);
private:
std::string header_;
TimeRecorder::TimeDisplayUnit time_unit_;
stdclock::time_point start_;
stdclock::time_point last_;
double span_;
int64_t log_level_;
};
......
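
A small usage sketch of the refactored TimeRecorder interface, which replaces the old Record/Elapse/Span trio with section/elapsed calls that also return the span; DoStepOne/DoStepTwo and the message strings here are placeholders, not code from the patch:

```cpp
// Illustrative usage; DoStepOne/DoStepTwo stand in for real work.
void DoStepOne();
void DoStepTwo();

void TimeRecorderUsageExample() {
    server::TimeRecorder rc("ExampleTask");           // header prefixed to every log line

    DoStepOne();
    double step1_us = rc.RecordSection("step one");   // microseconds since construction

    DoStepTwo();
    double step2_us = rc.RecordSection("step two");   // microseconds since the previous RecordSection

    double total_us = rc.ElapseFromBegin("totally cost");  // microseconds since construction
    (void)step1_us; (void)step2_us; (void)total_us;
}
```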
......@@ -61,7 +61,6 @@ ValidateTableIndexType(int32_t index_type) {
return SERVER_INVALID_INDEX_TYPE;
}
SERVER_LOG_DEBUG << "Index type: " << index_type;
return SERVER_SUCCESS;
}
......
......@@ -19,15 +19,6 @@ namespace {
static const std::string LOG_FILE_PATH = "./milvus/conf/log_config.conf";
using TimeUnit = server::TimeRecorder::TimeDisplayUnit;
double TestTimeRecorder(TimeUnit unit, int64_t log_level, int64_t sleep_ms) {
server::TimeRecorder rc("test rc", unit, log_level);
rc.Record("begin");
std::this_thread::sleep_for(std::chrono::milliseconds(sleep_ms));
rc.Elapse("end");
return rc.Span();
}
}
TEST(UtilTest, EXCEPTION_TEST) {
......@@ -124,23 +115,6 @@ TEST(UtilTest, STRINGFUNCTIONS_TEST) {
}
TEST(UtilTest, TIMERECORDER_TEST) {
double span = TestTimeRecorder(TimeUnit::eTimeAutoUnit, 0, 1001);
ASSERT_GT(span, 0.0);
span = TestTimeRecorder(TimeUnit::eTimeAutoUnit, 0, 101);
ASSERT_GT(span, 0.0);
span = TestTimeRecorder(TimeUnit::eTimeHourUnit, 1, 10);
ASSERT_GT(span, 0.0);
span = TestTimeRecorder(TimeUnit::eTimeMinuteUnit, 2, 10);
ASSERT_GT(span, 0.0);
span = TestTimeRecorder(TimeUnit::eTimeSecondUnit, 3, 10);
ASSERT_GT(span, 0.0);
span = TestTimeRecorder(TimeUnit::eTimeMilliSecUnit, 4, 10);
ASSERT_GT(span, 0.0);
span = TestTimeRecorder(TimeUnit::eTimeMicroSecUnit, -1, 10);
ASSERT_GT(span, 0.0);
}
TEST(UtilTest, BLOCKINGQUEUE_TEST) {
server::BlockingQueue<std::string> bq;
......