提交 447d970d 编写于 作者: T TeslaZhao

Update conflict

上级 e90079ae
......@@ -55,7 +55,7 @@ int CubeCache::reload_data(const std::string& cache_path) {
// loading data from cache files
if (stat(cache_path.c_str(), &st) < 0 || !S_ISDIR(st.st_mode)) {
LOG(ERROR) << "invalid cache path " << cache_path;
LOG(WARNING) << "No cube cache directory " << cache_path << " provided, ignore it";
return -1;
}
if ((dp = opendir(cache_path.c_str())) == nullptr) {
......
......@@ -96,6 +96,10 @@ int ServerManager::start_and_wait() {
LOG(ERROR) << "Failed to start Paddle Inference Server";
return -1;
}
std::cout << "C++ Serving service started successfully!" << std::endl;
LOG(INFO) << "C++ Serving service started successfully!";
_server.RunUntilAskedToQuit();
ServerManager::stop_reloader();
......
......@@ -233,7 +233,7 @@ def serve_args():
"--request_cache_size",
type=int,
default=0,
help="Port of the Prometheus")
help="Max request cache size")
parser.add_argument(
"--use_dist_model",
default=False,
......@@ -274,11 +274,6 @@ def serve_args():
default=False,
action="store_true",
help="The master serving of distributed inference")
parser.add_argument(
"--request_cache_size",
type=int,
default=0,
help="Port of the Prometheus")
parser.add_argument(
"--min_subgraph_size",
type=str,
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册