未验证 提交 579d9597 编写于 作者: M MRXLT 提交者: GitHub

Merge pull request #202 from guru4elephant/refine_log

Make the inference engine's warning messages use VLOG(2)
......@@ -99,16 +99,16 @@ static void g_change_server_port() {
if (read_proto_conf(FLAGS_inferservice_path.c_str(),
FLAGS_inferservice_file.c_str(),
&conf) != 0) {
LOG(WARNING) << "failed to load configure[" << FLAGS_inferservice_path
<< "," << FLAGS_inferservice_file << "].";
VLOG(2) << "failed to load configure[" << FLAGS_inferservice_path
<< "," << FLAGS_inferservice_file << "].";
return;
}
uint32_t port = conf.port();
if (port != 0) {
FLAGS_port = port;
LOG(INFO) << "use configure[" << FLAGS_inferservice_path << "/"
<< FLAGS_inferservice_file << "] port[" << port
<< "] instead of flags";
VLOG(2) << "use configure[" << FLAGS_inferservice_path << "/"
<< FLAGS_inferservice_file << "] port[" << port
<< "] instead of flags";
}
return;
}
......@@ -157,8 +157,8 @@ int main(int argc, char** argv) {
mkdir(FLAGS_log_dir.c_str(), 0777);
ret = stat(FLAGS_log_dir.c_str(), &st_buf);
if (ret != 0) {
LOG(WARNING) << "Log path " << FLAGS_log_dir
<< " not exist, and create fail";
VLOG(2) << "Log path " << FLAGS_log_dir
<< " not exist, and create fail";
return -1;
}
}
......@@ -166,7 +166,7 @@ int main(int argc, char** argv) {
FLAGS_logbufsecs = 0;
FLAGS_logbuflevel = -1;
#endif
LOG(INFO) << "Succ initialize logger";
VLOG(2) << "Succ initialize logger";
// initialize resource manager
if (Resource::instance().initialize(FLAGS_resource_path,
......@@ -175,7 +175,7 @@ int main(int argc, char** argv) {
<< "/" << FLAGS_resource_file;
return -1;
}
LOG(INFO) << "Succ initialize resource";
VLOG(2) << "Succ initialize resource";
// initialize workflow manager
if (WorkflowManager::instance().initialize(FLAGS_workflow_path,
......@@ -184,7 +184,7 @@ int main(int argc, char** argv) {
<< FLAGS_workflow_path << "/" << FLAGS_workflow_file;
return -1;
}
LOG(INFO) << "Succ initialize workflow";
VLOG(2) << "Succ initialize workflow";
// initialize service manager
if (InferServiceManager::instance().initialize(
......@@ -193,7 +193,7 @@ int main(int argc, char** argv) {
<< FLAGS_inferservice_path << "/" << FLAGS_inferservice_file;
return -1;
}
LOG(INFO) << "Succ initialize inferservice";
VLOG(2) << "Succ initialize inferservice";
int errcode = bthread_set_worker_startfn(pthread_worker_start_fn);
if (errcode != 0) {
......@@ -201,7 +201,7 @@ int main(int argc, char** argv) {
<< errcode << "]";
return -1;
}
LOG(INFO) << "Succ call pthread worker start function";
VLOG(2) << "Succ call pthread worker start function";
if (Resource::instance().cube_initialize(FLAGS_resource_path,
FLAGS_resource_file) != 0) {
......@@ -209,7 +209,7 @@ int main(int argc, char** argv) {
<< FLAGS_resource_file;
return -1;
}
LOG(INFO) << "Succ initialize cube";
VLOG(2) << "Succ initialize cube";
#ifndef BCLOUD
......@@ -220,7 +220,7 @@ int main(int argc, char** argv) {
return -1;
}
LOG(INFO) << "Succ initialize general model";
VLOG(2) << "Succ initialize general model";
// FATAL messages are output to stderr
FLAGS_stderrthreshold = 3;
......@@ -230,7 +230,7 @@ int main(int argc, char** argv) {
LOG(ERROR) << "Failed start server and wait!";
return -1;
}
LOG(INFO) << "Succ start service manager";
VLOG(2) << "Succ start service manager";
if (InferServiceManager::instance().finalize() != 0) {
LOG(ERROR) << "Failed finalize infer service manager.";
......@@ -248,6 +248,6 @@ int main(int argc, char** argv) {
#else
google::ShutdownGoogleLogging();
#endif
LOG(INFO) << "Paddle Inference Server exit successfully!";
VLOG(2) << "Paddle Inference Server exit successfully!";
return 0;
}
......@@ -35,11 +35,11 @@ int StubImpl<T, C, R, I, O>::initialize(const VariantInfo& var,
}
_gchannel = init_channel(var, filter);
LOG(INFO) << "Create stub with tag: " << *tag << ", " << *tag_value
<< ", ep: " << ep;
VLOG(2) << "Create stub with tag: " << *tag << ", " << *tag_value
<< ", ep: " << ep;
} else {
_gchannel = init_channel(var, NULL);
LOG(INFO) << "Create stub without tag, ep " << ep;
VLOG(2) << "Create stub without tag, ep " << ep;
}
if (!_gchannel) {
......@@ -143,7 +143,7 @@ int StubImpl<T, C, R, I, O>::thrd_initialize() {
return -1;
}
LOG(WARNING) << "Succ thread initialize stub impl!";
VLOG(2) << "Succ thread initialize stub impl!";
return 0;
}
......@@ -370,7 +370,7 @@ google::protobuf::RpcChannel* StubImpl<T, C, R, I, O>::init_channel(
// brpc parallel channel
_pchannel = init_pchannel(_channel, _max_channel, _package_size, chn_options);
if (_pchannel) {
LOG(INFO) << "Succ create parallel channel, count: " << _max_channel;
VLOG(2) << "Succ create parallel channel, count: " << _max_channel;
return _pchannel;
}
......@@ -384,21 +384,21 @@ brpc::ParallelChannel* StubImpl<T, C, R, I, O>::init_pchannel(
uint32_t package_size,
const brpc::ChannelOptions& options) {
if (channel_count <= 1) { // noneed use parallel channel
LOG(INFO) << "channel count <= 1, noneed use pchannel.";
VLOG(2) << "channel count <= 1, noneed use pchannel.";
return NULL;
}
_pchannel = butil::get_object<brpc::ParallelChannel>();
if (!_pchannel) {
LOG(FATAL) << "Failed get pchannel from object pool";
VLOG(2) << "Failed get pchannel from object pool";
return NULL;
}
brpc::ParallelChannelOptions pchan_options;
pchan_options.timeout_ms = options.timeout_ms;
if (_pchannel->Init(&pchan_options) != 0) {
LOG(FATAL) << "Failed init parallel channel with tmo_us: "
<< pchan_options.timeout_ms;
VLOG(2) << "Failed init parallel channel with tmo_us: "
<< pchan_options.timeout_ms;
return NULL;
}
......
......@@ -52,9 +52,9 @@ int WeightedRandomRender::initialize(const google::protobuf::Message& conf) {
return -1;
}
LOG(INFO) << "Succ read weights list: " << weights
<< ", count: " << _variant_weight_list.size()
<< ", normalized: " << _normalized_sum;
VLOG(2) << "Succ read weights list: " << weights
<< ", count: " << _variant_weight_list.size()
<< ", normalized: " << _normalized_sum;
} catch (std::bad_cast& e) {
LOG(ERROR) << "Failed init WeightedRandomRender"
<< "from configure, err:" << e.what();
......@@ -87,9 +87,9 @@ Variant* WeightedRandomRender::route(const VariantList& variants) {
for (uint32_t ci = 0; ci < cand_size; ++ci) {
cur_total += _variant_weight_list[ci];
if (sample < cur_total) {
LOG(INFO) << "Sample " << sample << " on " << ci
<< ", _normalized: " << _normalized_sum
<< ", weight: " << _variant_weight_list[ci];
VLOG(2) << "Sample " << sample << " on " << ci
<< ", _normalized: " << _normalized_sum
<< ", weight: " << _variant_weight_list[ci];
return variants[ci];
}
}
......
......@@ -80,8 +80,8 @@ int EndpointConfigManager::load(const std::string& sdk_desc_str) {
LOG(ERROR) << "Failed load configure" << e.what();
return -1;
}
LOG(INFO) << "Success reload endpoint config file, id: "
<< _current_endpointmap_id;
VLOG(2) << "Success reload endpoint config file, id: "
<< _current_endpointmap_id;
return 0;
}
......@@ -128,8 +128,8 @@ int EndpointConfigManager::load() {
LOG(ERROR) << "Failed load configure" << e.what();
return -1;
}
LOG(INFO) << "Success reload endpoint config file, id: "
<< _current_endpointmap_id;
VLOG(2) << "Success reload endpoint config file, id: "
<< _current_endpointmap_id;
return 0;
}
......@@ -181,8 +181,8 @@ int EndpointConfigManager::init_one_endpoint(const configure::Predictor& conf,
return -1;
}
LOG(INFO) << "Succ load one endpoint, name: " << ep.endpoint_name
<< ", count of variants: " << ep.vars.size() << ".";
VLOG(2) << "Succ load one endpoint, name: " << ep.endpoint_name
<< ", count of variants: " << ep.vars.size() << ".";
} catch (std::exception& e) {
LOG(ERROR) << "Exception acccurs when load endpoint conf"
<< ", message: " << e.what();
......@@ -258,7 +258,7 @@ int EndpointConfigManager::merge_variant(const VariantInfo& default_var,
int EndpointConfigManager::parse_tag_values(SplitParameters& split) {
split.tag_values.clear();
if (!split.split_tag.init || !split.tag_cands_str.init) {
LOG(WARNING) << "split info not set, skip...";
VLOG(2) << "split info not set, skip...";
return 0;
}
......
......@@ -35,8 +35,8 @@ int Endpoint::initialize(const EndpointInfo& ep_info) {
return -1;
}
_variant_list.push_back(var);
LOG(INFO) << "Succ create variant: " << vi
<< ", endpoint:" << _endpoint_name;
VLOG(2) << "Succ create variant: " << vi
<< ", endpoint:" << _endpoint_name;
}
return 0;
......@@ -51,7 +51,7 @@ int Endpoint::thrd_initialize() {
return -1;
}
}
LOG(WARNING) << "Succ thrd initialize all vars: " << var_size;
VLOG(2) << "Succ thrd initialize all vars: " << var_size;
return 0;
}
......
......@@ -25,7 +25,7 @@ int PredictorApi::register_all() {
return -1;
}
LOG(WARNING) << "Succ register all components!";
VLOG(2) << "Succ register all components!";
return 0;
}
......@@ -66,8 +66,8 @@ int PredictorApi::create(const std::string & api_desc_str) {
return -1;
}
LOG(INFO) << "Succ create endpoint instance with name: "
<< ep_info.endpoint_name;
VLOG(2) << "Succ create endpoint instance with name: "
<< ep_info.endpoint_name;
}
return 0;
......@@ -101,7 +101,7 @@ int PredictorApi::create(const char* path, const char* file) {
return -1;
}
LOG(INFO) << "endpoint name: " << ep_info.endpoint_name;
VLOG(2) << "endpoint name: " << ep_info.endpoint_name;
std::pair<std::map<std::string, Endpoint*>::iterator, bool> r =
_endpoints.insert(std::make_pair(ep_info.endpoint_name, ep));
......@@ -110,8 +110,8 @@ int PredictorApi::create(const char* path, const char* file) {
return -1;
}
LOG(INFO) << "Succ create endpoint instance with name: "
<< ep_info.endpoint_name;
VLOG(2) << "Succ create endpoint instance with name: "
<< ep_info.endpoint_name;
}
return 0;
......@@ -126,7 +126,7 @@ int PredictorApi::thrd_initialize() {
return -1;
}
LOG(WARNING) << "Succ thrd initialize endpoint:" << it->first;
VLOG(2) << "Succ thrd initialize endpoint:" << it->first;
}
return 0;
}
......@@ -152,7 +152,7 @@ int PredictorApi::thrd_finalize() {
return -1;
}
LOG(INFO) << "Succ thrd finalize endpoint:" << it->first;
VLOG(2) << "Succ thrd finalize endpoint:" << it->first;
}
return 0;
}
......
......@@ -53,7 +53,7 @@ int Variant::initialize(const EndpointInfo& ep_info,
}
if (_stub_map.size() > 0) {
LOG(INFO) << "Initialize variants from VariantInfo"
VLOG(2) << "Initialize variants from VariantInfo"
<< ", stubs count: " << _stub_map.size();
return 0;
}
......@@ -66,7 +66,7 @@ int Variant::initialize(const EndpointInfo& ep_info,
}
_default_stub = stub;
LOG(INFO) << "Succ create default debug";
VLOG(2) << "Succ create default debug";
return 0;
}
......@@ -82,10 +82,10 @@ int Variant::thrd_initialize() {
LOG(ERROR) << "Failed thrd initialize stub: " << iter->first;
return -1;
}
LOG(INFO) << "Succ thrd initialize stub:" << iter->first;
VLOG(2) << "Succ thrd initialize stub:" << iter->first;
}
LOG(WARNING) << "Succ thrd initialize all stubs";
VLOG(2) << "Succ thrd initialize all stubs";
return 0;
}
......
......@@ -138,7 +138,7 @@ class FluidCpuAnalysisCore : public FluidFamilyCore {
return -1;
}
LOG(WARNING) << "create paddle predictor sucess, path: " << data_path;
VLOG(2) << "create paddle predictor sucess, path: " << data_path;
return 0;
}
};
......@@ -169,7 +169,7 @@ class FluidCpuNativeCore : public FluidFamilyCore {
return -1;
}
LOG(WARNING) << "create paddle predictor sucess, path: " << data_path;
VLOG(2) << "create paddle predictor sucess, path: " << data_path;
return 0;
}
};
......@@ -202,7 +202,7 @@ class FluidCpuAnalysisDirCore : public FluidFamilyCore {
return -1;
}
LOG(WARNING) << "create paddle predictor sucess, path: " << data_path;
VLOG(2) << "create paddle predictor sucess, path: " << data_path;
return 0;
}
};
......@@ -231,7 +231,7 @@ class FluidCpuNativeDirCore : public FluidFamilyCore {
return -1;
}
LOG(WARNING) << "create paddle predictor sucess, path: " << data_path;
VLOG(2) << "create paddle predictor sucess, path: " << data_path;
return 0;
}
};
......@@ -240,7 +240,7 @@ class Parameter {
public:
Parameter() : _row(0), _col(0), _params(NULL) {}
~Parameter() {
LOG(INFO) << "before destroy Parameter, file_name[" << _file_name << "]";
VLOG(2) << "before destroy Parameter, file_name[" << _file_name << "]";
destroy();
}
......@@ -254,7 +254,7 @@ class Parameter {
LOG(ERROR) << "Load " << _file_name << " malloc error.";
return -1;
}
LOG(WARNING) << "Load parameter file[" << _file_name << "] success.";
VLOG(2) << "Load parameter file[" << _file_name << "] success.";
return 0;
}
......@@ -296,7 +296,7 @@ class Parameter {
fclose(fs);
fs = NULL;
}
LOG(INFO) << "load " << _file_name << " read ok.";
VLOG(2) << "load " << _file_name << " read ok.";
return 0;
} else {
LOG(ERROR) << "load " << _file_name << " read error.";
......@@ -329,13 +329,13 @@ class SigmoidModel {
LOG(ERROR) << "load params sigmoid_w failed.";
return -1;
}
LOG(WARNING) << "load sigmoid_w [" << _sigmoid_w._params[0] << "] ["
<< _sigmoid_w._params[1] << "].";
VLOG(2) << "load sigmoid_w [" << _sigmoid_w._params[0] << "] ["
<< _sigmoid_w._params[1] << "].";
if (0 != _sigmoid_b.init(2, 1, sigmoid_b_file) || 0 != _sigmoid_b.load()) {
LOG(ERROR) << "load params sigmoid_b failed.";
return -1;
}
LOG(WARNING) << "load sigmoid_b [" << _sigmoid_b._params[0] << "] ["
VLOG(2) << "load sigmoid_b [" << _sigmoid_b._params[0] << "] ["
<< _sigmoid_b._params[1] << "].";
_exp_max_input = exp_max;
_exp_min_input = exp_min;
......@@ -412,8 +412,8 @@ class FluidCpuWithSigmoidCore : public FluidFamilyCore {
float exp_max = conf.exp_max_input();
float exp_min = conf.exp_min_input();
_core->_sigmoid_core.reset(new SigmoidModel);
LOG(INFO) << "create sigmoid core[" << _core->_sigmoid_core.get()
<< "], use count[" << _core->_sigmoid_core.use_count() << "].";
VLOG(2) << "create sigmoid core[" << _core->_sigmoid_core.get()
<< "], use count[" << _core->_sigmoid_core.use_count() << "].";
ret = _core->_sigmoid_core->load(
sigmoid_w_file, sigmoid_b_file, exp_max, exp_min);
if (ret < 0) {
......@@ -444,8 +444,8 @@ class FluidCpuWithSigmoidCore : public FluidFamilyCore {
LOG(ERROR) << "fail to clone paddle predictor: " << origin_core;
return -1;
}
LOG(INFO) << "clone sigmoid core[" << _core->_sigmoid_core.get()
<< "] use count[" << _core->_sigmoid_core.use_count() << "].";
VLOG(2) << "clone sigmoid core[" << _core->_sigmoid_core.get()
<< "] use count[" << _core->_sigmoid_core.use_count() << "].";
return 0;
}
......@@ -487,7 +487,7 @@ class FluidCpuNativeDirWithSigmoidCore : public FluidCpuWithSigmoidCore {
return -1;
}
LOG(WARNING) << "create paddle predictor sucess, path: " << data_path;
VLOG(2) << "create paddle predictor sucess, path: " << data_path;
return 0;
}
};
......@@ -520,7 +520,7 @@ class FluidCpuAnalysisDirWithSigmoidCore : public FluidCpuWithSigmoidCore {
return -1;
}
LOG(WARNING) << "create paddle predictor sucess, path: " << data_path;
VLOG(2) << "create paddle predictor sucess, path: " << data_path;
return 0;
}
};
......
......@@ -143,7 +143,7 @@ class FluidGpuAnalysisCore : public FluidFamilyCore {
return -1;
}
LOG(WARNING) << "create paddle predictor sucess, path: " << data_path;
VLOG(2) << "create paddle predictor sucess, path: " << data_path;
return 0;
}
};
......@@ -173,7 +173,7 @@ class FluidGpuNativeCore : public FluidFamilyCore {
return -1;
}
LOG(WARNING) << "create paddle predictor sucess, path: " << data_path;
VLOG(2) << "create paddle predictor sucess, path: " << data_path;
return 0;
}
};
......@@ -206,7 +206,7 @@ class FluidGpuAnalysisDirCore : public FluidFamilyCore {
return -1;
}
LOG(WARNING) << "create paddle predictor sucess, path: " << data_path;
VLOG(2) << "create paddle predictor sucess, path: " << data_path;
return 0;
}
};
......@@ -235,7 +235,7 @@ class FluidGpuNativeDirCore : public FluidFamilyCore {
return -1;
}
LOG(WARNING) << "create paddle predictor sucess, path: " << data_path;
VLOG(2) << "create paddle predictor sucess, path: " << data_path;
return 0;
}
};
......@@ -258,7 +258,7 @@ class Parameter {
LOG(ERROR) << "Load " << _file_name << " malloc error.";
return -1;
}
LOG(WARNING) << "Load parameter file[" << _file_name << "] success.";
VLOG(2) << "Load parameter file[" << _file_name << "] success.";
return 0;
}
......@@ -333,13 +333,13 @@ class SigmoidModel {
LOG(ERROR) << "load params sigmoid_w failed.";
return -1;
}
LOG(WARNING) << "load sigmoid_w [" << _sigmoid_w._params[0] << "] ["
VLOG(2) << "load sigmoid_w [" << _sigmoid_w._params[0] << "] ["
<< _sigmoid_w._params[1] << "].";
if (0 != _sigmoid_b.init(2, 1, sigmoid_b_file) || 0 != _sigmoid_b.load()) {
LOG(ERROR) << "load params sigmoid_b failed.";
return -1;
}
LOG(WARNING) << "load sigmoid_b [" << _sigmoid_b._params[0] << "] ["
VLOG(2) << "load sigmoid_b [" << _sigmoid_b._params[0] << "] ["
<< _sigmoid_b._params[1] << "].";
_exp_max_input = exp_max;
_exp_min_input = exp_min;
......@@ -491,7 +491,7 @@ class FluidGpuNativeDirWithSigmoidCore : public FluidGpuWithSigmoidCore {
return -1;
}
LOG(WARNING) << "create paddle predictor sucess, path: " << data_path;
VLOG(2) << "create paddle predictor sucess, path: " << data_path;
return 0;
}
};
......@@ -524,7 +524,7 @@ class FluidGpuAnalysisDirWithSigmoidCore : public FluidGpuWithSigmoidCore {
return -1;
}
LOG(WARNING) << "create paddle predictor sucess, path: " << data_path;
VLOG(2) << "create paddle predictor sucess, path: " << data_path;
return 0;
}
};
......
......@@ -32,5 +32,5 @@ for pass_id in range(30):
fetch_list=[avg_loss])
serving_io.save_model(
"serving_server_model", "serving_client_conf",
{"x": x}, {"y": y_predict}, fluid.default_main_program())
"uci_housing_model", "uci_housing_client",
{"x": x}, {"price": y_predict}, fluid.default_main_program())
......@@ -46,18 +46,17 @@ if __name__ == "__main__":
dataset.set_use_var([data, label])
pipe_command = "python imdb_reader.py"
dataset.set_pipe_command(pipe_command)
dataset.set_batch_size(4)
dataset.set_batch_size(128)
dataset.set_filelist(filelist)
dataset.set_thread(10)
from nets import cnn_net
avg_cost, acc, prediction = cnn_net(data, label, dict_dim)
from nets import bow_net
avg_cost, acc, prediction = bow_net(data, label, dict_dim)
optimizer = fluid.optimizer.SGD(learning_rate=0.01)
optimizer.minimize(avg_cost)
exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
epochs = 6
save_dirname = "cnn_model"
import paddle_serving_client.io as serving_io
......@@ -67,9 +66,5 @@ if __name__ == "__main__":
logger.info("TRAIN --> pass: {}".format(i))
if i == 5:
serving_io.save_model("serving_server_model", "serving_client_conf",
{"words": data,
"label": label}, {
"cost": avg_cost,
"acc": acc,
"prediction": prediction
}, fluid.default_main_program())
{"words": data}, {"prediction": prediction},
fluid.default_main_program())
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册