提交 82f090d6 编写于 作者: F felixhjh

remove lac test case, add fit a line test case

上级 e9278e8a
[{"pid": 21703, "port": [9494], "model": "['uci_housing_model']", "start_time": 1637930597.8987885}]
\ No newline at end of file
serving.instance-hk6cehl7-2.invalid-user.log.INFO.20211126-124318.21847
\ No newline at end of file
serving.instance-hk6cehl7-2.invalid-user.log.WARNING.20211126-124318.21847
\ No newline at end of file
Log file created at: 2021/11/26 12:43:12
Running on machine: instance-hk6cehl7-2
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
E1126 12:43:12.788040 21784 cache.cpp:58] invalid cache path uci_housing_model/cube_cache
Log file created at: 2021/11/26 12:43:12
Running on machine: instance-hk6cehl7-2
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
W1126 12:43:12.729789 21781 resource.cpp:98] Successfully proc initialized mempool wrapper
I1126 12:43:12.730144 21781 infer.cpp:410] model_toolkit_conf.engines(0).name: general_infer_0
W1126 12:43:12.730182 21781 infer.cpp:43] Succ load model:uci_housing_model
W1126 12:43:12.730197 21781 infer.cpp:235] Succ proc initialize version engine: 18446744073709551615
W1126 12:43:12.730206 21781 infer.cpp:200] Succ proc initialize engine: general_infer_0
W1126 12:43:12.730217 21781 infer.cpp:430] Succ proc initialize engine: general_infer_0
W1126 12:43:12.730257 21781 kv_manager.h:70] general_infer_0:
W1126 12:43:12.730265 21781 kv_manager.h:72] Succ proc initialize kvmanager for engine: general_infer_0
I1126 12:43:12.732128 21781 dag.cpp:158] DAG: workflow1, Op Num: 3
I1126 12:43:12.732151 21781 dag.cpp:161] OP-1-general_reader_0-GeneralReaderOp depends: 0
I1126 12:43:12.732157 21781 dag.cpp:161] OP-2-general_infer_0-GeneralInferOp depends: 1
I1126 12:43:12.732163 21781 dag.cpp:166] general_reader_0 0
I1126 12:43:12.732169 21781 dag.cpp:161] OP-3-general_response_0-GeneralResponseOp depends: 1
I1126 12:43:12.732175 21781 dag.cpp:166] general_infer_0 0
I1126 12:43:12.732182 21781 dag.cpp:169]
I1126 12:43:12.732192 21781 manager.h:86] Succ init item:workflow1 from conf:workdir_9494/workflow.prototxt, at:0!
W1126 12:43:12.732260 21781 service.cpp:50] Succ get merger: default for service: GeneralModelService
I1126 12:43:12.732300 21781 service.cpp:62] service[GeneralModelService], enable_map_request_to_workflow[0].
I1126 12:43:12.732440 21781 predictor_metric.h:170] try to regist latency metric[workflow_GeneralModelService_workflow1].
I1126 12:43:12.732558 21781 predictor_metric.h:175] succ to regist latency metric[workflow_GeneralModelService_workflow1].
I1126 12:43:12.732568 21781 predictor_metric.h:170] try to regist latency metric[stage_GeneralModelService_workflow1_0].
I1126 12:43:12.732621 21781 predictor_metric.h:175] succ to regist latency metric[stage_GeneralModelService_workflow1_0].
I1126 12:43:12.732630 21781 predictor_metric.h:170] try to regist latency metric[op_GeneralModelService_workflow1_0_general_reader_0].
I1126 12:43:12.732689 21781 predictor_metric.h:175] succ to regist latency metric[op_GeneralModelService_workflow1_0_general_reader_0].
I1126 12:43:12.732699 21781 predictor_metric.h:170] try to regist latency metric[stage_GeneralModelService_workflow1_1].
I1126 12:43:12.732753 21781 predictor_metric.h:175] succ to regist latency metric[stage_GeneralModelService_workflow1_1].
I1126 12:43:12.732760 21781 predictor_metric.h:170] try to regist latency metric[op_GeneralModelService_workflow1_1_general_infer_0].
I1126 12:43:12.732811 21781 predictor_metric.h:175] succ to regist latency metric[op_GeneralModelService_workflow1_1_general_infer_0].
I1126 12:43:12.732820 21781 predictor_metric.h:170] try to regist latency metric[stage_GeneralModelService_workflow1_2].
I1126 12:43:12.732887 21781 predictor_metric.h:175] succ to regist latency metric[stage_GeneralModelService_workflow1_2].
I1126 12:43:12.732894 21781 predictor_metric.h:170] try to regist latency metric[op_GeneralModelService_workflow1_2_general_response_0].
I1126 12:43:12.732959 21781 predictor_metric.h:175] succ to regist latency metric[op_GeneralModelService_workflow1_2_general_response_0].
I1126 12:43:12.732968 21781 service.cpp:127] Succ load infer_service: GeneralModelService!
I1126 12:43:12.732975 21781 manager.h:184] Succ init item:GeneralModelService from conf:workdir_9494/infer_service.prototxt, at:0!
I1126 12:43:12.733331 21783 server.cpp:148] Entrence reload worker, interval_s: 10
I1126 12:43:12.733381 21783 server.cpp:151] Begin reload framework...
W1126 12:43:12.733402 21783 infer.cpp:287] Succ reload version engine: 18446744073709551615
I1126 12:43:12.733415 21783 manager.h:131] Finish reload 1 workflow(s)
W1126 12:43:12.736976 21784 memory.cpp:78] Succ thread initialize mempool wrapper
W1126 12:43:12.737040 21784 resource.cpp:302] Successfully thread initialized mempool wrapper
W1126 12:43:12.737097 21785 memory.cpp:78] Succ thread initialize mempool wrapper
W1126 12:43:12.737553 21788 memory.cpp:78] Succ thread initialize mempool wrapper
W1126 12:43:12.737778 21788 resource.cpp:302] Successfully thread initialized mempool wrapper
W1126 12:43:12.737504 21787 memory.cpp:78] Succ thread initialize mempool wrapper
W1126 12:43:12.737922 21791 memory.cpp:78] Succ thread initialize mempool wrapper
W1126 12:43:12.737401 21784 infer.h:431] tid:21784 Loading clone model ...
W1126 12:43:12.737659 21789 memory.cpp:78] Succ thread initialize mempool wrapper
W1126 12:43:12.738379 21789 resource.cpp:302] Successfully thread initialized mempool wrapper
W1126 12:43:12.737917 21790 memory.cpp:78] Succ thread initialize mempool wrapper
W1126 12:43:12.740527 21790 resource.cpp:302] Successfully thread initialized mempool wrapper
W1126 12:43:12.738045 21787 resource.cpp:302] Successfully thread initialized mempool wrapper
W1126 12:43:12.738178 21791 resource.cpp:302] Successfully thread initialized mempool wrapper
W1126 12:43:12.738246 21792 memory.cpp:78] Succ thread initialize mempool wrapper
W1126 12:43:12.742715 21792 resource.cpp:302] Successfully thread initialized mempool wrapper
W1126 12:43:12.737437 21786 memory.cpp:78] Succ thread initialize mempool wrapper
W1126 12:43:12.742803 21786 resource.cpp:302] Successfully thread initialized mempool wrapper
W1126 12:43:12.737576 21785 resource.cpp:302] Successfully thread initialized mempool wrapper
W1126 12:43:12.738313 21793 memory.cpp:78] Succ thread initialize mempool wrapper
W1126 12:43:12.744876 21793 resource.cpp:302] Successfully thread initialized mempool wrapper
W1126 12:43:12.738449 21794 memory.cpp:78] Succ thread initialize mempool wrapper
W1126 12:43:12.744952 21794 resource.cpp:302] Successfully thread initialized mempool wrapper
I1126 12:43:12.785578 21784 analysis_predictor.cc:668] ir_optim is turned off, no IR pass will be executed
I1126 12:43:12.787087 21784 memory_optimize_pass.cc:214] Cluster name : fc_0.tmp_0 size: 4
I1126 12:43:12.787122 21784 memory_optimize_pass.cc:214] Cluster name : x size: 52
I1126 12:43:12.787132 21784 memory_optimize_pass.cc:214] Cluster name : fc_0.tmp_1 size: 4
I1126 12:43:12.787883 21784 analysis_predictor.cc:717] ======= optimize end =======
I1126 12:43:12.787925 21784 naive_executor.cc:98] --- skip [feed], feed -> x
I1126 12:43:12.787981 21784 naive_executor.cc:98] --- skip [fc_0.tmp_1], fetch -> fetch
I1126 12:43:12.788025 21784 cache.cpp:48] cube cache is loading data, path: uci_housing_model/cube_cache
E1126 12:43:12.788040 21784 cache.cpp:58] invalid cache path uci_housing_model/cube_cache
W1126 12:43:12.788201 21784 infer.h:444] create cube cache[0] done.
W1126 12:43:12.788215 21784 infer.h:478] [21784] Reload clone model and cube cache done. switching to current_idx[0]
W1126 12:43:12.788246 21784 infer.cpp:255] Succ thrd initialize version engine: 18446744073709551615
W1126 12:43:12.788259 21784 infer.cpp:441] Succ thrd initialize engine, name: general_infer_0
W1126 12:43:12.788265 21788 infer.h:431] tid:21788 Loading clone model ...
I1126 12:43:12.788548 21788 naive_executor.cc:98] --- skip [feed], feed -> x
I1126 12:43:12.788621 21788 naive_executor.cc:98] --- skip [fc_0.tmp_1], fetch -> fetch
W1126 12:43:12.788641 21788 infer.h:471] tid:21788 clone caches done
W1126 12:43:12.788655 21788 infer.h:478] [21788] Reload clone model and cube cache done. switching to current_idx[0]
W1126 12:43:12.788681 21788 infer.cpp:255] Succ thrd initialize version engine: 18446744073709551615
W1126 12:43:12.788694 21788 infer.cpp:441] Succ thrd initialize engine, name: general_infer_0
W1126 12:43:12.788720 21789 infer.h:431] tid:21789 Loading clone model ...
I1126 12:43:12.790380 21789 naive_executor.cc:98] --- skip [feed], feed -> x
I1126 12:43:12.791141 21789 naive_executor.cc:98] --- skip [fc_0.tmp_1], fetch -> fetch
W1126 12:43:12.791312 21789 infer.h:471] tid:21789 clone caches done
W1126 12:43:12.791709 21789 infer.h:478] [21789] Reload clone model and cube cache done. switching to current_idx[0]
I1126 12:43:12.794636 21781 server.cpp:1046] Server[baidu::paddle_serving::predictor::general_model::GeneralModelServiceImpl] is serving on port=9494.
W1126 12:43:12.794720 21789 infer.cpp:255] Succ thrd initialize version engine: 18446744073709551615
W1126 12:43:12.794885 21789 infer.cpp:441] Succ thrd initialize engine, name: general_infer_0
W1126 12:43:12.794756 21790 infer.h:431] tid:21790 Loading clone model ...
I1126 12:43:12.794817 21781 server.cpp:1049] Check out http://instance-hk6cehl7-2:9494 in web browser.
I1126 12:43:12.796110 21790 naive_executor.cc:98] --- skip [feed], feed -> x
I1126 12:43:12.796280 21790 naive_executor.cc:98] --- skip [fc_0.tmp_1], fetch -> fetch
W1126 12:43:12.796314 21790 infer.h:471] tid:21790 clone caches done
W1126 12:43:12.796952 21790 infer.h:478] [21790] Reload clone model and cube cache done. switching to current_idx[0]
W1126 12:43:12.797003 21790 infer.cpp:255] Succ thrd initialize version engine: 18446744073709551615
W1126 12:43:12.797027 21790 infer.cpp:441] Succ thrd initialize engine, name: general_infer_0
W1126 12:43:12.797030 21787 infer.h:431] tid:21787 Loading clone model ...
I1126 12:43:12.797370 21787 naive_executor.cc:98] --- skip [feed], feed -> x
I1126 12:43:12.797439 21787 naive_executor.cc:98] --- skip [fc_0.tmp_1], fetch -> fetch
W1126 12:43:12.797475 21787 infer.h:471] tid:21787 clone caches done
W1126 12:43:12.797495 21787 infer.h:478] [21787] Reload clone model and cube cache done. switching to current_idx[0]
W1126 12:43:12.797523 21787 infer.cpp:255] Succ thrd initialize version engine: 18446744073709551615
W1126 12:43:12.797538 21787 infer.cpp:441] Succ thrd initialize engine, name: general_infer_0
W1126 12:43:12.797538 21791 infer.h:431] tid:21791 Loading clone model ...
I1126 12:43:12.797962 21791 naive_executor.cc:98] --- skip [feed], feed -> x
I1126 12:43:12.798038 21791 naive_executor.cc:98] --- skip [fc_0.tmp_1], fetch -> fetch
W1126 12:43:12.798058 21791 infer.h:471] tid:21791 clone caches done
W1126 12:43:12.798074 21791 infer.h:478] [21791] Reload clone model and cube cache done. switching to current_idx[0]
W1126 12:43:12.798102 21791 infer.cpp:255] Succ thrd initialize version engine: 18446744073709551615
W1126 12:43:12.798118 21791 infer.cpp:441] Succ thrd initialize engine, name: general_infer_0
W1126 12:43:12.798132 21792 infer.h:431] tid:21792 Loading clone model ...
I1126 12:43:12.798424 21792 naive_executor.cc:98] --- skip [feed], feed -> x
I1126 12:43:12.798504 21792 naive_executor.cc:98] --- skip [fc_0.tmp_1], fetch -> fetch
W1126 12:43:12.798525 21792 infer.h:471] tid:21792 clone caches done
W1126 12:43:12.798542 21792 infer.h:478] [21792] Reload clone model and cube cache done. switching to current_idx[0]
W1126 12:43:12.798570 21792 infer.cpp:255] Succ thrd initialize version engine: 18446744073709551615
W1126 12:43:12.798586 21792 infer.cpp:441] Succ thrd initialize engine, name: general_infer_0
W1126 12:43:12.798593 21786 infer.h:431] tid:21786 Loading clone model ...
I1126 12:43:12.798916 21786 naive_executor.cc:98] --- skip [feed], feed -> x
I1126 12:43:12.798980 21786 naive_executor.cc:98] --- skip [fc_0.tmp_1], fetch -> fetch
W1126 12:43:12.799000 21786 infer.h:471] tid:21786 clone caches done
W1126 12:43:12.799015 21786 infer.h:478] [21786] Reload clone model and cube cache done. switching to current_idx[0]
W1126 12:43:12.799044 21786 infer.cpp:255] Succ thrd initialize version engine: 18446744073709551615
W1126 12:43:12.799059 21786 infer.cpp:441] Succ thrd initialize engine, name: general_infer_0
W1126 12:43:12.799073 21785 infer.h:431] tid:21785 Loading clone model ...
I1126 12:43:12.799361 21785 naive_executor.cc:98] --- skip [feed], feed -> x
I1126 12:43:12.799427 21785 naive_executor.cc:98] --- skip [fc_0.tmp_1], fetch -> fetch
W1126 12:43:12.799448 21785 infer.h:471] tid:21785 clone caches done
W1126 12:43:12.799481 21785 infer.h:478] [21785] Reload clone model and cube cache done. switching to current_idx[0]
W1126 12:43:12.799513 21785 infer.cpp:255] Succ thrd initialize version engine: 18446744073709551615
W1126 12:43:12.799604 21785 infer.cpp:441] Succ thrd initialize engine, name: general_infer_0
W1126 12:43:12.799535 21793 infer.h:431] tid:21793 Loading clone model ...
I1126 12:43:12.799911 21793 naive_executor.cc:98] --- skip [feed], feed -> x
I1126 12:43:12.799966 21793 naive_executor.cc:98] --- skip [fc_0.tmp_1], fetch -> fetch
W1126 12:43:12.799984 21793 infer.h:471] tid:21793 clone caches done
W1126 12:43:12.800000 21793 infer.h:478] [21793] Reload clone model and cube cache done. switching to current_idx[0]
W1126 12:43:12.800026 21793 infer.cpp:255] Succ thrd initialize version engine: 18446744073709551615
W1126 12:43:12.800042 21793 infer.cpp:441] Succ thrd initialize engine, name: general_infer_0
W1126 12:43:12.800058 21794 infer.h:431] tid:21794 Loading clone model ...
I1126 12:43:12.800364 21794 naive_executor.cc:98] --- skip [feed], feed -> x
I1126 12:43:12.800477 21794 naive_executor.cc:98] --- skip [fc_0.tmp_1], fetch -> fetch
W1126 12:43:12.800499 21794 infer.h:471] tid:21794 clone caches done
W1126 12:43:12.800518 21794 infer.h:478] [21794] Reload clone model and cube cache done. switching to current_idx[0]
W1126 12:43:12.800535 21794 infer.cpp:255] Succ thrd initialize version engine: 18446744073709551615
W1126 12:43:12.800550 21794 infer.cpp:441] Succ thrd initialize engine, name: general_infer_0
I1126 12:43:17.148336 21793 general_model_service.pb.cc:3319] (logid=0) remote_side=[127.0.0.1:48280]
I1126 12:43:17.154588 21793 general_model_service.pb.cc:3320] (logid=0) local_side=[127.0.0.1:9494]
I1126 12:43:17.154615 21793 general_model_service.pb.cc:3321] (logid=0) service_name=[GeneralModelService]
I1126 12:43:17.159812 21793 op.cpp:164] (logid=0) general_reader_0_time=[2091]
I1126 12:43:17.160607 21793 op.cpp:164] (logid=0) general_infer_0_time=[684]
I1126 12:43:17.160679 21793 op.cpp:164] (logid=0) general_response_0_time=[23]
I1126 12:43:17.160709 21793 service.cpp:263] (logid=0) workflow total time: 6057
I1126 12:43:17.160810 21793 general_model_service.pb.cc:3343] [serving]logid=0,cost=12.418ms.
Log file created at: 2021/11/26 12:43:18
Running on machine: instance-hk6cehl7-2
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
W1126 12:43:18.049825 21847 resource.cpp:98] Successfully proc initialized mempool wrapper
I1126 12:43:18.050318 21847 infer.cpp:410] model_toolkit_conf.engines(0).name: general_infer_0
W1126 12:43:18.050359 21847 infer.cpp:43] Succ load model:uci_housing_model
W1126 12:43:18.050374 21847 infer.cpp:235] Succ proc initialize version engine: 18446744073709551615
W1126 12:43:18.050384 21847 infer.cpp:200] Succ proc initialize engine: general_infer_0
W1126 12:43:18.050395 21847 infer.cpp:430] Succ proc initialize engine: general_infer_0
W1126 12:43:18.050432 21847 kv_manager.h:70] general_infer_0:
W1126 12:43:18.050443 21847 kv_manager.h:72] Succ proc initialize kvmanager for engine: general_infer_0
I1126 12:43:18.052297 21847 dag.cpp:158] DAG: workflow1, Op Num: 3
I1126 12:43:18.052320 21847 dag.cpp:161] OP-1-general_reader_0-GeneralReaderOp depends: 0
I1126 12:43:18.052327 21847 dag.cpp:161] OP-2-general_infer_0-GeneralInferOp depends: 1
I1126 12:43:18.052345 21847 dag.cpp:166] general_reader_0 0
I1126 12:43:18.052359 21847 dag.cpp:161] OP-3-general_response_0-GeneralResponseOp depends: 1
I1126 12:43:18.052369 21847 dag.cpp:166] general_infer_0 0
I1126 12:43:18.052377 21847 dag.cpp:169]
I1126 12:43:18.052388 21847 manager.h:86] Succ init item:workflow1 from conf:workdir_9494/workflow.prototxt, at:0!
W1126 12:43:18.052489 21847 service.cpp:50] Succ get merger: default for service: GeneralModelService
I1126 12:43:18.052532 21847 service.cpp:62] service[GeneralModelService], enable_map_request_to_workflow[0].
I1126 12:43:18.052670 21847 predictor_metric.h:170] try to regist latency metric[workflow_GeneralModelService_workflow1].
I1126 12:43:18.052788 21847 predictor_metric.h:175] succ to regist latency metric[workflow_GeneralModelService_workflow1].
I1126 12:43:18.052799 21847 predictor_metric.h:170] try to regist latency metric[stage_GeneralModelService_workflow1_0].
I1126 12:43:18.052850 21847 predictor_metric.h:175] succ to regist latency metric[stage_GeneralModelService_workflow1_0].
I1126 12:43:18.052873 21847 predictor_metric.h:170] try to regist latency metric[op_GeneralModelService_workflow1_0_general_reader_0].
I1126 12:43:18.052927 21847 predictor_metric.h:175] succ to regist latency metric[op_GeneralModelService_workflow1_0_general_reader_0].
I1126 12:43:18.052937 21847 predictor_metric.h:170] try to regist latency metric[stage_GeneralModelService_workflow1_1].
I1126 12:43:18.052989 21847 predictor_metric.h:175] succ to regist latency metric[stage_GeneralModelService_workflow1_1].
I1126 12:43:18.052996 21847 predictor_metric.h:170] try to regist latency metric[op_GeneralModelService_workflow1_1_general_infer_0].
I1126 12:43:18.053045 21847 predictor_metric.h:175] succ to regist latency metric[op_GeneralModelService_workflow1_1_general_infer_0].
I1126 12:43:18.053056 21847 predictor_metric.h:170] try to regist latency metric[stage_GeneralModelService_workflow1_2].
I1126 12:43:18.053120 21847 predictor_metric.h:175] succ to regist latency metric[stage_GeneralModelService_workflow1_2].
I1126 12:43:18.053128 21847 predictor_metric.h:170] try to regist latency metric[op_GeneralModelService_workflow1_2_general_response_0].
I1126 12:43:18.053182 21847 predictor_metric.h:175] succ to regist latency metric[op_GeneralModelService_workflow1_2_general_response_0].
I1126 12:43:18.053193 21847 service.cpp:127] Succ load infer_service: GeneralModelService!
I1126 12:43:18.053201 21847 manager.h:184] Succ init item:GeneralModelService from conf:workdir_9494/infer_service.prototxt, at:0!
I1126 12:43:18.053576 21849 server.cpp:148] Entrence reload worker, interval_s: 10
I1126 12:43:18.053624 21849 server.cpp:151] Begin reload framework...
W1126 12:43:18.053643 21849 infer.cpp:287] Succ reload version engine: 18446744073709551615
I1126 12:43:18.053654 21849 manager.h:131] Finish reload 1 workflow(s)
W1126 12:43:18.057180 21850 memory.cpp:78] Succ thread initialize mempool wrapper
W1126 12:43:18.057214 21850 resource.cpp:302] Successfully thread initialized mempool wrapper
W1126 12:43:18.057308 21851 memory.cpp:78] Succ thread initialize mempool wrapper
W1126 12:43:18.057515 21850 infer.h:431] tid:21850 Loading clone model ...
W1126 12:43:18.057585 21853 memory.cpp:78] Succ thread initialize mempool wrapper
W1126 12:43:18.057629 21851 resource.cpp:302] Successfully thread initialized mempool wrapper
W1126 12:43:18.057780 21852 memory.cpp:78] Succ thread initialize mempool wrapper
W1126 12:43:18.057869 21854 memory.cpp:78] Succ thread initialize mempool wrapper
W1126 12:43:18.058123 21855 memory.cpp:78] Succ thread initialize mempool wrapper
W1126 12:43:18.058228 21855 resource.cpp:302] Successfully thread initialized mempool wrapper
W1126 12:43:18.057819 21853 resource.cpp:302] Successfully thread initialized mempool wrapper
W1126 12:43:18.058167 21854 resource.cpp:302] Successfully thread initialized mempool wrapper
W1126 12:43:18.058022 21852 resource.cpp:302] Successfully thread initialized mempool wrapper
W1126 12:43:18.058274 21856 memory.cpp:78] Succ thread initialize mempool wrapper
W1126 12:43:18.058522 21857 memory.cpp:78] Succ thread initialize mempool wrapper
W1126 12:43:18.058579 21856 resource.cpp:302] Successfully thread initialized mempool wrapper
W1126 12:43:18.058593 21858 memory.cpp:78] Succ thread initialize mempool wrapper
W1126 12:43:18.058630 21857 resource.cpp:302] Successfully thread initialized mempool wrapper
W1126 12:43:18.058743 21858 resource.cpp:302] Successfully thread initialized mempool wrapper
W1126 12:43:18.059038 21859 memory.cpp:78] Succ thread initialize mempool wrapper
W1126 12:43:18.059110 21860 memory.cpp:78] Succ thread initialize mempool wrapper
W1126 12:43:18.062522 21860 resource.cpp:302] Successfully thread initialized mempool wrapper
W1126 12:43:18.060531 21859 resource.cpp:302] Successfully thread initialized mempool wrapper
Log file created at: 2021/11/26 12:43:12
Running on machine: instance-hk6cehl7-2
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
W1126 12:43:12.729789 21781 resource.cpp:98] Successfully proc initialized mempool wrapper
W1126 12:43:12.730182 21781 infer.cpp:43] Succ load model:uci_housing_model
W1126 12:43:12.730197 21781 infer.cpp:235] Succ proc initialize version engine: 18446744073709551615
W1126 12:43:12.730206 21781 infer.cpp:200] Succ proc initialize engine: general_infer_0
W1126 12:43:12.730217 21781 infer.cpp:430] Succ proc initialize engine: general_infer_0
W1126 12:43:12.730257 21781 kv_manager.h:70] general_infer_0:
W1126 12:43:12.730265 21781 kv_manager.h:72] Succ proc initialize kvmanager for engine: general_infer_0
W1126 12:43:12.732260 21781 service.cpp:50] Succ get merger: default for service: GeneralModelService
W1126 12:43:12.733402 21783 infer.cpp:287] Succ reload version engine: 18446744073709551615
W1126 12:43:12.736976 21784 memory.cpp:78] Succ thread initialize mempool wrapper
W1126 12:43:12.737040 21784 resource.cpp:302] Successfully thread initialized mempool wrapper
W1126 12:43:12.737097 21785 memory.cpp:78] Succ thread initialize mempool wrapper
W1126 12:43:12.737553 21788 memory.cpp:78] Succ thread initialize mempool wrapper
W1126 12:43:12.737778 21788 resource.cpp:302] Successfully thread initialized mempool wrapper
W1126 12:43:12.737504 21787 memory.cpp:78] Succ thread initialize mempool wrapper
W1126 12:43:12.737922 21791 memory.cpp:78] Succ thread initialize mempool wrapper
W1126 12:43:12.737401 21784 infer.h:431] tid:21784 Loading clone model ...
W1126 12:43:12.737659 21789 memory.cpp:78] Succ thread initialize mempool wrapper
W1126 12:43:12.738379 21789 resource.cpp:302] Successfully thread initialized mempool wrapper
W1126 12:43:12.737917 21790 memory.cpp:78] Succ thread initialize mempool wrapper
W1126 12:43:12.740527 21790 resource.cpp:302] Successfully thread initialized mempool wrapper
W1126 12:43:12.738045 21787 resource.cpp:302] Successfully thread initialized mempool wrapper
W1126 12:43:12.738178 21791 resource.cpp:302] Successfully thread initialized mempool wrapper
W1126 12:43:12.738246 21792 memory.cpp:78] Succ thread initialize mempool wrapper
W1126 12:43:12.742715 21792 resource.cpp:302] Successfully thread initialized mempool wrapper
W1126 12:43:12.737437 21786 memory.cpp:78] Succ thread initialize mempool wrapper
W1126 12:43:12.742803 21786 resource.cpp:302] Successfully thread initialized mempool wrapper
W1126 12:43:12.737576 21785 resource.cpp:302] Successfully thread initialized mempool wrapper
W1126 12:43:12.738313 21793 memory.cpp:78] Succ thread initialize mempool wrapper
W1126 12:43:12.744876 21793 resource.cpp:302] Successfully thread initialized mempool wrapper
W1126 12:43:12.738449 21794 memory.cpp:78] Succ thread initialize mempool wrapper
W1126 12:43:12.744952 21794 resource.cpp:302] Successfully thread initialized mempool wrapper
E1126 12:43:12.788040 21784 cache.cpp:58] invalid cache path uci_housing_model/cube_cache
W1126 12:43:12.788201 21784 infer.h:444] create cube cache[0] done.
W1126 12:43:12.788215 21784 infer.h:478] [21784] Reload clone model and cube cache done. switching to current_idx[0]
W1126 12:43:12.788246 21784 infer.cpp:255] Succ thrd initialize version engine: 18446744073709551615
W1126 12:43:12.788259 21784 infer.cpp:441] Succ thrd initialize engine, name: general_infer_0
W1126 12:43:12.788265 21788 infer.h:431] tid:21788 Loading clone model ...
W1126 12:43:12.788641 21788 infer.h:471] tid:21788 clone caches done
W1126 12:43:12.788655 21788 infer.h:478] [21788] Reload clone model and cube cache done. switching to current_idx[0]
W1126 12:43:12.788681 21788 infer.cpp:255] Succ thrd initialize version engine: 18446744073709551615
W1126 12:43:12.788694 21788 infer.cpp:441] Succ thrd initialize engine, name: general_infer_0
W1126 12:43:12.788720 21789 infer.h:431] tid:21789 Loading clone model ...
W1126 12:43:12.791312 21789 infer.h:471] tid:21789 clone caches done
W1126 12:43:12.791709 21789 infer.h:478] [21789] Reload clone model and cube cache done. switching to current_idx[0]
W1126 12:43:12.794720 21789 infer.cpp:255] Succ thrd initialize version engine: 18446744073709551615
W1126 12:43:12.794885 21789 infer.cpp:441] Succ thrd initialize engine, name: general_infer_0
W1126 12:43:12.794756 21790 infer.h:431] tid:21790 Loading clone model ...
W1126 12:43:12.796314 21790 infer.h:471] tid:21790 clone caches done
W1126 12:43:12.796952 21790 infer.h:478] [21790] Reload clone model and cube cache done. switching to current_idx[0]
W1126 12:43:12.797003 21790 infer.cpp:255] Succ thrd initialize version engine: 18446744073709551615
W1126 12:43:12.797027 21790 infer.cpp:441] Succ thrd initialize engine, name: general_infer_0
W1126 12:43:12.797030 21787 infer.h:431] tid:21787 Loading clone model ...
W1126 12:43:12.797475 21787 infer.h:471] tid:21787 clone caches done
W1126 12:43:12.797495 21787 infer.h:478] [21787] Reload clone model and cube cache done. switching to current_idx[0]
W1126 12:43:12.797523 21787 infer.cpp:255] Succ thrd initialize version engine: 18446744073709551615
W1126 12:43:12.797538 21787 infer.cpp:441] Succ thrd initialize engine, name: general_infer_0
W1126 12:43:12.797538 21791 infer.h:431] tid:21791 Loading clone model ...
W1126 12:43:12.798058 21791 infer.h:471] tid:21791 clone caches done
W1126 12:43:12.798074 21791 infer.h:478] [21791] Reload clone model and cube cache done. switching to current_idx[0]
W1126 12:43:12.798102 21791 infer.cpp:255] Succ thrd initialize version engine: 18446744073709551615
W1126 12:43:12.798118 21791 infer.cpp:441] Succ thrd initialize engine, name: general_infer_0
W1126 12:43:12.798132 21792 infer.h:431] tid:21792 Loading clone model ...
W1126 12:43:12.798525 21792 infer.h:471] tid:21792 clone caches done
W1126 12:43:12.798542 21792 infer.h:478] [21792] Reload clone model and cube cache done. switching to current_idx[0]
W1126 12:43:12.798570 21792 infer.cpp:255] Succ thrd initialize version engine: 18446744073709551615
W1126 12:43:12.798586 21792 infer.cpp:441] Succ thrd initialize engine, name: general_infer_0
W1126 12:43:12.798593 21786 infer.h:431] tid:21786 Loading clone model ...
W1126 12:43:12.799000 21786 infer.h:471] tid:21786 clone caches done
W1126 12:43:12.799015 21786 infer.h:478] [21786] Reload clone model and cube cache done. switching to current_idx[0]
W1126 12:43:12.799044 21786 infer.cpp:255] Succ thrd initialize version engine: 18446744073709551615
W1126 12:43:12.799059 21786 infer.cpp:441] Succ thrd initialize engine, name: general_infer_0
W1126 12:43:12.799073 21785 infer.h:431] tid:21785 Loading clone model ...
W1126 12:43:12.799448 21785 infer.h:471] tid:21785 clone caches done
W1126 12:43:12.799481 21785 infer.h:478] [21785] Reload clone model and cube cache done. switching to current_idx[0]
W1126 12:43:12.799513 21785 infer.cpp:255] Succ thrd initialize version engine: 18446744073709551615
W1126 12:43:12.799604 21785 infer.cpp:441] Succ thrd initialize engine, name: general_infer_0
W1126 12:43:12.799535 21793 infer.h:431] tid:21793 Loading clone model ...
W1126 12:43:12.799984 21793 infer.h:471] tid:21793 clone caches done
W1126 12:43:12.800000 21793 infer.h:478] [21793] Reload clone model and cube cache done. switching to current_idx[0]
W1126 12:43:12.800026 21793 infer.cpp:255] Succ thrd initialize version engine: 18446744073709551615
W1126 12:43:12.800042 21793 infer.cpp:441] Succ thrd initialize engine, name: general_infer_0
W1126 12:43:12.800058 21794 infer.h:431] tid:21794 Loading clone model ...
W1126 12:43:12.800499 21794 infer.h:471] tid:21794 clone caches done
W1126 12:43:12.800518 21794 infer.h:478] [21794] Reload clone model and cube cache done. switching to current_idx[0]
W1126 12:43:12.800535 21794 infer.cpp:255] Succ thrd initialize version engine: 18446744073709551615
W1126 12:43:12.800550 21794 infer.cpp:441] Succ thrd initialize engine, name: general_infer_0
Log file created at: 2021/11/26 12:43:18
Running on machine: instance-hk6cehl7-2
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
W1126 12:43:18.049825 21847 resource.cpp:98] Successfully proc initialized mempool wrapper
W1126 12:43:18.050359 21847 infer.cpp:43] Succ load model:uci_housing_model
W1126 12:43:18.050374 21847 infer.cpp:235] Succ proc initialize version engine: 18446744073709551615
W1126 12:43:18.050384 21847 infer.cpp:200] Succ proc initialize engine: general_infer_0
W1126 12:43:18.050395 21847 infer.cpp:430] Succ proc initialize engine: general_infer_0
W1126 12:43:18.050432 21847 kv_manager.h:70] general_infer_0:
W1126 12:43:18.050443 21847 kv_manager.h:72] Succ proc initialize kvmanager for engine: general_infer_0
W1126 12:43:18.052489 21847 service.cpp:50] Succ get merger: default for service: GeneralModelService
W1126 12:43:18.053643 21849 infer.cpp:287] Succ reload version engine: 18446744073709551615
W1126 12:43:18.057180 21850 memory.cpp:78] Succ thread initialize mempool wrapper
W1126 12:43:18.057214 21850 resource.cpp:302] Successfully thread initialized mempool wrapper
W1126 12:43:18.057308 21851 memory.cpp:78] Succ thread initialize mempool wrapper
W1126 12:43:18.057515 21850 infer.h:431] tid:21850 Loading clone model ...
W1126 12:43:18.057585 21853 memory.cpp:78] Succ thread initialize mempool wrapper
W1126 12:43:18.057629 21851 resource.cpp:302] Successfully thread initialized mempool wrapper
W1126 12:43:18.057780 21852 memory.cpp:78] Succ thread initialize mempool wrapper
W1126 12:43:18.057869 21854 memory.cpp:78] Succ thread initialize mempool wrapper
W1126 12:43:18.058123 21855 memory.cpp:78] Succ thread initialize mempool wrapper
W1126 12:43:18.058228 21855 resource.cpp:302] Successfully thread initialized mempool wrapper
W1126 12:43:18.057819 21853 resource.cpp:302] Successfully thread initialized mempool wrapper
W1126 12:43:18.058167 21854 resource.cpp:302] Successfully thread initialized mempool wrapper
W1126 12:43:18.058022 21852 resource.cpp:302] Successfully thread initialized mempool wrapper
W1126 12:43:18.058274 21856 memory.cpp:78] Succ thread initialize mempool wrapper
W1126 12:43:18.058522 21857 memory.cpp:78] Succ thread initialize mempool wrapper
W1126 12:43:18.058579 21856 resource.cpp:302] Successfully thread initialized mempool wrapper
W1126 12:43:18.058593 21858 memory.cpp:78] Succ thread initialize mempool wrapper
W1126 12:43:18.058630 21857 resource.cpp:302] Successfully thread initialized mempool wrapper
W1126 12:43:18.058743 21858 resource.cpp:302] Successfully thread initialized mempool wrapper
W1126 12:43:18.059038 21859 memory.cpp:78] Succ thread initialize mempool wrapper
W1126 12:43:18.059110 21860 memory.cpp:78] Succ thread initialize mempool wrapper
W1126 12:43:18.062522 21860 resource.cpp:302] Successfully thread initialized mempool wrapper
W1126 12:43:18.060531 21859 resource.cpp:302] Successfully thread initialized mempool wrapper
I0100 00:00:00.000000 21847 op_repository.h:68] RAW: Succ regist op: GeneralDistKVInferOp
I0100 00:00:00.000000 21847 op_repository.h:68] RAW: Succ regist op: GeneralDistKVQuantInferOp
I0100 00:00:00.000000 21847 op_repository.h:68] RAW: Succ regist op: GeneralInferOp
I0100 00:00:00.000000 21847 op_repository.h:68] RAW: Succ regist op: GeneralReaderOp
I0100 00:00:00.000000 21847 op_repository.h:68] RAW: Succ regist op: GeneralResponseOp
I0100 00:00:00.000000 21847 service_manager.h:79] RAW: Service[LoadGeneralModelService] insert successfully!
I0100 00:00:00.000000 21847 load_general_model_service.pb.h:333] RAW: Success regist service[LoadGeneralModelService][PN5baidu14paddle_serving9predictor26load_general_model_service27LoadGeneralModelServiceImplE]
I0100 00:00:00.000000 21847 service_manager.h:79] RAW: Service[GeneralModelService] insert successfully!
I0100 00:00:00.000000 21847 general_model_service.pb.h:1608] RAW: Success regist service[GeneralModelService][PN5baidu14paddle_serving9predictor13general_model23GeneralModelServiceImplE]
I0100 00:00:00.000000 21847 factory.h:155] RAW: Succ insert one factory, tag: PADDLE_INFER, base type N5baidu14paddle_serving9predictor11InferEngineE
W0100 00:00:00.000000 21847 paddle_engine.cpp:29] RAW: Succ regist factory: ::baidu::paddle_serving::predictor::FluidInferEngine<PaddleInferenceEngine>->::baidu::paddle_serving::predictor::InferEngine, tag: PADDLE_INFER in macro!
I1126 12:43:19.723743 21850 analysis_predictor.cc:668] ir_optim is turned off, no IR pass will be executed
--- Running analysis [ir_graph_build_pass]
--- Running analysis [ir_graph_clean_pass]
--- Running analysis [ir_analysis_pass]
--- Running analysis [ir_params_sync_among_devices_pass]
I1126 12:43:19.724848 21850 ir_params_sync_among_devices_pass.cc:45] Sync params from CPU to GPU
--- Running analysis [adjust_cudnn_workspace_size_pass]
--- Running analysis [inference_op_replace_pass]
--- Running analysis [memory_optimize_pass]
I1126 12:43:19.725744 21850 memory_optimize_pass.cc:214] Cluster name : fc_0.tmp_0 size: 4
I1126 12:43:19.725759 21850 memory_optimize_pass.cc:214] Cluster name : x size: 52
I1126 12:43:19.725764 21850 memory_optimize_pass.cc:214] Cluster name : fc_0.tmp_1 size: 4
--- Running analysis [ir_graph_to_program_pass]
I1126 12:43:19.726503 21850 analysis_predictor.cc:717] ======= optimize end =======
I1126 12:43:19.726533 21850 naive_executor.cc:98] --- skip [feed], feed -> x
I1126 12:43:19.726585 21850 naive_executor.cc:98] --- skip [fc_0.tmp_1], fetch -> fetch
I1126 12:43:19.726626 21850 cache.cpp:48] cube cache is loading data, path: uci_housing_model/cube_cache
E1126 12:43:19.726637 21850 cache.cpp:58] invalid cache path uci_housing_model/cube_cache
W1126 12:43:19.726644 21850 infer.h:444] create cube cache[0] done.
W1126 12:43:19.726651 21850 infer.h:478] [21850] Reload clone model and cube cache done. switching to current_idx[0]
W1126 12:43:19.726673 21850 infer.cpp:255] Succ thrd initialize version engine: 18446744073709551615
W1126 12:43:19.726680 21850 infer.cpp:441] Succ thrd initialize engine, name: general_infer_0
W1126 12:43:19.726723 21851 infer.h:431] tid:21851 Loading clone model ...
I1126 12:43:19.727079 21851 naive_executor.cc:98] --- skip [feed], feed -> x
I1126 12:43:19.727178 21851 naive_executor.cc:98] --- skip [fc_0.tmp_1], fetch -> fetch
W1126 12:43:19.727200 21851 infer.h:471] tid:21851 clone caches done
W1126 12:43:19.727206 21851 infer.h:478] [21851] Reload clone model and cube cache done. switching to current_idx[0]
W1126 12:43:19.727234 21851 infer.cpp:255] Succ thrd initialize version engine: 18446744073709551615
W1126 12:43:19.727242 21851 infer.cpp:441] Succ thrd initialize engine, name: general_infer_0
W1126 12:43:19.727262 21855 infer.h:431] tid:21855 Loading clone model ...
I1126 12:43:19.727557 21855 naive_executor.cc:98] --- skip [feed], feed -> x
I1126 12:43:19.727631 21855 naive_executor.cc:98] --- skip [fc_0.tmp_1], fetch -> fetch
W1126 12:43:19.727648 21855 infer.h:471] tid:21855 clone caches done
W1126 12:43:19.727671 21855 infer.h:478] [21855] Reload clone model and cube cache done. switching to current_idx[0]
W1126 12:43:19.727696 21855 infer.cpp:255] Succ thrd initialize version engine: 18446744073709551615
W1126 12:43:19.727705 21855 infer.cpp:441] Succ thrd initialize engine, name: general_infer_0
W1126 12:43:19.727728 21853 infer.h:431] tid:21853 Loading clone model ...
I1126 12:43:19.728101 21853 naive_executor.cc:98] --- skip [feed], feed -> x
I1126 12:43:19.728168 21853 naive_executor.cc:98] --- skip [fc_0.tmp_1], fetch -> fetch
W1126 12:43:19.728184 21853 infer.h:471] tid:21853 clone caches done
W1126 12:43:19.728190 21853 infer.h:478] [21853] Reload clone model and cube cache done. switching to current_idx[0]
W1126 12:43:19.728214 21853 infer.cpp:255] Succ thrd initialize version engine: 18446744073709551615
W1126 12:43:19.728220 21853 infer.cpp:441] Succ thrd initialize engine, name: general_infer_0
W1126 12:43:19.728250 21854 infer.h:431] tid:21854 Loading clone model ...
I1126 12:43:19.728526 21854 naive_executor.cc:98] --- skip [feed], feed -> x
I1126 12:43:19.728596 21854 naive_executor.cc:98] --- skip [fc_0.tmp_1], fetch -> fetch
W1126 12:43:19.728618 21854 infer.h:471] tid:21854 clone caches done
W1126 12:43:19.728628 21854 infer.h:478] [21854] Reload clone model and cube cache done. switching to current_idx[0]
W1126 12:43:19.728655 21854 infer.cpp:255] Succ thrd initialize version engine: 18446744073709551615
W1126 12:43:19.728662 21854 infer.cpp:441] Succ thrd initialize engine, name: general_infer_0
W1126 12:43:19.728684 21852 infer.h:431] tid:21852 Loading clone model ...
I1126 12:43:19.729209 21852 naive_executor.cc:98] --- skip [feed], feed -> x
I1126 12:43:19.729300 21852 naive_executor.cc:98] --- skip [fc_0.tmp_1], fetch -> fetch
W1126 12:43:19.729322 21852 infer.h:471] tid:21852 clone caches done
W1126 12:43:19.729333 21852 infer.h:478] [21852] Reload clone model and cube cache done. switching to current_idx[0]
W1126 12:43:19.729365 21852 infer.cpp:255] Succ thrd initialize version engine: 18446744073709551615
W1126 12:43:19.729377 21852 infer.cpp:441] Succ thrd initialize engine, name: general_infer_0
W1126 12:43:19.729384 21856 infer.h:431] tid:21856 Loading clone model ...
I1126 12:43:19.729621 21856 naive_executor.cc:98] --- skip [feed], feed -> x
I1126 12:43:19.729724 21856 naive_executor.cc:98] --- skip [fc_0.tmp_1], fetch -> fetch
W1126 12:43:19.729740 21856 infer.h:471] tid:21856 clone caches done
W1126 12:43:19.729746 21856 infer.h:478] [21856] Reload clone model and cube cache done. switching to current_idx[0]
W1126 12:43:19.729766 21856 infer.cpp:255] Succ thrd initialize version engine: 18446744073709551615
W1126 12:43:19.729773 21856 infer.cpp:441] Succ thrd initialize engine, name: general_infer_0
W1126 12:43:19.729795 21857 infer.h:431] tid:21857 Loading clone model ...
I1126 12:43:19.730116 21857 naive_executor.cc:98] --- skip [feed], feed -> x
I1126 12:43:19.730198 21857 naive_executor.cc:98] --- skip [fc_0.tmp_1], fetch -> fetch
W1126 12:43:19.730221 21857 infer.h:471] tid:21857 clone caches done
W1126 12:43:19.730230 21857 infer.h:478] [21857] Reload clone model and cube cache done. switching to current_idx[0]
W1126 12:43:19.730262 21857 infer.cpp:255] Succ thrd initialize version engine: 18446744073709551615
W1126 12:43:19.730271 21857 infer.cpp:441] Succ thrd initialize engine, name: general_infer_0
W1126 12:43:19.730280 21858 infer.h:431] tid:21858 Loading clone model ...
I1126 12:43:19.730530 21858 naive_executor.cc:98] --- skip [feed], feed -> x
I1126 12:43:19.730592 21858 naive_executor.cc:98] --- skip [fc_0.tmp_1], fetch -> fetch
W1126 12:43:19.730605 21858 infer.h:471] tid:21858 clone caches done
W1126 12:43:19.730612 21858 infer.h:478] [21858] Reload clone model and cube cache done. switching to current_idx[0]
W1126 12:43:19.730633 21858 infer.cpp:255] Succ thrd initialize version engine: 18446744073709551615
W1126 12:43:19.730639 21858 infer.cpp:441] Succ thrd initialize engine, name: general_infer_0
W1126 12:43:19.730651 21860 infer.h:431] tid:21860 Loading clone model ...
I1126 12:43:19.730875 21860 naive_executor.cc:98] --- skip [feed], feed -> x
I1126 12:43:19.730924 21860 naive_executor.cc:98] --- skip [fc_0.tmp_1], fetch -> fetch
W1126 12:43:19.730938 21860 infer.h:471] tid:21860 clone caches done
W1126 12:43:19.730944 21860 infer.h:478] [21860] Reload clone model and cube cache done. switching to current_idx[0]
W1126 12:43:19.730963 21860 infer.cpp:255] Succ thrd initialize version engine: 18446744073709551615
W1126 12:43:19.730970 21860 infer.cpp:441] Succ thrd initialize engine, name: general_infer_0
W1126 12:43:19.730998 21859 infer.h:431] tid:21859 Loading clone model ...
I1126 12:43:19.731278 21859 naive_executor.cc:98] --- skip [feed], feed -> x
I1126 12:43:19.731353 21859 naive_executor.cc:98] --- skip [fc_0.tmp_1], fetch -> fetch
W1126 12:43:19.731369 21859 infer.h:471] tid:21859 clone caches done
W1126 12:43:19.731375 21859 infer.h:478] [21859] Reload clone model and cube cache done. switching to current_idx[0]
W1126 12:43:19.731385 21859 infer.cpp:255] Succ thrd initialize version engine: 18446744073709551615
W1126 12:43:19.731392 21859 infer.cpp:441] Succ thrd initialize engine, name: general_infer_0
I1126 12:43:19.732892 21847 server.cpp:1046] Server[baidu::paddle_serving::predictor::general_model::GeneralModelServiceImpl] is serving on port=9494.
I1126 12:43:19.732928 21847 server.cpp:1049] Check out http://instance-hk6cehl7-2:9494 in web browser.
I1126 12:43:22.426546 21855 general_model_service.pb.cc:3319] (logid=0) remote_side=[127.0.0.1:48336]
I1126 12:43:22.426600 21855 general_model_service.pb.cc:3320] (logid=0) local_side=[127.0.0.1:9494]
I1126 12:43:22.426609 21855 general_model_service.pb.cc:3321] (logid=0) service_name=[GeneralModelService]
I1126 12:43:22.426609 21855 op_repository.h:68] RAW: Succ regist op: GeneralReaderOp
I1126 12:43:22.426609 21855 op_repository.h:68] RAW: Succ regist op: GeneralInferOp
I1126 12:43:22.426609 21855 op_repository.h:68] RAW: Succ regist op: GeneralResponseOp
I1126 12:43:22.431190 21855 op.cpp:164] (logid=0) general_reader_0_time=[1856]
W1126 12:43:22.431320 21855 device_context.cc:447] Please NOTE: device: 0, GPU Compute Capability: 6.1, Driver API Version: 10.1, Runtime API Version: 10.1
W1126 12:43:22.431545 21855 device_context.cc:465] device: 0, cuDNN Version: 7.6.
I1126 12:43:24.988032 21855 op.cpp:164] (logid=0) general_infer_0_time=[2556771]
I1126 12:43:24.988127 21855 op.cpp:164] (logid=0) general_response_0_time=[26]
I1126 12:43:24.988148 21855 service.cpp:263] (logid=0) workflow total time: 2561515
I1126 12:43:24.988231 21855 general_model_service.pb.cc:3343] [serving]logid=0,cost=2561.65ms.
Killed
/usr/local/lib/python3.6/runpy.py:125: RuntimeWarning: 'paddle_serving_server.serve' found in sys.modules after import of package 'paddle_serving_server', but prior to execution of 'paddle_serving_server.serve'; this may result in unpredictable behaviour
warn(RuntimeWarning(msg))
Going to Run Comand
/usr/local/lib/python3.6/site-packages/paddle_serving_server/serving-gpu-101-0.0.0/serving -enable_model_toolkit -inferservice_path workdir_9494 -inferservice_file infer_service.prototxt -max_concurrency 0 -num_threads 10 -port 9494 -precision fp32 -use_calib=False -reload_interval_s 10 -resource_path workdir_9494 -resource_file resource.prototxt -workflow_path workdir_9494 -workflow_file workflow.prototxt -bthread_concurrency 10 -max_body_size 536870912
feed_var {
name: "x"
alias_name: "x"
is_lod_tensor: false
feed_type: 1
shape: 13
}
fetch_var {
name: "fc_0.tmp_1"
alias_name: "price"
is_lod_tensor: false
fetch_type: 1
shape: 1
}
feed_var {
name: "x"
alias_name: "x"
is_lod_tensor: false
feed_type: 1
shape: 13
}
fetch_var {
name: "fc_0.tmp_1"
alias_name: "price"
is_lod_tensor: false
fetch_type: 1
shape: 1
}
feed_var {
name: "x"
alias_name: "x"
is_lod_tensor: false
feed_type: 1
shape: 13
}
fetch_var {
name: "fc_0.tmp_1"
alias_name: "price"
is_lod_tensor: false
fetch_type: 1
shape: 1
}
engines {
name: "general_infer_0"
type: "PADDLE_INFER"
reloadable_meta: "uci_housing_model/fluid_time_file"
reloadable_type: "timestamp_ne"
model_dir: "uci_housing_model"
gpu_ids: 0
enable_memory_optimization: true
enable_ir_optimization: false
use_trt: false
use_lite: false
use_xpu: false
use_gpu: true
combined_model: false
gpu_multi_stream: false
runtime_thread_num: 0
batch_infer_size: 32
enable_overrun: false
allow_split_request: true
}
port: 9494
services {
name: "GeneralModelService"
workflows: "workflow1"
}
model_toolkit_path: "workdir_9494"
model_toolkit_file: "general_infer_0/model_toolkit.prototxt"
general_model_path: "workdir_9494"
general_model_file: "general_infer_0/general_model.prototxt"
workflows {
name: "workflow1"
workflow_type: "Sequence"
nodes {
name: "general_reader_0"
type: "GeneralReaderOp"
}
nodes {
name: "general_infer_0"
type: "GeneralInferOp"
dependencies {
name: "general_reader_0"
mode: "RO"
}
}
nodes {
name: "general_response_0"
type: "GeneralResponseOp"
dependencies {
name: "general_infer_0"
mode: "RO"
}
}
}
feed_var {
name: "word"
alias_name: "words"
is_lod_tensor: true
feed_type: 0
shape: -1
}
fetch_var {
name: "crf_decoding_0.tmp_0"
alias_name: "crf_decode"
is_lod_tensor: true
fetch_type: 0
shape: -1
}
我沒有心
我沒有真實的自我
我只有消瘦的臉孔
所謂軟弱
所謂的順從一向是我
的座右銘
而我
沒有那海洋的寬闊
我只要熱情的撫摸
所謂空洞
所謂不安全感是我
的墓誌銘
而你
是否和我一般怯懦
是否和我一般矯作
和我一般囉唆
而你
是否和我一般退縮
是否和我一般肌迫
一般地困惑
我沒有力
我沒有滿腔的熱火
我只有滿肚的如果
所謂勇氣
所謂的認同感是我
隨便說說
而你
是否和我一般怯懦
是否和我一般矯作
是否對你來說
只是一場遊戲
雖然沒有把握
而你
是否和我一般退縮
是否和我一般肌迫
是否對你來說
只是逼不得已
雖然沒有藉口
\ No newline at end of file
 
、 ,
。 .
— -
~ ~
‖ |
… .
‘ '
’ '
“ "
” "
〔 (
〕 )
〈 <
〉 >
「 '
」 '
『 "
』 "
〖 [
〗 ]
【 [
】 ]
∶ :
$ $
! !
" "
# #
% %
& &
' '
( (
) )
* *
+ +
, ,
- -
. .
/ /
0 0
1 1
2 2
3 3
4 4
5 5
6 6
7 7
8 8
9 9
: :
; ;
< <
= =
> >
? ?
@ @
A a
B b
C c
D d
E e
F f
G g
H h
I i
J j
K k
L l
M m
N n
O o
P p
Q q
R r
S s
T t
U u
V v
W w
X x
Y y
Z z
[ [
\ \
] ]
^ ^
_ _
` `
a a
b b
c c
d d
e e
f f
g g
h h
i i
j j
k k
l l
m m
n n
o o
p p
q q
r r
s s
t t
u u
v v
w w
x x
y y
z z
{ {
| |
} }
 ̄ ~
〝 "
〞 "
﹐ ,
﹑ ,
﹒ .
﹔ ;
﹕ :
﹖ ?
﹗ !
﹙ (
﹚ )
﹛ {
﹜ }
﹝ [
﹞ ]
﹟ #
﹠ &
﹡ *
﹢ +
﹣ -
﹤ <
﹥ >
﹦ =
﹨ \
﹩ $
﹪ %
﹫ @
,
A a
B b
C c
D d
E e
F f
G g
H h
I i
J j
K k
L l
M m
N n
O o
P p
Q q
R r
S s
T t
U u
V v
W w
X x
Y y
Z z
0 a-B
1 a-I
2 ad-B
3 ad-I
4 an-B
5 an-I
6 c-B
7 c-I
8 d-B
9 d-I
10 f-B
11 f-I
12 m-B
13 m-I
14 n-B
15 n-I
16 nr-B
17 nr-I
18 ns-B
19 ns-I
20 nt-B
21 nt-I
22 nw-B
23 nw-I
24 nz-B
25 nz-I
26 p-B
27 p-I
28 q-B
29 q-I
30 r-B
31 r-I
32 s-B
33 s-I
34 t-B
35 t-I
36 u-B
37 u-I
38 v-B
39 v-I
40 vd-B
41 vd-I
42 vn-B
43 vn-I
44 w-B
45 w-I
46 xc-B
47 xc-I
48 PER-B
49 PER-I
50 LOC-B
51 LOC-I
52 ORG-B
53 ORG-I
54 TIME-B
55 TIME-I
56 O
云计算 5
李小福 2 nr
创新办 3 i
easy_install 3 eng
好用 300
韩玉赏鉴 3 nz
八一双鹿 3 nz
台中
凱特琳 nz
Edu Trust认证 2000
feed_var {
name: "word"
alias_name: "words"
is_lod_tensor: true
feed_type: 0
shape: -1
}
fetch_var {
name: "crf_decoding_0.tmp_0"
alias_name: "crf_decode"
is_lod_tensor: true
fetch_type: 0
shape: -1
}
......@@ -2,7 +2,7 @@ import pytest
import sys
import os
cpp_test_cases = ["test_lac.py::TestLAC::test_cpu", "test_lac.py::TestLAC::test_gpu"]
cpp_test_cases = ["test_fit_a_line.py::TestFitALine::test_cpu", "test_fit_a_line.py::TestFitALine::test_gpu"]
pipeline_test_cases = ["test_uci_pipeline.py::TestUCIPipeline::test_cpu", "test_uci_pipeline.py::TestUCIPipeline::test_gpu"]
def run_test_cases(cases_list, case_type):
......
import os
import subprocess
import numpy as np
import copy
import cv2
import re
import sys
from paddle_serving_client import Client
from paddle_serving_client.httpclient import HttpClient
from paddle_serving_client.io import inference_model_to_serving
from paddle_serving_app.reader import SegPostprocess
from paddle_serving_app.reader import *
import paddle.inference as paddle_infer
from util import *
class TestFitALine(object):
    """End-to-end tests for serving the fit-a-line (uci_housing) model.

    A paddle_serving_server process is started on port 9494 (CPU or GPU),
    queried over bRPC, and the prediction is compared against a local
    paddle.inference run of the same model (the "truth" value).
    """

    @staticmethod
    def _input_array():
        """Return the single UCI-housing sample used by every request.

        Shaped (1, 13) float32 — one batch row of 13 features.
        (Previously this literal was duplicated in three methods.)
        """
        return np.array(
            [0.0137, -0.1136, 0.2553, -0.0692, 0.0582, -0.0727, -0.1583,
             -0.0584, 0.6283, 0.4919, 0.1856, 0.0795,
             -0.0332]).astype("float32")[np.newaxis, :]

    def setup_class(self):
        # pytest invokes setup_class with the class object, so the explicit
        # `self` argument passed to get_truth_val_by_inference below is the
        # class itself; truth_val ends up as a class attribute.
        serving_util = ServingTest(data_path="fit_a_line", example_path="fit_a_line",
                                   model_dir="uci_housing_model",
                                   client_dir="uci_housing_client")
        serving_util.check_model_data_exist()
        self.get_truth_val_by_inference(self)
        self.serving_util = serving_util

    def teardown_method(self):
        # Dump server logs after every test, then free the port and workspace.
        print_log(["stderr.log", "stdout.log",
                   "log/serving.ERROR", "PipelineServingLogs/pipeline.log"],
                  iden="after predict")
        kill_process(9494)
        self.serving_util.release()

    def get_truth_val_by_inference(self):
        """Run the model locally with paddle.inference (CPU, IR optim off)
        and stash the expected output dict in self.truth_val."""
        input_dict = {"x": self._input_array()}
        pd_config = paddle_infer.Config("uci_housing_model/")
        pd_config.disable_gpu()
        pd_config.switch_ir_optim(False)
        predictor = paddle_infer.create_predictor(pd_config)
        for input_name in predictor.get_input_names():
            input_handle = predictor.get_input_handle(input_name)
            input_handle.copy_from_cpu(input_dict[input_name])
        predictor.run()
        output_data_dict = {}
        for output_data_name in predictor.get_output_names():
            output_handle = predictor.get_output_handle(output_data_name)
            output_data_dict[output_data_name] = output_handle.copy_to_cpu()
        # Rename the native output key to the Serving alias ("price") so the
        # dict lines up with what the server returns.
        print(output_data_dict)
        output_data_dict["price"] = output_data_dict.pop("fc_0.tmp_1")
        self.truth_val = output_data_dict
        print(self.truth_val, self.truth_val["price"].shape)

    def predict_brpc(self, batch_size=1):
        """Query the server over bRPC and return the fetch map.

        batch_size is kept for signature compatibility; the request always
        carries the single fixed sample.
        """
        client = Client()
        client.load_client_config("uci_housing_client/serving_client_conf.prototxt")
        client.connect(["127.0.0.1:9494"])
        fetch_list = client.get_fetch_names()
        fetch_map = client.predict(
            feed={"x": self._input_array()}, fetch=fetch_list, batch=True)
        print(fetch_map)
        return fetch_map

    def predict_http(self, batch_size=1):
        """Same request as predict_brpc but through the HTTP client."""
        client = HttpClient()
        client.load_client_config("uci_housing_client/serving_client_conf.prototxt")
        client.connect(["127.0.0.1:9494"])
        fetch_list = client.get_fetch_names()
        fetch_map = client.predict(
            feed={"x": self._input_array()}, fetch=fetch_list, batch=True)
        print(fetch_map)
        return fetch_map

    def test_cpu(self):
        # 1. start server (CPU)
        self.serving_util.start_server_by_shell(
            cmd=f"{self.serving_util.py_version} -m paddle_serving_server.serve --model uci_housing_model --thread 10 --port 9494",
            sleep=5,
        )
        # 2. resource check
        assert count_process_num_on_port(9494) == 1
        # assert check_gpu_memory(0) is False
        # 3. predict by brpc (batch_size 1) and compare with local inference
        result_data = self.predict_brpc()
        self.serving_util.check_result(result_data=result_data, truth_data=self.truth_val, batch_size=1)
        # 4. release
        kill_process(9494)

    def test_gpu(self):
        # 1. start server (GPU card 0)
        self.serving_util.start_server_by_shell(
            cmd=f"{self.serving_util.py_version} -m paddle_serving_server.serve --model uci_housing_model --thread 10 --port 9494 --gpu_ids 0",
            sleep=5,
        )
        # 2. resource check
        assert count_process_num_on_port(9494) == 1
        # assert check_gpu_memory(0) is False
        # 3. predict by brpc (batch_size 1) and compare with local inference
        result_data = self.predict_brpc()
        self.serving_util.check_result(result_data=result_data, truth_data=self.truth_val, batch_size=1)
        # 4. release
        kill_process(9494)
if __name__ == '__main__':
    # BUG fix: the original instantiated `TestCPPClient`, which is not
    # defined anywhere in this module; the class here is TestFitALine.
    tester = TestFitALine()
    tester.get_truth_val_by_inference()
import os
import subprocess
import numpy as np
import copy
import cv2
import sys
from paddle_serving_client import Client, HttpClient
from paddle_serving_app.reader import LACReader
import paddle.inference as paddle_infer
from util import *
class TestLAC(object):
    """End-to-end tests for serving the LAC (lexical analysis) model.

    A paddle_serving_server process is started on port 9293, queried over
    bRPC and HTTP (proto / json / grpc, optionally compressed), and the
    CRF decoding output is compared against a local paddle.inference run.
    """

    def setup_class(self):
        # pytest invokes setup_class with the class object, so the explicit
        # `self` argument passed to get_truth_val_by_inference below is the
        # class itself; truth_val ends up as a class attribute.
        serving_util = ServingTest(data_path="lac", example_path="lac", model_dir="lac_model",
                                   client_dir="lac_client")
        serving_util.check_model_data_exist()
        self.get_truth_val_by_inference(self)
        self.serving_util = serving_util

    def teardown_method(self):
        # Dump server logs after every test, then free the port and workspace.
        print_log(["stderr.log", "stdout.log",
                   "log/serving.ERROR", "PipelineServingLogs/pipeline.log"], iden="after predict")
        kill_process(9293)
        self.serving_util.release()

    def get_truth_val_by_inference(self):
        """Run the model locally with paddle.inference (CPU, IR optim off)
        and stash the expected output dict in self.truth_val."""
        reader = LACReader()
        line = "我爱北京天安门"
        feed_data = reader.process(line)
        # Batch of 2: the same sentence twice, packed as one LoD tensor with
        # offsets [0, len, 2*len].
        input_dict = {
            "word": np.array(feed_data + feed_data).reshape(len(feed_data) * 2, 1),
            "word.lod": [0, len(feed_data), 2 * len(feed_data)]
        }
        pd_config = paddle_infer.Config("lac_model")
        pd_config.disable_gpu()
        pd_config.switch_ir_optim(False)
        predictor = paddle_infer.create_predictor(pd_config)
        input_names = predictor.get_input_names()
        for i, input_name in enumerate(input_names):
            input_handle = predictor.get_input_handle(input_name)
            # Attach the variable-length (LoD) layout before copying data in.
            input_handle.set_lod([input_dict[f"{input_name}.lod"]])
            input_handle.copy_from_cpu(input_dict[input_name])
        predictor.run()
        output_data_dict = {}
        output_names = predictor.get_output_names()
        for _, output_data_name in enumerate(output_names):
            output_handle = predictor.get_output_handle(output_data_name)
            output_data = output_handle.copy_to_cpu()
            output_data_dict[output_data_name] = output_data
        # Rename the native output key to the Serving alias ("crf_decode")
        # so the dict lines up with what the server returns.
        output_data_dict["crf_decode"] = output_data_dict["save_infer_model/scale_0"]
        del output_data_dict["save_infer_model/scale_0"]
        self.truth_val = output_data_dict
        print(self.truth_val, self.truth_val["crf_decode"].shape)

    def predict_brpc(self, batch_size=2):
        """Query the server over bRPC and return the fetch map.

        batch_size is informational; the request always carries the fixed
        2-sentence LoD batch built below.
        """
        reader = LACReader()
        line = "我爱北京天安门"
        feed_data = reader.process(line)
        feed_dict = {
            "words": np.array(feed_data + feed_data).reshape(len(feed_data) * 2, 1),
            "words.lod": [0, len(feed_data), 2 * len(feed_data)]
        }
        fetch = ["crf_decode"]
        endpoint_list = ['127.0.0.1:9293']
        client = Client()
        client.load_client_config(self.serving_util.client_config)
        client.connect(endpoint_list)
        fetch_map = client.predict(feed=feed_dict, fetch=fetch, batch=True)
        print(fetch_map)
        return fetch_map

    def predict_http(self, mode="proto", compress=False, batch_size=2):
        """Query the server through HttpClient and return {alias: ndarray}.

        mode selects the wire format: "proto", "json", or "grpc"; any other
        value aborts the process. compress enables request/response
        compression. The raw fetch_map may be either a dict (proto/json) or
        a protobuf message (grpc), hence the two unpacking branches.
        """
        reader = LACReader()
        line = "我爱北京天安门"
        feed_data = reader.process(line)
        feed_dict = {
            "words": np.array(feed_data + feed_data).reshape(len(feed_data) * 2, 1),
            "words.lod": [0, len(feed_data), 2 * len(feed_data)]
        }
        fetch = ["crf_decode"]
        client = HttpClient()
        client.load_client_config(self.serving_util.client_config)
        if mode == "proto":
            client.set_http_proto(True)
        elif mode == "json":
            client.set_http_proto(False)
        elif mode == "grpc":
            client.set_use_grpc_client(True)
        else:
            exit(-1)
        if compress:
            client.set_response_compress(True)
            client.set_request_compress(True)
        client.connect(["127.0.0.1:9293"])
        fetch_map = client.predict(feed=feed_dict, fetch=fetch, batch=True)
        result_dict = {}
        print(fetch_map)
        if isinstance(fetch_map, dict):
            # proto/json responses arrive as plain dicts.
            for tensor in fetch_map["outputs"][0]["tensor"]:
                result_dict[tensor["alias_name"]] = np.array(tensor["int64_data"]).reshape(tensor["shape"])
        else:
            # grpc responses arrive as protobuf messages.
            for tensor in fetch_map.outputs[0].tensor:
                result_dict[tensor.alias_name] = np.array(tensor.int64_data).reshape(tensor.shape)
        print(result_dict)
        return result_dict

    def test_cpu(self):
        # 1.start server
        self.serving_util.start_server_by_shell(
            cmd=f"{self.serving_util.py_version} -m paddle_serving_server.serve --model lac_model --port 9293",
            sleep=5,
        )
        # 2.resource check
        #assert count_process_num_on_port(9293) == 1
        #assert check_gpu_memory(2) is False
        # 3.keywords check
        # 4.predict by brpc
        # batch_size 2
        result_data = self.predict_brpc(batch_size=2)
        # Drop the lod entry; truth_val has no counterpart for it.
        del result_data["crf_decode.lod"]
        self.serving_util.check_result(result_data=result_data, truth_data=self.truth_val, batch_size=1, delta=1)
        # predict by http
        # batch_size 2
        result_data = self.predict_http(mode="proto", batch_size=2)
        self.serving_util.check_result(result_data=result_data, truth_data=self.truth_val, batch_size=1, delta=1)
        result_data = self.predict_http(mode="json", batch_size=1)
        self.serving_util.check_result(result_data=result_data, truth_data=self.truth_val, batch_size=1, delta=1)
        result_data = self.predict_http(mode="grpc", batch_size=1)
        self.serving_util.check_result(result_data=result_data, truth_data=self.truth_val, batch_size=1, delta=1)
        # # compress
        result_data = self.predict_http(mode="proto", compress=True, batch_size=1)
        self.serving_util.check_result(result_data=result_data, truth_data=self.truth_val, batch_size=1, delta=1)
        result_data = self.predict_http(mode="json", compress=True, batch_size=1)
        self.serving_util.check_result(result_data=result_data, truth_data=self.truth_val, batch_size=1, delta=1)
        # 5.release
        kill_process(9293)

    def test_gpu(self):
        # 1.start server
        self.serving_util.start_server_by_shell(
            cmd=f"{self.serving_util.py_version} -m paddle_serving_server.serve --model lac_model --port 9293 --gpu_ids 0",
            sleep=8,
        )
        # 2.resource check
        assert count_process_num_on_port(9293) == 1
        #assert check_gpu_memory(3) is True
        #assert check_gpu_memory(1) is False
        # 3.keywords check — confirms the server really placed params on GPU
        check_keywords_in_server_log("Sync params from CPU to GPU", filename="stderr.log")
        # 4.predict by brpc
        # batch_size 2
        result_data = self.predict_brpc(batch_size=2)
        # Drop the lod entry; truth_val has no counterpart for it.
        del result_data["crf_decode.lod"]
        self.serving_util.check_result(result_data=result_data, truth_data=self.truth_val, batch_size=1, delta=1)
        # predict by http
        # batch_size 2
        result_data = self.predict_http(mode="proto", batch_size=2)
        self.serving_util.check_result(result_data=result_data, truth_data=self.truth_val, batch_size=1, delta=1)
        result_data = self.predict_http(mode="json", batch_size=1)
        self.serving_util.check_result(result_data=result_data, truth_data=self.truth_val, batch_size=1, delta=1)
        result_data = self.predict_http(mode="grpc", batch_size=1)
        self.serving_util.check_result(result_data=result_data, truth_data=self.truth_val, batch_size=1, delta=1)
        # # compress
        result_data = self.predict_http(mode="proto", compress=True, batch_size=1)
        self.serving_util.check_result(result_data=result_data, truth_data=self.truth_val, batch_size=1, delta=1)
        result_data = self.predict_http(mode="json", compress=True, batch_size=1)
        self.serving_util.check_result(result_data=result_data, truth_data=self.truth_val, batch_size=1, delta=1)
        # 5.release
        kill_process(9293, 2)
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册