#worker_num: maximum concurrency. When build_dag_each_worker=True, the framework creates worker_num processes, each building its own gRPC server and DAG
##When build_dag_each_worker=False, the framework sets max_workers=worker_num on the main thread's gRPC thread pool
worker_num: 1
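
#A minimal sketch (commented out, not part of the original file): the build_dag_each_worker
#switch referred to above is a top-level option; assuming the framework default, it would read:
#build_dag_each_worker: False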

#Ports: rpc_port and http_port must not both be empty. When rpc_port is available and http_port is empty, http_port is not generated automatically
rpc_port: 9998
http_port: 18082

dag:
    #Op resource type: True for the thread model, False for the process model
    is_thread_op: False
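    #General note (added for clarity): in the thread model, op workers run as threads inside the
    #main process and share its memory; in the process model each worker is a separate process,
    #at the cost of extra memory. Which scales better depends on the workload.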

    #tracer
    tracer:
        interval_s: 10
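        #The tracer periodically logs per-op timing and throughput statistics; interval_s is the
        #reporting period in seconds (the setting above emits a report every 10 seconds).
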
op:
    uci:
        #Concurrency: thread-level when is_thread_op=True, otherwise process-level
        concurrency: 1
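        #Illustrative sketch (commented out, not in the original example): with is_thread_op: False
        #above, a hypothetical setting such as the line below would run 4 worker processes for this op.
        #concurrency: 4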

        #When the op config has no server_endpoints, the local service configuration is read from local_service_conf
        local_service_conf:

            #Path of the uci model
            model_config: uci_housing_model

            #Compute device type: if omitted, determined by devices (CPU/GPU); 0=cpu, 1=gpu, 2=tensorRT, 3=arm cpu, 4=kunlun xpu
            device_type: 0

            #Compute device IDs; device_type takes precedence in deciding the hardware type. When devices is "" or omitted, inference runs on CPU; "0" or "0,1,2" selects the GPU cards to use
            devices: "" # "0,1"
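            #Illustrative sketch (commented out, hypothetical values): a GPU setup would pair a GPU
            #device_type with a card list, for example:
            #device_type: 1
            #devices: "0,1"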

            #Client type: brpc, grpc, or local_predictor. local_predictor does not start a Serving service and predicts in-process
            client_type: local_predictor

            #List of fetch results, given by the alias_name of fetch_var in client_config
            fetch_list: ["price"]

            #precision: inference precision; lowering it can speed up inference
            #GPU supports: "fp32" (default), "fp16", "int8";
            #CPU supports: "fp32" (default), "fp16", "bf16" (mkldnn); "int8" is not supported
            precision: "fp32"
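            #Illustrative sketch (commented out): if this op were moved to GPU, a lower precision
            #such as the value below could be tried for faster inference.
            #precision: "fp16"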

            #ir_optim switch, defaults to False
            ir_optim: True

            #use_mkldnn switch, defaults to False; it improves performance only when use_mkldnn and ir_optim are both enabled
            use_mkldnn: True
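
#Usage sketch (comments only, assuming the Pipeline WebService API of Paddle Serving): this file
#is typically loaded by the launcher script that defines the "uci" op, roughly as follows.
#    from paddle_serving_server.web_service import WebService
#    service = UciService(name="uci")               # hypothetical WebService subclass defining the DAG
#    service.prepare_pipeline_config("config.yml")  # reads this configuration file
#    service.run_service()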