// service.h -- declarations of InferService (serial workflow execution) and
// ParallelInferService for the Paddle Serving predictor framework.
#ifndef BAIDU_PADDLE_SERVING_PREDICTOR_SERVICE_H
#define BAIDU_PADDLE_SERVING_PREDICTOR_SERVICE_H

#include "common/inner_common.h"
#include "framework/workflow.h"
#include "framework/merger.h"

namespace baidu {
namespace paddle_serving {
namespace predictor {

// A named inference service: owns an ordered list of Workflows and runs a
// request through them (serially in this base class), optionally routing the
// request to a specific workflow set keyed by a request field, and merging
// per-workflow responses via an IMerger.
class InferService {
public:
    typedef OpChannel<google::protobuf::Message> BuiltinChannel;

    static const char* tag() {
        return "Service";
    }

    InferService() :
            _last_change_timestamp(0),
            _enable_map_request_to_workflow(false),
            _request_field_key(""),
            _merger(NULL) {
        _flows.clear();
        _request_to_workflow_map.clear();
    }

    // InferService is a polymorphic base (inference() is virtual and
    // ParallelInferService derives from it); a virtual destructor is required
    // so deleting a derived instance through an InferService* is well-defined.
    virtual ~InferService() {}

    // Parses the service configuration (workflow list, request-to-workflow
    // mapping, merger, ...). Returns 0 on success, nonzero on failure.
    int init(const comcfg::ConfigUnit& conf);

    int deinit() { return 0; }

    // Re-reads configuration that may have changed since init/last reload.
    int reload();

    // Short service name (defined out of line).
    const std::string& name() const;

    // Fully qualified service name as formatted at init time.
    const std::string& full_name() const {
        return _infer_service_format;
    }

    // Executes each workflow serially; debug_os, when non-NULL, collects
    // per-stage debug output. Returns 0 on success.
    virtual int inference(
            const google::protobuf::Message* request,
            google::protobuf::Message* response,
            base::IOBufBuilder* debug_os = NULL);

    // Same as inference() but debug_os is mandatory: always records tracing.
    int debug(
            const google::protobuf::Message* request,
            google::protobuf::Message* response,
            base::IOBufBuilder* debug_os);

    // Runs only the workflow at the given index within _flows.
    int execute_one_workflow(
            uint32_t index,
            const google::protobuf::Message* request, 
            google::protobuf::Message* response,
            base::IOBufBuilder* debug_os);
private:
    // Shared implementation driving a single Workflow instance.
    int _execute_workflow(
            Workflow* workflow,
            const google::protobuf::Message* request, 
            google::protobuf::Message* response,
            base::IOBufBuilder* debug_os);

    // Looks up the workflow set for this request via _request_field_key;
    // non-owning pointer into _request_to_workflow_map. NULL if unmapped.
    std::vector<Workflow*>* _map_request_to_workflow(const google::protobuf::Message* request);

private:
    std::vector<Workflow*> _flows;                 // workflows run in order; not owned here
    std::string _infer_service_format;             // cached full_name() string
    uint64_t _last_change_timestamp;               // config-change stamp used by reload()
    bool _enable_map_request_to_workflow;          // true => route via _request_to_workflow_map
    std::string _request_field_key;                // request field used as the routing key
    ::base::FlatMap<std::string, std::vector<Workflow*> > _request_to_workflow_map;
    IMerger* _merger;                              // merges multi-workflow responses; may be NULL
};

// Variant of InferService intended to run its workflows in parallel.
class ParallelInferService : public InferService {
public:
    // Execute each workflow in parallel.
    // Explicit `virtual` and the `debug_os = NULL` default are restored to
    // match the base-class declaration, so calls through the derived static
    // type accept the same argument lists as calls through the base.
    // NOTE(review): this is currently a stub that does no work and returns 0.
    virtual int inference(
            const google::protobuf::Message* request,
            google::protobuf::Message* response,
            base::IOBufBuilder* debug_os = NULL) {
        return 0;
    }
};

} // predictor
} // paddle_serving
} // baidu

#endif // BAIDU_PADDLE_SERVING_PREDICTOR_SERVICE_H