// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto2";

option java_multiple_files = true;
option java_package = "io.paddle.serving.grpc";
option java_outer_classname = "ServingProto";

message Tensor {
  optional bytes data = 1;
  repeated int32 int_data = 2;
  repeated int64 int64_data = 3;
  repeated float float_data = 4;
  optional int32 elem_type = 5;
  repeated int32 shape = 6;
  repeated int32 lod = 7; // only for fetch tensor currently
}

message FeedInst {
  repeated Tensor tensor_array = 1;
}

message FetchInst {
  repeated Tensor tensor_array = 1;
}

message InferenceRequest {
  repeated FeedInst insts = 1;
  repeated string feed_var_names = 2;
  repeated string fetch_var_names = 3;
  required bool is_python = 4 [ default = false ];
}

message InferenceResponse {
  repeated ModelOutput outputs = 1;
  optional string tag = 2;
  required int32 err_code = 3;
}

message ModelOutput {
  repeated FetchInst insts = 1;
  optional string engine_name = 2;
}

message SetTimeoutRequest {
  required int32 timeout_ms = 1;
}

message SimpleResponse {
  required int32 err_code = 1;
}

message GetClientConfigRequest {}

message GetClientConfigResponse {
  required string client_config_str = 1;
}

service MultiLangGeneralModelService {
  rpc Inference(InferenceRequest) returns (InferenceResponse) {}
  rpc SetTimeout(SetTimeoutRequest) returns (SimpleResponse) {}
  rpc GetClientConfig(GetClientConfigRequest)
      returns (GetClientConfigResponse) {}
}
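
// ---------------------------------------------------------------------------
// Usage sketch (kept as comments so this file remains valid proto): a minimal
// Java client built against the stubs that protoc + protoc-gen-grpc-java
// generate from this file (MultiLangGeneralModelServiceGrpc and the message
// classes in io.paddle.serving.grpc). The endpoint 127.0.0.1:9393, the feed /
// fetch variable names "x" / "cost", the tensor shape, and the elem_type value
// are illustrative assumptions, not values defined by this proto.
//
//   import io.grpc.ManagedChannel;
//   import io.grpc.ManagedChannelBuilder;
//   import io.paddle.serving.grpc.*;
//
//   public final class InferenceClientExample {
//     public static void main(String[] args) {
//       // Assumed serving endpoint; replace with the real address and port.
//       ManagedChannel channel = ManagedChannelBuilder
//           .forAddress("127.0.0.1", 9393).usePlaintext().build();
//       MultiLangGeneralModelServiceGrpc.MultiLangGeneralModelServiceBlockingStub stub =
//           MultiLangGeneralModelServiceGrpc.newBlockingStub(channel);
//
//       // One feed instance carrying a single float tensor of shape [1, 13].
//       Tensor tensor = Tensor.newBuilder()
//           .addAllFloatData(java.util.Collections.nCopies(13, 0.5f))
//           .addShape(1).addShape(13)
//           .setElemType(1) // assumed element-type code for float data
//           .build();
//       InferenceRequest request = InferenceRequest.newBuilder()
//           .addInsts(FeedInst.newBuilder().addTensorArray(tensor))
//           .addFeedVarNames("x")     // assumed feed variable name
//           .addFetchVarNames("cost") // assumed fetch variable name
//           .setIsPython(false)
//           .build();
//
//       InferenceResponse response = stub.inference(request);
//       System.out.println("err_code: " + response.getErrCode());
//       channel.shutdownNow();
//     }
//   }
// ---------------------------------------------------------------------------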