// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "examples/demo-serving/op/int64tensor_echo_op.h"

namespace baidu {
namespace paddle_serving {
namespace predictor {

using baidu::paddle_serving::predictor::format::Float32TensorPredictor;
using baidu::paddle_serving::predictor::int64tensor_service::Request;
using baidu::paddle_serving::predictor::int64tensor_service::Response;

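// Echo op for the int64tensor service: for every instance in the request,
// append a fixed 2x1 float tensor ({1.0, 2.0}) to the response predictions.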
int Int64TensorEchoOp::inference() {
  const Request* req = dynamic_cast<const Request*>(get_request_message());
  Response* res = mutable_data<Response>();
  LOG(INFO) << "Receive request in dense service:" << req->ShortDebugString();
  uint32_t sample_size = req->instances_size();
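  // Emit one fixed 2x1 prediction tensor per input instance.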
  for (uint32_t si = 0; si < sample_size; si++) {
    Float32TensorPredictor* float32_tensor_res =
        res->mutable_predictions()->Add();
    float32_tensor_res->add_data(1.0);
    float32_tensor_res->add_data(2.0);
    float32_tensor_res->add_shape(2);
    float32_tensor_res->add_shape(1);
  }
  return 0;
}

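// Register this op with the serving framework so workflows can instantiate it.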
DEFINE_OP(Int64TensorEchoOp);

}  // namespace predictor
}  // namespace paddle_serving
}  // namespace baidu