// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "core/general-server/op/general_infer_op.h"
#include <algorithm>
#include <iostream>
#include <memory>
#include <sstream>
#include "core/predictor/framework/infer.h"
#include "core/predictor/framework/memory.h"
#include "core/predictor/framework/resource.h"
#include "core/util/include/timer.h"

namespace baidu {
namespace paddle_serving {
namespace serving {

using baidu::paddle_serving::Timer;
using baidu::paddle_serving::predictor::MempoolWrapper;
using baidu::paddle_serving::predictor::general_model::Tensor;
using baidu::paddle_serving::predictor::general_model::Response;
using baidu::paddle_serving::predictor::general_model::Request;
using baidu::paddle_serving::predictor::general_model::FetchInst;
using baidu::paddle_serving::predictor::InferManager;
using baidu::paddle_serving::predictor::PaddleGeneralModelConfig;

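// GeneralInferOp pulls the tensor blob produced by the preceding op in the
// service DAG, runs the loaded model on it, and writes the resulting tensors
// into its own output blob for downstream ops.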
int GeneralInferOp::inference() {
  const GeneralBlob *input_blob = get_depend_argument<GeneralBlob>(pre_name());

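  // mutable_data() returns this op's own output blob on its channel.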
  GeneralBlob *output_blob = mutable_data<GeneralBlob>();

  if (!input_blob) {
    LOG(ERROR) << "Failed to get depended argument, op: " << pre_name();
    return -1;
  }

  if (!output_blob) {
    LOG(ERROR) << "Failed to get mutable output blob";
    return -1;
  }

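  // Bind the upstream tensors as the engine input and this blob's tensors as
  // the engine output, carrying the batch size through unchanged.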
  const TensorVector *in = &input_blob->tensor_vector;
  TensorVector *out = &output_blob->tensor_vector;
  int batch_size = input_blob->GetBatchSize();
  output_blob->SetBatchSize(batch_size);

  VLOG(2) << "infer batch size: " << batch_size;
  // Run the forward pass; InferManager dispatches to the engine registered
  // under GENERAL_MODEL_NAME and fills `out` with the model's fetch tensors.
  // Timer timeline;
  // double infer_time = 0.0;
  // timeline.Start();
  if (InferManager::instance().infer(
          GENERAL_MODEL_NAME, in, out, batch_size)) {
    LOG(ERROR) << "Failed to do inference in fluid model: "
               << GENERAL_MODEL_NAME;
    return -1;
  }
  // timeline.Pause();
  // infer_time = timeline.ElapsedUS();
  return 0;
}
DEFINE_OP(GeneralInferOp);

}  // namespace serving
}  // namespace paddle_serving
}  // namespace baidu