general_infer_op.cpp
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "core/general-server/op/general_infer_op.h"
#include <algorithm>
#include <iostream>
#include <memory>
#include <sstream>
#include "core/predictor/framework/infer.h"
#include "core/predictor/framework/memory.h"
#include "core/predictor/framework/resource.h"
#include "core/util/include/timer.h"

namespace baidu {
namespace paddle_serving {
namespace serving {

using baidu::paddle_serving::Timer;
using baidu::paddle_serving::predictor::MempoolWrapper;
using baidu::paddle_serving::predictor::general_model::Tensor;
using baidu::paddle_serving::predictor::general_model::Response;
using baidu::paddle_serving::predictor::general_model::Request;
using baidu::paddle_serving::predictor::general_model::FetchInst;
using baidu::paddle_serving::predictor::InferManager;
using baidu::paddle_serving::predictor::PaddleGeneralModelConfig;
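
// GeneralInferOp: takes the GeneralBlob produced by the preceding op,
// runs the forward pass through InferManager, and stores the output
// tensors (plus timing metadata) in its own GeneralBlob for downstream
// ops.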
int GeneralInferOp::inference() {
  VLOG(2) << "Going to run inference";
  const GeneralBlob *input_blob = get_depend_argument<GeneralBlob>(pre_name());
  VLOG(2) << "Get precedent op name: " << pre_name();
  GeneralBlob *output_blob = mutable_data<GeneralBlob>();

  if (!input_blob) {
    LOG(ERROR) << "Failed mutable depended argument, op:" << pre_name();
    return -1;
  }
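
  // Guard added here (not in the original source): fail fast if the
  // framework could not allocate the output blob, mirroring the
  // input_blob check above; output_blob is dereferenced unchecked below.
  if (!output_blob) {
    LOG(ERROR) << "Failed to get mutable output blob";
    return -1;
  }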

  const TensorVector *in = &input_blob->tensor_vector;
  TensorVector *out = &output_blob->tensor_vector;
  int batch_size = input_blob->GetBatchSize();
  VLOG(2) << "input batch size: " << batch_size;

  output_blob->SetBatchSize(batch_size);

  VLOG(2) << "infer batch size: " << batch_size;
  Timer timeline;
  int64_t start = timeline.TimeStampUS();
  timeline.Start();

  // Run the forward pass; InferManager::infer() returns nonzero on failure.
  if (InferManager::instance().infer(
          GENERAL_MODEL_NAME, in, out, batch_size)) {
    LOG(ERROR) << "Failed to do inference in fluid model: "
               << GENERAL_MODEL_NAME;
    return -1;
  }

  int64_t end = timeline.TimeStampUS();
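  // Copy batch/profiling metadata from the input blob, then append this
  // op's start and end timestamps so downstream consumers can compute
  // its latency.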
  CopyBlobInfo(input_blob, output_blob);
  AddBlobInfo(output_blob, start);
  AddBlobInfo(output_blob, end);
  return 0;
}
DEFINE_OP(GeneralInferOp);

}  // namespace serving
}  // namespace paddle_serving
}  // namespace baidu