// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "demo-serving/op/ctr_prediction_op.h"
#include <algorithm>
#include <sstream>
#include <string>
#include <vector>
#include "cube/cube-api/include/cube_api.h"
#include "predictor/framework/infer.h"
#include "predictor/framework/memory.h"

namespace baidu {
namespace paddle_serving {
namespace serving {

using baidu::paddle_serving::predictor::MempoolWrapper;
using baidu::paddle_serving::predictor::ctr_prediction::CTRResInstance;
using baidu::paddle_serving::predictor::ctr_prediction::Response;
using baidu::paddle_serving::predictor::ctr_prediction::CTRReqInstance;
using baidu::paddle_serving::predictor::ctr_prediction::Request;

const int VARIABLE_NAME_LEN = 256;

// Total 26 sparse input + 1 dense input
const int CTR_PREDICTION_INPUT_SLOTS = 27;

// First 26: sparse input
const int CTR_PREDICTION_SPARSE_SLOTS = 26;

// Last 1: dense input
const int CTR_PREDICTION_DENSE_SLOT_ID = 26;
const int CTR_PREDICTION_DENSE_DIM = 13;
const int CTR_PREDICTION_EMBEDDING_SIZE = 10;

// Name of the cube dictionary that stores the sparse-feature embeddings
const char dict_name[] = "dict";

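// Helper: record an error code and a human-readable message on the response.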
void fill_response_with_message(Response *response,
                                int err_code,
                                const std::string &err_msg) {
  if (response == NULL) {
    LOG(ERROR) << "response is NULL";
    return;
  }

  response->set_err_code(err_code);
  response->set_err_msg(err_msg);
}

// Inference flow: gather sparse feature IDs from every request instance,
// look up their embeddings with one batched cube query, build 26 sparse LoD
// tensors plus one dense tensor, run the fluid CTR model, and copy the
// per-sample probabilities (prob0/prob1) into the response.
int CTRPredictionOp::inference() {
  const Request *req = dynamic_cast<const Request *>(get_request_message());

  TensorVector *in = butil::get_object<TensorVector>();
  Response *res = mutable_data<Response>();

  uint32_t sample_size = req->instances_size();
  if (sample_size == 0) {
    LOG(WARNING) << "No instances to run inference on!";
    fill_response_with_message(res, -1, "Sample size invalid");
    return 0;
  }

  paddle::PaddleTensor lod_tensors[CTR_PREDICTION_INPUT_SLOTS];
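  // Initialize every slot as a FLOAT32 LoD tensor whose offsets start at 0.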
  for (int i = 0; i < CTR_PREDICTION_INPUT_SLOTS; ++i) {
    lod_tensors[i].dtype = paddle::PaddleDType::FLOAT32;
    std::vector<std::vector<size_t>> &lod = lod_tensors[i].lod;
    lod.resize(1);
    lod[0].push_back(0);
  }

  // Query cube API for sparse embeddings
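  // Collect every sparse ID from every instance up front so that one batched
  // cube lookup can serve the whole request.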
  std::vector<uint64_t> keys;
  std::vector<rec::mcube::CubeValue> values;

  for (uint32_t si = 0; si < sample_size; ++si) {
    const CTRReqInstance &req_instance = req->instances(si);
    if (req_instance.sparse_ids_size() != CTR_PREDICTION_SPARSE_SLOTS) {
      std::ostringstream iss;
      iss << "Sparse input size != " << CTR_PREDICTION_SPARSE_SLOTS;
      fill_response_with_message(res, -1, iss.str());
      return 0;
    }

    for (int i = 0; i < req_instance.sparse_ids_size(); ++i) {
      keys.push_back(req_instance.sparse_ids(i));
    }
  }

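  // One CubeValue is expected per key, in the same order the keys were pushed.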
  rec::mcube::CubeAPI *cube = rec::mcube::CubeAPI::instance();
  int ret = cube->seek(dict_name, keys, &values);
  if (ret != 0) {
    fill_response_with_message(res, -1, "Query cube for embeddings error");
    LOG(ERROR) << "Query cube for embeddings error";
    return -1;
  }

  // Sparse embeddings
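  // Build one LoD tensor per sparse slot and fill it with the embedding
  // vectors fetched from cube.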
  for (int i = 0; i < CTR_PREDICTION_SPARSE_SLOTS; ++i) {
    paddle::PaddleTensor &lod_tensor = lod_tensors[i];
    std::vector<std::vector<size_t>> &lod = lod_tensor.lod;

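    // Feed variable name for this slot; assumed to match the CTR model's
    // embedding outputs ("embedding_<i>.tmp_0").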
    char name[VARIABLE_NAME_LEN];
    snprintf(name, VARIABLE_NAME_LEN, "embedding_%d.tmp_0", i);
    lod_tensor.name = std::string(name);

    // Each sample contributes exactly one embedding row to this slot.
    for (uint32_t si = 0; si < sample_size; ++si) {
      lod[0].push_back(lod[0].back() + 1);
    }

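    // Shape: [total embedding rows across all samples, embedding size].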
    lod_tensor.shape = {static_cast<int>(lod[0].back()),
                        CTR_PREDICTION_EMBEDDING_SIZE};
    lod_tensor.data.Resize(lod[0].back() * sizeof(float) *
                           CTR_PREDICTION_EMBEDDING_SIZE);

    int offset = 0;
    for (uint32_t si = 0; si < sample_size; ++si) {
      float *data_ptr = static_cast<float *>(lod_tensor.data.data()) + offset;
      const CTRReqInstance &req_instance = req->instances(si);

      // Keys were pushed sample-major, slot-minor, so values[] follows the
      // same layout.
      int idx = si * CTR_PREDICTION_SPARSE_SLOTS + i;
      if (values[idx].buff.size() !=
          sizeof(float) * CTR_PREDICTION_EMBEDDING_SIZE) {
        LOG(ERROR) << "Embedding vector size not expected";
        fill_response_with_message(
            res, -1, "Embedding vector size not expected");
        return 0;
      }

      memcpy(data_ptr, values[idx].buff.data(), values[idx].buff.size());
      offset += CTR_PREDICTION_EMBEDDING_SIZE;
    }

    in->push_back(lod_tensor);
  }

  // Dense features
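  // One row of CTR_PREDICTION_DENSE_DIM float features per sample, fed as a
  // single batched tensor named "dense_input".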
  paddle::PaddleTensor &lod_tensor = lod_tensors[CTR_PREDICTION_DENSE_SLOT_ID];
  lod_tensor.dtype = paddle::PaddleDType::FLOAT32;
  std::vector<std::vector<size_t>> &lod = lod_tensor.lod;
  lod_tensor.name = std::string("dense_input");

  for (uint32_t si = 0; si < sample_size; ++si) {
    const CTRReqInstance &req_instance = req->instances(si);
    if (req_instance.dense_ids_size() != CTR_PREDICTION_DENSE_DIM) {
      std::ostringstream iss;
      iss << "dense input size != " << CTR_PREDICTION_DENSE_DIM;
      fill_response_with_message(res, -1, iss.str());
      return 0;
    }
    lod[0].push_back(lod[0].back() + req_instance.dense_ids_size());
  }

  // Shape: [number of samples, dense feature dimension].
  lod_tensor.shape = {
      static_cast<int>(lod[0].back() / CTR_PREDICTION_DENSE_DIM),
      CTR_PREDICTION_DENSE_DIM};
  lod_tensor.data.Resize(lod[0].back() * sizeof(float));

  // Copy each sample's dense feature vector into the tensor buffer.
  int offset = 0;
  for (uint32_t si = 0; si < sample_size; ++si) {
    float *data_ptr = static_cast<float *>(lod_tensor.data.data()) + offset;
    const CTRReqInstance &req_instance = req->instances(si);
    int id_count = req_instance.dense_ids_size();
    memcpy(data_ptr,
           req_instance.dense_ids().data(),
           sizeof(float) * id_count);
    offset += id_count;
  }

  in->push_back(lod_tensor);

  TensorVector *out = butil::get_object<TensorVector>();
  if (!out) {
    LOG(ERROR) << "Failed to get TLS output object";
    fill_response_with_message(res, -1, "Failed get thread local resource");
    return 0;
  }

  // Call the paddle fluid model to run inference.
  if (predictor::InferManager::instance().infer(
          CTR_PREDICTION_MODEL_NAME, in, out, sample_size)) {
    LOG(ERROR) << "Failed to do inference in fluid model: "
               << CTR_PREDICTION_MODEL_NAME;
    fill_response_with_message(res, -1, "Failed do infer in fluid model");
    return 0;
  }

  if (out->size() != sample_size) {
    LOG(ERROR) << "Output tensor size not equal to that of input";
    fill_response_with_message(res, -1, "Output size != input size");
    return 0;
  }

  // Each row of an output tensor carries two probabilities (prob0, prob1);
  // copy them into the response, one prediction per row.
  for (size_t i = 0; i < out->size(); ++i) {
    int dim1 = out->at(i).shape[0];
    int dim2 = out->at(i).shape[1];

    if (out->at(i).dtype != paddle::PaddleDType::FLOAT32) {
      LOG(ERROR) << "Expected data type float";
      fill_response_with_message(res, -1, "Expected data type float");
      return 0;
    }

    float *data = static_cast<float *>(out->at(i).data.data());
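    // Row-major layout: sample j's probabilities live at data[j * dim2] and
    // data[j * dim2 + 1].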
    for (int j = 0; j < dim1; ++j) {
      CTRResInstance *res_instance = res->add_predictions();
      res_instance->set_prob0(data[j * dim2]);
      res_instance->set_prob1(data[j * dim2 + 1]);
    }
  }

  // Return the input/output TensorVectors to butil's object pool for reuse.
  for (size_t i = 0; i < in->size(); ++i) {
    (*in)[i].shape.clear();
  }
  in->clear();
  butil::return_object<TensorVector>(in);

  for (size_t i = 0; i < out->size(); ++i) {
    (*out)[i].shape.clear();
  }
  out->clear();
  butil::return_object<TensorVector>(out);

  res->set_err_code(0);
  res->set_err_msg(std::string(""));
  return 0;
}

DEFINE_OP(CTRPredictionOp);

}  // namespace serving
}  // namespace paddle_serving
}  // namespace baidu