// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <errno.h>
#include <algorithm>
#include <cstring>
#include <list>
#include <set>
#include <vector>

#ifdef BCLOUD
#include "base/atomicops.h"
#else
#include "butil/atomicops.h"
#endif

#include "core/predictor/common/inner_common.h"

#include "boost/function.hpp"

#include "core/predictor/framework/memory.h"
#include "paddle_inference_api.h"

namespace im {
namespace bsf {

static const size_t DEFAULT_BATCH_SIZE = 100;

// InItemT is paddle::PaddleTensor.
// InVectorT is std::vector<paddle::PaddleTensor>.
// Each element of InVectorT is a different feedvar, not a different batch:
// the batch is already inside each paddle::PaddleTensor.

// size_t `rem` records how many batches have not yet been put into
// BatchTasks.
// `rem` does not need to be atomic, because the operation `put` is
// synchronous; more precisely, a lock is taken outside `put`.
template <typename TaskT>
class BatchTasks;
// size_t `index` records how many batches have finished processing.
// `index` needs to be atomic, because the operation `notify` is
// asynchronous.
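// A hypothetical trace, assuming one task with batch_size = 30 is appended
// to a BatchTasks with 20 free slots and then to a fresh one:
//   put:    rem = 30 -> append 20 -> rem = 10 -> append 10 -> rem = 0
//   notify: index.fetch_add(20), then index.fetch_add(10); 20 + 10 reaches
//   batch_size, so only the thread finishing the last TaskMeta signals
//   write_fd.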
template <typename InItemT, typename OutItemT>
struct Task {
  typedef std::vector<InItemT> InVectorT;
  typedef std::vector<OutItemT> OutVectorT;
  typedef InItemT InType;
  typedef OutItemT OutType;
  typedef Task<InItemT, OutItemT> TaskT;
  typedef std::vector<size_t> ShapeVector;
  typedef std::vector<ShapeVector> VectorOfShapeVector;

  int read_fd;
  int write_fd;
  pid_t owner_tid;
  const InVectorT* inVectorT_ptr;
  OutVectorT* outVectorT_ptr;
  size_t rem;
  size_t total_feed_batch;
  std::set<size_t> set_feed_lod_index;
  std::set<size_t> set_feed_nobatch_index;
  std::vector<size_t> vector_fetch_lod_index;
  std::set<size_t> set_fetch_nobatch_index;
  butil::atomic<size_t> index;
  size_t taskmeta_num;
  THREAD_MUTEX_T task_mut;
  bool fetch_init;
  // sized taskmeta_num * vector_fetch_lod_index.size()
  std::vector<OutVectorT> outLodTensorVector;

  Task() {
    read_fd = -1;
    write_fd = -1;
    owner_tid = -1;
    inVectorT_ptr = NULL;
    outVectorT_ptr = NULL;
    set_feed_lod_index.clear();
    set_feed_nobatch_index.clear();
    vector_fetch_lod_index.clear();
    set_fetch_nobatch_index.clear();
    rem = -1;
    total_feed_batch = 0;
    taskmeta_num = 0;
    index.store(0, butil::memory_order_relaxed);
    THREAD_MUTEX_INIT(&task_mut, NULL);
    fetch_init = false;
    outLodTensorVector.clear();
  }
  ~Task() {
    THREAD_MUTEX_DESTROY(&task_mut);
    outLodTensorVector.clear();
  }

  bool check_feedvar_valid(size_t feedvar_index) {
    if (feedvar_index < 0 || inVectorT_ptr->size() <= feedvar_index) {
      LOG(ERROR) << "feedvar doesn't exist or feedvar_index error";
      return false;
    }

    if ((*inVectorT_ptr)[feedvar_index].shape.size() <= 0) {
      LOG(ERROR) << "feedvar[" << feedvar_index << "].shape.size()<=0, error";
      return false;
    }

    return true;
  }

  bool combine_task_valid(Task* other_task) {
    // TODO(HexToString): auto-padding
    // Apart from the outermost (batch) dimension, the inner shapes must be
    // identical for two tasks to merge; otherwise we break out of the loop
    // and put the task into the next BatchTasks. This guarantees that every
    // task passed to batch.append_task(task) has the same inner shapes.
    if (other_task->feedvar_shape_nobatch() != feedvar_shape_nobatch()) {
      return false;
    }

    // For feedvars with shape[0] = 1 that are not batched, the merge keeps
    // the value from only one of the tasks, so that feedvar must compare
    // equal before the tasks may be merged.
    // PaddleTensor and PaddleBuf do not overload operator==, so we can only
    // compare the raw memory.
    for (size_t feedvar_index : set_feed_nobatch_index) {
      int result =
          std::memcmp((*inVectorT_ptr)[feedvar_index].data.data(),
                      (*(other_task->inVectorT_ptr))[feedvar_index].data.data(),
                      (*inVectorT_ptr)[feedvar_index].data.length());
      if (result != 0) return false;
    }
    return true;
  }

  size_t feedvar_batch_size(size_t feedvar_index) {
    if (!check_feedvar_valid(feedvar_index)) {
      return 0;
    }
    // If there is lod, 'lod[0].size()-1' is the batch.
    // For PaddleTensor, lod is vector<vector<size_t>>, so lod[0] is the real
    // lod. For example, lod = [0,3,4,6] with shape = [6,340,340] is actually
    // a batch of 3. With lod, the batch is <= shape[0].
    if ((*inVectorT_ptr)[feedvar_index].lod.size() > 0 &&
        (*inVectorT_ptr)[feedvar_index].lod[0].size() > 0) {
      return (*inVectorT_ptr)[feedvar_index].lod[0].size() - 1;
    }
    // If not lod, the first dimension `PaddleTensor.shape[0]` is the batch.
    return (*inVectorT_ptr)[feedvar_index].shape[0];
  }

  size_t feedvar_element_bytesize(size_t feedvar_index) {
    if (!check_feedvar_valid(feedvar_index)) {
      return 0;
    }
    int dtype = (*inVectorT_ptr)[feedvar_index].dtype;
    if (dtype == paddle::PaddleDType::INT64) {
      return sizeof(int64_t);
    }
    if (dtype == paddle::PaddleDType::FLOAT32) {
      return sizeof(float);
    }
    if (dtype == paddle::PaddleDType::INT32) {
      return sizeof(int32_t);
    }
    if (dtype == paddle::PaddleDType::UINT8) {
      return sizeof(char);
    }
    return 0;
  }

  // Now, the implementation of this function is based on the assumption
  // that shape[0] = batch_size.
  size_t feedvar_element_num(size_t feedvar_index) {
    if (!check_feedvar_valid(feedvar_index)) {
      return 0;
    }
    size_t element_num = 1;
    if ((*inVectorT_ptr)[feedvar_index].shape.size() == 1) {
      // because shape[0] is batch_size.
      // [10,1] is equivalent to [10], so if shape[1] does not exist,
      // element_num should be 1.
      return 1;
    }
    // Start from shape[1], because shape[0] = batch_size.
    for (size_t i = 1; i < (*inVectorT_ptr)[feedvar_index].shape.size(); ++i) {
      element_num *= (*inVectorT_ptr)[feedvar_index].shape[i];
    }
    return element_num;
  }

  size_t feedvar_bytesize(size_t feedvar_index) {
    return feedvar_element_num(feedvar_index) *
           feedvar_element_bytesize(feedvar_index);
  }
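
  // A hypothetical example: a FLOAT32 feedvar with shape = [6,340,340] has
  // feedvar_element_num = 340 * 340 = 115600, so feedvar_bytesize =
  // 115600 * sizeof(float) bytes per batch item.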

  ShapeVector feedvar_shape_nobatch(size_t feedvar_index) {
    if (!check_feedvar_valid(feedvar_index)) {
      return ShapeVector();
    }
    return ShapeVector{(*inVectorT_ptr)[feedvar_index].shape.begin() + 1,
                       (*inVectorT_ptr)[feedvar_index].shape.end()};
  }

  VectorOfShapeVector feedvar_shape_nobatch() {
    VectorOfShapeVector vector_of_feedvar_shape_nobatch;
    for (size_t feedvar_index = 0; feedvar_index < inVectorT_ptr->size();
         ++feedvar_index) {
      vector_of_feedvar_shape_nobatch.push_back(
          feedvar_shape_nobatch(feedvar_index));
    }
    return vector_of_feedvar_shape_nobatch;
  }
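
  // A hypothetical example: two feedvars with shapes [6,340,340] and [6,1]
  // give feedvar_shape_nobatch() = {{340,340},{1}}; two tasks may be merged
  // only if these inner shapes match exactly (see combine_task_valid).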

  // For each feedvar, the batch should be 1 or batch_size:
  // e.g. feedvar-1: batch_size = 1 (always nobatch);
  //      feedvar-2: batch_size = n, batch = n.
  // This function is not thread safe; it is only called while the task is
  // being created.
  bool task_init() {
    total_feed_batch = feedvar_batch_size(0);
    // total_feed_batch <= 0 means error.
    if (total_feed_batch <= 0) return false;

    for (size_t feedvar_index = 0; feedvar_index < inVectorT_ptr->size();
         ++feedvar_index) {
      // TODO(HexToString): Distinguish between nobatch and batch = 1.
      // When feedvar-1 is batched with batch = 1 (so shape[0] = 1) while
      // feedvar-2 is nobatch (which also leads to shape[0] = 1), we cannot
      // tell whether a feedvar is naturally nobatch, so
      // set_feed_nobatch_index may miss entries. We hope to distinguish the
      // two cases elsewhere later.
      if (feedvar_batch_size(feedvar_index) != total_feed_batch) {
        // Differing batch sizes where neither is 1 means error.
        if (feedvar_batch_size(feedvar_index) != 1 && total_feed_batch != 1) {
          return false;
        } else {
          // which means feedvar shape[0] = 1:
          // shape[0] does not change with the batch.
          set_feed_nobatch_index.insert(feedvar_index);
          total_feed_batch =
              std::max(feedvar_batch_size(feedvar_index), total_feed_batch);
        }
      }
      // Record the index of every lod feedvar.
      if ((*inVectorT_ptr)[feedvar_index].lod.size() > 0 &&
          (*inVectorT_ptr)[feedvar_index].lod[0].size() > 0) {
        set_feed_lod_index.insert(feedvar_index);
      }
    }
    return true;
  }

  size_t batch_size() { return total_feed_batch; }

  // start_batch range is 0~batch_size; end_batch range is 1~batch_size.
  // start_batch is not included and end_batch > start_batch, so the selected
  // range is (start_batch, end_batch] = [start_batch+1, end_batch].
  // For non-lod, shape0_index = [(start_batch+1)-1, end_batch-1] =
  // [start_batch, end_batch-1] = [start_batch, end_batch).
  // For lod, shape0_index = [lod[start_batch], lod[end_batch]-1] =
  // [lod[start_batch], lod[end_batch]).
  // For nobatch, shape0_index = [0, 1).
  // Callers holding shape0_index simply loop:
  // for (size_t myindex = shape0_index[0]; myindex < shape0_index[1];
  //      ++myindex).

  // e.g. with the original lod = [0,3,4,6] and (start_batch = 1,
  // end_batch = 3], i.e. taking batches 2 and 3, the slice is lod = [3,4,6],
  // which becomes [1,3] after rebasing. Rebasing makes merging lods easy:
  // just add the last value of the previous lod.
  std::vector<std::vector<size_t>> get_feature_by_batch(size_t feedvar_index,
                                                        size_t start_batch,
                                                        size_t end_batch) {
    std::vector<std::vector<size_t>> feature_vector;
    // feature_vector is a two-level vector, designed so that a single
    // traversal can handle every kind of feedvar:
    // feature_vector[0] is the shape0_index range: its min and max values.
    // feature_vector[1] is the lod of the selected batches.
    // feature_vector[2] is a single-element vector whose value 1 marks a
    // nobatch feedvar.

    // The `if` branch handles nobatch feedvars,
    // the `else if` branch handles lod feedvars,
    // and the `else` branch handles plain (non-lod) feedvars.
    if (set_feed_nobatch_index.size() > 0 &&
        set_feed_nobatch_index.find(feedvar_index) !=
            set_feed_nobatch_index.end()) {
      feature_vector = {{0, 1}, {}, {1}};
    } else if (set_feed_lod_index.size() > 0 &&
               set_feed_lod_index.find(feedvar_index) !=
                   set_feed_lod_index.end()) {
      std::vector<size_t> feed_lod_vector(end_batch - start_batch);
      for (size_t lod_index = start_batch + 1, vector_index = 0;
           lod_index < end_batch + 1;
           ++lod_index, ++vector_index) {
        feed_lod_vector[vector_index] =
            (*inVectorT_ptr)[feedvar_index].lod[0][lod_index] -
            (*inVectorT_ptr)[feedvar_index].lod[0][start_batch];
      }
      size_t shape0_start = (*inVectorT_ptr)[feedvar_index].lod[0][start_batch];
      size_t shape0_end = (*inVectorT_ptr)[feedvar_index].lod[0][end_batch];
      feature_vector = {{shape0_start, shape0_end}, feed_lod_vector};
      // feature_vector.push_back(feed_lod_vector);
    } else {
      feature_vector = {{start_batch, end_batch}};
    }
    return feature_vector;
  }
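
  // A hypothetical example with lod = [0,3,4,6], start_batch = 1 and
  // end_batch = 3:
  //   nobatch feedvar -> {{0,1}, {}, {1}}
  //   lod feedvar     -> {{3,6}, {1,3}}  (shape0 range [3,6), rebased lod)
  //   plain feedvar   -> {{1,3}}         (shape0 range [1,3))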

  bool combine_taskmeta() {
    // Only when there are lod-type fetch outputs and the task was split into
    // multiple taskmetas do we need to move the data from outLodTensorVector
    // back into outVectorT_ptr.
    if (vector_fetch_lod_index.size() > 0 && taskmeta_num > 1) {
      for (size_t index = 0; index < vector_fetch_lod_index.size(); ++index) {
        size_t data_length = 0;
        size_t lod_length = 0;
        size_t total_shape0 = 0;
        size_t feedvar_index = vector_fetch_lod_index[index];
        // PaddleTensor's Resize clears the buffer each time, so the total
        // lengths must be accumulated first.
        for (size_t taskmeta_index = 0; taskmeta_index < taskmeta_num;
             ++taskmeta_index) {
          data_length +=
              outLodTensorVector[taskmeta_index][index].data.length();
          lod_length += outLodTensorVector[taskmeta_index][index].lod[0].size();
          total_shape0 += outLodTensorVector[taskmeta_index][index].shape[0];
        }
        // Grow the PaddleTensor's data and lod in one shot.
        paddle::PaddleTensor& fetchVarTensor = (*outVectorT_ptr)[feedvar_index];
        fetchVarTensor.data.Resize(data_length);
        // Prepend 0 to the task's lod.
        if (fetchVarTensor.lod.size() <= 0) {
          fetchVarTensor.lod.push_back({0});
        } else if (fetchVarTensor.lod[0].size() <= 0) {
          fetchVarTensor.lod[0].push_back(0);
        }
        fetchVarTensor.lod[0].resize(lod_length + 1, 0);

        // Copy each taskmeta's chunk into place, in taskmeta order.
        size_t data_length_offset = 0;
        size_t lod_length_offset = 0;
        size_t once_data_length = 0;
        size_t once_lod_length = 0;
        size_t last_lod_value = fetchVarTensor.lod[0][lod_length_offset];
        for (size_t taskmeta_index = 0; taskmeta_index < taskmeta_num;
             ++taskmeta_index) {
          void* dst_ptr = fetchVarTensor.data.data() + data_length_offset;
          void* source_ptr =
              outLodTensorVector[taskmeta_index][index].data.data();
          once_data_length =
              outLodTensorVector[taskmeta_index][index].data.length();
          memcpy(dst_ptr, source_ptr, once_data_length);
          once_lod_length =
              outLodTensorVector[taskmeta_index][index].lod[0].size();
          // Each chunk's lod was rebased to start from 0, so shift it by the
          // running last value before appending.
          last_lod_value = fetchVarTensor.lod[0][lod_length_offset];
          for (size_t once_index = 0; once_index < once_lod_length;
               ++once_index) {
            fetchVarTensor.lod[0][lod_length_offset + once_index + 1] =
                last_lod_value +
                outLodTensorVector[taskmeta_index][index].lod[0][once_index];
          }
          data_length_offset += once_data_length;
          lod_length_offset += once_lod_length;
        }
      }
    }
    return true;
  }
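
  // A hypothetical combine_taskmeta() run over two chunks whose rebased lods
  // are both [2,5]: with the prepended 0 and the running last value, the
  // merged lod written back to outVectorT_ptr becomes [0,2,5,7,10].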

  bool task_fetch_init(BatchTasks<TaskT>& batchTask);
  bool task_fetch_create(BatchTasks<TaskT>& batchTask);
};

// Several Tasks, or part of the batch in one Task, can form a TaskMeta.
// Task is the original Request from the user.
// For example, suppose each Task has a batch of 30 and there are 4 Requests,
// while the batch of BatchTasks is 100, meaning we can deal with 100 batches
// at a time:
// TaskMeta-1:{task-1,0,30} TaskMeta-2:{task-2,0,30} TaskMeta-3:{task-3,0,30}
// but the last Task will be divided into 2 TaskMetas:
// TaskMeta-4:{task-4,0,10} and TaskMeta-5:{task-4,10,30}.
// TaskMeta-1 ~ TaskMeta-4 will be inside BatchTasks-1.
// TaskMeta-5 will be inside BatchTasks-2.

// TaskMeta is necessary,
// because we need to know the correspondence between
// `_batch_out`(which is in BatchTasks) and `outVectorT_ptr`(which is in Task).
// especially when one Task is divided into several TaskMetas that are put
// into several different BatchTasks.

// begin, add, and end refer to batches, not shape[0].
// If not lod, batch == shape[0]; if lod, batch != shape[0].
// For example, with lod = [0,3,4,6] and shape = [6,340,340],
// there are actually 3 batches: add = 3, but shape[0] = 6.
template <typename TaskT>
struct TaskMeta {
  TaskMeta(TaskT* ptr, size_t start, size_t add, size_t taskmeta_index)
      : task(ptr),
        begin(start),
        end(start + add),
        taskmeta_index(taskmeta_index) {
    feedvar_num = ptr->inVectorT_ptr->size();
    for (size_t feedvar_index = 0; feedvar_index < feedvar_num;
         ++feedvar_index) {
      std::vector<std::vector<size_t>> feature =
          ptr->get_feature_by_batch(feedvar_index, start, start + add);
      feed_shape0_range.push_back(feature[0]);
      feedvar_type.push_back(feature.size());
      if (feature.size() == 1) {
        feed_lod_vector.push_back({});
      } else if (feature.size() == 2) {
        feed_lod_vector.push_back(feature[1]);
      } else {
        feed_lod_vector.push_back({});
      }
    }
  }
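
  // Note: feedvar_type stores feature.size() as returned by
  // get_feature_by_batch: 1 means plain, 2 means lod, 3 means nobatch.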

  TaskT* task;
  size_t begin;
  size_t end;
  size_t feedvar_num;
  size_t taskmeta_index;
  std::vector<std::vector<size_t>> feed_shape0_range;
  std::vector<std::vector<size_t>> feed_lod_vector;
  std::vector<size_t> feedvar_type;
};

// Each TaskT already contains its batch.
// BatchTasks needs to combine several `small TaskMetas` into a new `big
// TaskT`. The only difference between the `big TaskT` and a `small TaskT`
// is that TaskT.inVectorT_ptr->[feedvar_index].shape[0] differs:
// for the `big TaskT`, inVectorT_ptr->[feedvar_index].shape[0] is actually
// the batch_size.
template <typename TaskT>
class BatchTasks {
 public:
  typedef typename TaskT::InType InType;
  typedef typename TaskT::OutType OutType;
  typedef TaskMeta<TaskT> TaskMetaT;
  typedef std::vector<size_t> ShapeVector;
  typedef std::vector<ShapeVector> VectorOfShapeVector;
  typedef std::vector<size_t> LodVector;
  typedef std::vector<LodVector> PaddleTensorLod;
  friend TaskT;

  explicit BatchTasks(size_t batch_size,
                      bool overrun = false,
                      bool allow_split_request = true)
      : _batch_size(batch_size),
        _rem_size(batch_size),
        _overrun(overrun),
        _allow_split_request(allow_split_request) {
    _batch_in.clear();
    _batch_in_offset.clear();
    _total_shape0_batch_in.clear();
    _total_feed_batch = 0;
    _batch_in_lod.clear();

    _batch_out.clear();
    _batch_out_offset.clear();
    _total_fetch_batch = 0;
    _taskmeta_vector.clear();
    set_fetch_nobatch_index.clear();
    vector_fetch_lod_index.clear();
  }

  ~BatchTasks() {
    _batch_in.clear();
    _batch_in_offset.clear();
    _total_shape0_batch_in.clear();
    _total_feed_batch = 0;
    _batch_in_lod.clear();

    _batch_out.clear();
    _batch_out_offset.clear();
    _total_fetch_batch = 0;
    _taskmeta_vector.clear();
    set_fetch_nobatch_index.clear();
    vector_fetch_lod_index.clear();
  }

  // synchronized operation
  // because upper-level callers of this function have already locked.
  // All tasks that reach this function are of the same kind; this is
  // guaranteed before the call.
  size_t append_task(TaskT* task) {
    size_t add = std::min(task->rem, _rem_size);
    // When _overrun == true, we always take the whole task as one TaskMeta:
    // we may temporarily break through the BatchTask's capacity limit
    // (its capacity is _batch_size, of which _rem_size remains).
    if (_overrun) {
      add = task->rem;
    }
    int start_index = task->batch_size() - task->rem;
    TaskMetaT tm(task, start_index, add, task->taskmeta_num);
    task->taskmeta_num += 1;
    _taskmeta_vector.push_back(tm);
    if (_batch_in_offset.size() == 0) {
      _batch_in_offset.resize(tm.feedvar_num, 0);
    }
    if (_total_shape0_batch_in.size() == 0) {
      _total_shape0_batch_in.resize(tm.feedvar_num, 0);
    }
    if (_batch_in_lod.size() == 0) {
      PaddleTensorLod null_lod;
      _batch_in_lod.resize(tm.feedvar_num, null_lod);
    }
    _total_feed_batch += add;
    for (size_t feedvar_index = 0; feedvar_index < tm.feedvar_num;
         ++feedvar_index) {
      if (tm.feedvar_type[feedvar_index] == 1) {
        // Plain (non-lod) feedvar:
        // accumulate shape0 to prepare for initializing the PaddleTensor.
        _total_shape0_batch_in[feedvar_index] +=
            tm.feed_shape0_range[feedvar_index][1] -
            tm.feed_shape0_range[feedvar_index][0];
      } else if (tm.feedvar_type[feedvar_index] == 2) {
        // Lod feedvar:
        // accumulate shape0 to prepare for initializing the PaddleTensor.
        _total_shape0_batch_in[feedvar_index] +=
            tm.feed_shape0_range[feedvar_index][1] -
            tm.feed_shape0_range[feedvar_index][0];
        // Prepend 0 to the lod.
        if (_batch_in_lod[feedvar_index].size() <= 0) {
          _batch_in_lod[feedvar_index].push_back({0});
        } else if (_batch_in_lod[feedvar_index][0].size() <= 0) {
          _batch_in_lod[feedvar_index][0].push_back(0);
        }
        // Combine the lods by adding the last value of the previous lod to
        // each incoming (rebased) lod value.
        size_t last_lod_value = _batch_in_lod[feedvar_index][0].back();
        for (size_t lod_index = 0;
             lod_index < tm.feed_lod_vector[feedvar_index].size();
             ++lod_index) {
          _batch_in_lod[feedvar_index][0].push_back(
              last_lod_value + tm.feed_lod_vector[feedvar_index][lod_index]);
        }
      } else {
        // tm.feedvar_type[feedvar_index] == 3: nobatch feedvar.
        // Do not accumulate; the value should stay 1.
        _total_shape0_batch_in[feedvar_index] =
            tm.feed_shape0_range[feedvar_index][1] -
            tm.feed_shape0_range[feedvar_index][0];
      }
    }
    task->rem -= add;
    _rem_size -= add;
    return _rem_size;
  }
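
  // A hypothetical append of two tasks whose lods are [0,3,4] and [0,2]:
  // the first contributes the rebased values {3,4}; the second starts from
  // last_lod_value = 4 and contributes {6}, so _batch_in_lod[i][0] ends up
  // as [0,3,4,6].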

  static bool check_valid(const typename TaskT::InVectorT& in,
                          const typename TaskT::OutVectorT& out,
                          bool align) {
    (void)in;
    (void)out;
    (void)align;
    return true;
  }

  // This should be modified completely.
  // Maybe we don't need to do this inside BatchTasks:
  // we could do the copy work outside BatchTasks,
  // because next time we may not need the extra copy and could
  // copy every Task directly into the Predictor.

  // batch.merge_tasks() is a thread-safe function,
  // because batch is a local variable and Task is only read, not written.

  void merge_tasks() {
    if (_taskmeta_vector.size() <= 0) {
      return;
    }

    for (size_t ti = 0; ti < _taskmeta_vector.size(); ++ti) {
      TaskMetaT& tm = _taskmeta_vector[ti];

      for (size_t feedvar_index = 0; feedvar_index < tm.feedvar_num;
           ++feedvar_index) {
        const paddle::PaddleTensor& feedVarTensor =
            (*tm.task->inVectorT_ptr)[feedvar_index];
        size_t feedvar_bytesize = tm.task->feedvar_bytesize(feedvar_index);

        if (ti == 0) {
          // Create the entire tensor at once.
          // For now we assume that every task's feedvar_bytesize is the
          // same, which means we don't support auto embedding;
          // it still differs between different feedvars.
          paddle::PaddleTensor paddleTensor;
          paddleTensor.dtype = feedVarTensor.dtype;
          paddleTensor.name = feedVarTensor.name;
          paddleTensor.lod = _batch_in_lod[feedvar_index];
          paddleTensor.shape = feedVarTensor.shape;
          paddleTensor.shape[0] = _total_shape0_batch_in[feedvar_index];
          paddleTensor.data.Resize(feedvar_bytesize *
                                   _total_shape0_batch_in[feedvar_index]);
          _batch_in.push_back(paddleTensor);
        }

        void* dst_ptr = _batch_in[feedvar_index].data.data() +
                        _batch_in_offset[feedvar_index];
        void* source_ptr =
            feedVarTensor.data.data() +
            feedvar_bytesize * tm.feed_shape0_range[feedvar_index][0];
        size_t length =
            feedvar_bytesize * (tm.feed_shape0_range[feedvar_index][1] -
                                tm.feed_shape0_range[feedvar_index][0]);
        memcpy(dst_ptr, source_ptr, length);
        // Nobatch feedvars do not accumulate the offset.
        if (tm.feedvar_type[feedvar_index] != 3)
          _batch_in_offset[feedvar_index] += length;
      }
    }
  }
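
  // A sketch of the result: after merge_tasks(), _batch_in holds one
  // contiguous tensor per feedvar. E.g. two hypothetical tasks with shapes
  // [2,10] and [3,10] merge into shape [5,10], each task's bytes landing at
  // its recorded _batch_in_offset.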

  bool check_fetchvar_valid(size_t fetchvar_index) {
    if (fetchvar_index < 0 || _batch_out.size() <= fetchvar_index) {
      LOG(ERROR) << "fetchvar doesn't exist or fetchvar_index error";
      return false;
    }

    if (_batch_out[fetchvar_index].shape.size() <= 0) {
      LOG(ERROR) << "fetchvar[" << fetchvar_index << "].shape.size()<=0, error";
      return false;
    }

    return true;
  }

  size_t fetchvar_element_bytesize(size_t fetchvar_index) {
    if (!check_fetchvar_valid(fetchvar_index)) {
      return 0;
    }
    size_t dtype = _batch_out[fetchvar_index].dtype;
    if (dtype == paddle::PaddleDType::INT64) {
      return sizeof(int64_t);
    }
    if (dtype == paddle::PaddleDType::FLOAT32) {
      return sizeof(float);
    }
    if (dtype == paddle::PaddleDType::INT32) {
      return sizeof(int32_t);
    }
    if (dtype == paddle::PaddleDType::UINT8) {
      return sizeof(char);
    }
    return 0;
  }

  // Now, the implementation of this function is based on the assumption
  // that shape[0] = batch_size.
  size_t fetchvar_element_num(size_t fetchvar_index) {
    if (!check_fetchvar_valid(fetchvar_index)) {
      return 0;
    }
    size_t element_num = 1;
    if (_batch_out[fetchvar_index].shape.size() == 1) {
      // because shape[0] is batch_size.
      return 1;
    }
    // Start from shape[1], because shape[0] = batch_size.
    for (size_t i = 1; i < _batch_out[fetchvar_index].shape.size(); ++i) {
      element_num *= _batch_out[fetchvar_index].shape[i];
    }
    return element_num;
  }

  size_t fetchvar_bytesize(size_t fetchvar_index) {
    return fetchvar_element_num(fetchvar_index) *
           fetchvar_element_bytesize(fetchvar_index);
  }

  size_t fetchvar_batch_size(size_t fetchvar_index) {
    if (!check_fetchvar_valid(fetchvar_index)) {
      return 0;
    }
    // If there is lod, 'lod[0].size()-1' is the batch.
    // For PaddleTensor, lod is vector<vector<size_t>>, so lod[0] is the real
    // lod. For example, lod = [0,3,4,6] with shape = [6,340,340] is actually
    // a batch of 3. With lod, the batch is <= shape[0].
    if (_batch_out[fetchvar_index].lod.size() > 0 &&
        _batch_out[fetchvar_index].lod[0].size() > 0) {
      return _batch_out[fetchvar_index].lod[0].size() - 1;
    }
    // If not lod, the first dimension `PaddleTensor.shape[0]` is the batch.
    return _batch_out[fetchvar_index].shape[0];
  }

  size_t fetchvar_batch_size() { return _total_fetch_batch; }

  bool deal_batch_out() {
    _total_fetch_batch = fetchvar_batch_size(0);
    if (_total_fetch_batch <= 0) return false;
    for (size_t fetchvar_index = 0; fetchvar_index < _batch_out.size();
         ++fetchvar_index) {
      // TODO(HexToString): Distinguish between nobatch and batch = 1.
      // When fetchvar-1 is batched with batch = 1 (so shape[0] = 1) while
      // fetchvar-2 is nobatch (which also leads to shape[0] = 1), we cannot
      // tell whether a fetchvar is naturally nobatch, so
      // set_fetch_nobatch_index may miss entries. We hope to distinguish the
      // two cases elsewhere later.
      if (fetchvar_batch_size(fetchvar_index) != _total_fetch_batch) {
        // Differing batch sizes where neither is 1 means error.
        if (fetchvar_batch_size(fetchvar_index) != 1 &&
            _total_fetch_batch != 1) {
          return false;
        } else {
          // which means fetchvar shape[0] = 1:
          // shape[0] does not change with the batch.
          set_fetch_nobatch_index.insert(fetchvar_index);
          _total_fetch_batch =
              std::max(fetchvar_batch_size(fetchvar_index), _total_fetch_batch);
        }
      }
      // Record the index of every lod fetchvar.
      if (_batch_out[fetchvar_index].lod.size() > 0 &&
          _batch_out[fetchvar_index].lod[0].size() > 0) {
        vector_fetch_lod_index.push_back(fetchvar_index);
      }
    }
    return true;
  }
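
  // A hypothetical _batch_out with fetchvar-0 shape[0] = 5 (plain),
  // fetchvar-1 shape[0] = 1 (nobatch) and fetchvar-2 lod = [0,2,5,7,9,12]
  // yields _total_fetch_batch = 5, set_fetch_nobatch_index = {1} and
  // vector_fetch_lod_index = {2}.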

  void notify_tasks() {
    if (_taskmeta_vector.size() <= 0) {
      LOG(ERROR) << "_taskmeta_vector.size() <=0, error.";
      return;
    }
    // Compute the overall output batch from _batch_out, and record the
    // indexes of lod-type and nobatch-type fetchvars for later lookup.
    deal_batch_out();
    // If the output batch is neither 1 nor equal to the input batch, it is
    // an error.
    if (_total_feed_batch != _total_fetch_batch && _total_fetch_batch != 1) {
      LOG(ERROR) << "_batch_out`s batch != _batch_in`s batch, error.";
      return;
    }

    size_t fetchvar_num = _batch_out.size();
    if (_batch_out_offset.size() == 0) {
      _batch_out_offset.resize(fetchvar_num, 0);
    }

    for (size_t ti = 0; ti < _taskmeta_vector.size(); ++ti) {
      TaskT* task = _taskmeta_vector[ti].task;
      size_t begin = _taskmeta_vector[ti].begin;
      size_t end = _taskmeta_vector[ti].end;
      size_t add = end - begin;
      size_t taskmeta_index = _taskmeta_vector[ti].taskmeta_index;
      // Initialize outVectorT_ptr in the task.
      // For lod outputs split across multiple taskmetas,
      // outLodTensorVector must be initialized as well.
      if (!task->task_fetch_init(*this)) {
        LOG(ERROR) << " task_fetch_init error.";
        return;
      }
      size_t fetch_lod_index = 0;

      for (size_t fetchvar_index = 0; fetchvar_index < fetchvar_num;
           ++fetchvar_index) {
        size_t fetchvar_bytesize_index = fetchvar_bytesize(fetchvar_index);

        if (set_fetch_nobatch_index.size() > 0 &&
            set_fetch_nobatch_index.find(fetchvar_index) !=
                set_fetch_nobatch_index.end()) {
          // Nobatch fetchvar:
          // no matter how many batches come in, this fetchvar always has
          // shape[0] = 1.
          paddle::PaddleTensor& fetchVarTensor =
              (*task->outVectorT_ptr)[fetchvar_index];
          void* dst_ptr = fetchVarTensor.data.data();
          size_t length = fetchvar_bytesize_index * 1;
          void* source_ptr = _batch_out[fetchvar_index].data.data();
          memcpy(dst_ptr, source_ptr, length);
        } else if (vector_fetch_lod_index.size() > 0 &&
                   std::find(vector_fetch_lod_index.begin(),
                             vector_fetch_lod_index.end(),
                             fetchvar_index) != vector_fetch_lod_index.end()) {
          // Lod fetchvar: the total shape[0] cannot be determined yet, so
          // taskmeta_num temporary buffers are allocated according to the
          // task's taskmeta_num; each lod fetchvar chunk is copied into its
          // own buffer, and at the end the total size is computed and the
          // fetchvar data and lods are merged.
          size_t last_batch = _batch_out_offset[fetchvar_index];
          size_t shape0_index_start =
              _batch_out[fetchvar_index].lod[0][last_batch];
          size_t shape0_index_end =
              _batch_out[fetchvar_index].lod[0][last_batch + add];
          size_t shape0_length = shape0_index_end - shape0_index_start;
          // When the task is split into multiple taskmetas, we cannot copy
          // directly into task->outVectorT_ptr; we first copy into
          // task->outLodTensorVector[taskmeta_index], and once all of the
          // task's taskmetas are done the chunks are copied back into
          // task->outVectorT_ptr in order.
          if (task->taskmeta_num > 1) {
            paddle::PaddleTensor& fetchVarTensor =
                task->outLodTensorVector[taskmeta_index][fetch_lod_index];
            size_t length = fetchvar_bytesize_index * shape0_length;
            fetchVarTensor.shape[0] = shape0_length;
            fetchVarTensor.data.Resize(length);
            void* dst_ptr = fetchVarTensor.data.data();
            void* source_ptr = _batch_out[fetchvar_index].data.data() +
                               shape0_index_start * fetchvar_bytesize_index;
            memcpy(dst_ptr, source_ptr, length);
            // Because these are split lod chunks, do not prepend 0 here;
            // it is prepended when the chunks are merged back into the
            // task's outVectorT_ptr.
            if (fetchVarTensor.lod.size() <= 0) {
              fetchVarTensor.lod.push_back({});
            }
            fetchVarTensor.lod[0].resize(add, 0);
            size_t last_lod_value =
                _batch_out[fetchvar_index].lod[0][last_batch];
            for (size_t lod_index = last_batch + 1, my_index = 0;
                 lod_index < last_batch + add + 1;
                 ++lod_index, ++my_index) {
              fetchVarTensor.lod[0][my_index] =
                  (_batch_out[fetchvar_index].lod[0][lod_index] -
                   last_lod_value);
            }
          } else {
            // The task was not split into multiple taskmetas, so only one
            // thread's taskmeta touches this task and there is no
            // contention: resize and write directly into
            // task->outVectorT_ptr.
            paddle::PaddleTensor& fetchVarTensor =
                (*task->outVectorT_ptr)[fetchvar_index];
            size_t length = fetchvar_bytesize_index * shape0_length;
            fetchVarTensor.shape[0] = shape0_length;
            fetchVarTensor.data.Resize(length);
            void* dst_ptr = fetchVarTensor.data.data();
            void* source_ptr = _batch_out[fetchvar_index].data.data() +
                               shape0_index_start * fetchvar_bytesize_index;
            memcpy(dst_ptr, source_ptr, length);

            // Prepend 0 to the task's lod.
            if (fetchVarTensor.lod.size() <= 0) {
              fetchVarTensor.lod.push_back({0});
            } else if (fetchVarTensor.lod[0].size() <= 0) {
              fetchVarTensor.lod[0].push_back(0);
            }
            // Split the batches of the merged lod back into the task.
            // Note that the accumulation contributed by the preceding lods
            // must be removed here.
            // For example, the merged lod [0,2,5,7,10] was predicted from
            // two merged tasks with batch = 2 each. When splitting, the
            // first task subtracts 0 and gets [2,5], which with the
            // prepended 0 becomes [0,2,5]; the second task must subtract 5
            // to also get [2,5].
            fetchVarTensor.lod[0].resize(add + 1, 0);
            size_t last_lod_value =
                _batch_out[fetchvar_index].lod[0][last_batch];
            for (size_t lod_index = last_batch + 1, my_index = 1;
                 lod_index < last_batch + add + 1;
                 ++lod_index, ++my_index) {
              fetchVarTensor.lod[0][my_index] =
                  (_batch_out[fetchvar_index].lod[0][lod_index] -
                   last_lod_value);
            }
          }
          fetch_lod_index++;
        } else {
          // Plain fetchvar: the task's total fetchvar batch equals the
          // input's total batch_size(), so the output batch corresponds
          // one-to-one to the input batch.
          paddle::PaddleTensor& fetchVarTensor =
              (*task->outVectorT_ptr)[fetchvar_index];
          void* dst_ptr =
              fetchVarTensor.data.data() + fetchvar_bytesize_index * begin;
          size_t length = fetchvar_bytesize_index * add;
          void* source_ptr =
              _batch_out[fetchvar_index].data.data() +
              _batch_out_offset[fetchvar_index] * fetchvar_bytesize_index;

          memcpy(dst_ptr, source_ptr, length);
        }
        _batch_out_offset[fetchvar_index] += add;
      }

      // `index` is a local variable; fetch_add is atomic and returns the
      // previous value on success.
      // Only after the last taskmeta finishes can a thread's index + add
      // reach task->batch_size(), so exactly one thread enters the if block
      // and there is no multi-thread race.
      size_t index = task->index.fetch_add(add);
      if ((index + add) >= task->batch_size()) {
        task->combine_taskmeta();
        char c = 0;
        while (write(task->write_fd, &c, 1) != 1 && errno == EINTR) {
        }
        butil::return_object(task);
      }
    }
  }

  const typename TaskT::InVectorT& in() const { return _batch_in; }

  typename TaskT::OutVectorT& out() { return _batch_out; }

  size_t task_size() { return _taskmeta_vector.size(); }

  size_t get_rem_size() { return _rem_size; }

  bool get_overrun() { return _overrun; }

  bool get_allow_split_request() { return _allow_split_request; }

 private:
  std::vector<TaskMetaT> _taskmeta_vector;
  typename TaskT::InVectorT _batch_in;
  std::vector<size_t> _batch_in_offset;
  std::vector<size_t> _total_shape0_batch_in;
  size_t _total_feed_batch;
  std::vector<PaddleTensorLod> _batch_in_lod;

  typename TaskT::OutVectorT _batch_out;
  std::vector<size_t> _batch_out_offset;
  // std::vector<size_t> _total_shape0_batch_out;
  size_t _total_fetch_batch;
  // std::vector<PaddleTensorLod>  _batch_out_lod;
  std::set<size_t> set_fetch_nobatch_index;
  std::vector<size_t> vector_fetch_lod_index;

  size_t _rem_size;
  size_t _batch_size;
  bool _overrun;
  bool _allow_split_request;
};

// BSF task handle
// TaskHandler is the handle of a Task.
// `read_fd` is used to receive the signal in the brpc thread.
// `write_fd` is used to write the signal in the bsf thread.
// When the Task's last TaskMeta is done, the bsf thread writes to
// `write_fd`; the brpc thread keeps reading `read_fd` in a while loop and
// receives the signal once the Task is done.
// So `read_fd` and `write_fd` are used to communicate between the two
// threads.
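
// A hypothetical round trip: schedule() returns a TaskHandler; the brpc
// thread blocks reading handler.read_fd until notify_tasks() writes one byte
// to task->write_fd after the last TaskMeta completes.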
template <typename TaskT>
struct TaskHandler {
  int read_fd;
  int write_fd;

  TaskHandler() : read_fd(-1), write_fd(-1) {
    // do nothing
  }

  explicit TaskHandler(TaskT const& task)
      : read_fd(task.read_fd), write_fd(task.write_fd) {
    // do nothing
  }

  inline bool valid() const { return read_fd >= 0 && write_fd >= 0; }

  static TaskHandler<TaskT>& valid_handle() {
    static TaskHandler<TaskT> vhandle;
    return vhandle;
  }
};

// TaskExecutor is a Thread pool.
template <typename TaskT>
class TaskExecutor;

// ThreadContext is used to start a bsf thread.
template <typename TaskT>
struct ThreadContext {
  TaskExecutor<TaskT>* executor;
  void* user_thread_context;
  THREAD_T tid;
  int init_status;

  ThreadContext()
      : executor(NULL), user_thread_context(NULL), tid(-1), init_status(0) {
    // do nothing
  }

  ~ThreadContext() {
    tid = -1;
    executor = NULL;
    user_thread_context = NULL;
    init_status = 0;
  }
};

// TaskExecutor is a thread pool.
// Each TaskExecutor corresponds to one model.
// TaskT is actually a Request preprocessed by ReaderOp.
// schedule() puts a TaskT into _task_queue from the brpc thread and returns
// a TaskHandler to that thread; the TaskT is divided into TaskMetas as it
// is moved into a BatchTasks.
// start() creates `thread_num` bsf threads, and every bsf thread checks
// _task_queue and takes tasks from it.
// When all of a Task's TaskMetas are done, its TaskHandler is notified.
template <typename TaskT>
class TaskExecutor {
 public:
  typedef typename TaskT::InType InType;
  typedef typename TaskT::OutType OutType;
  typedef typename TaskT::InVectorT InVectorT;
  typedef typename TaskT::OutVectorT OutVectorT;
  typedef std::vector<TaskT> TaskArrayT;
  typedef baidu::paddle_serving::predictor::MempoolWrapper MempoolWrapper;
  typedef std::vector<size_t> ShapeVector;
  typedef std::vector<ShapeVector> VectorOfShapeVector;

  TaskExecutor()
      : _stop(false),
        _thread_init_fn(NULL),
        _thread_reset_fn(NULL),
        _user_thread_contexts(NULL),
        _batch_size(DEFAULT_BATCH_SIZE),
        _overrun(false),
        _fn(NULL) {
    THREAD_MUTEX_INIT(&_mut, NULL);
    THREAD_COND_INIT(&_cond, NULL);
    _task_queue.clear();
  }

  ~TaskExecutor() {
    THREAD_MUTEX_DESTROY(&_mut);
    THREAD_COND_DESTROY(&_cond);
  }

  // Needed because vector::resize uses the copy or the move constructor.
  // Delegating to the default constructor ensures the mutex, the condition
  // variable and the other members of *this are actually initialized.
  TaskExecutor(TaskExecutor<TaskT>&& other) noexcept : TaskExecutor() {
    (void)other;
  }

  void set_batch_size(size_t batch_size) { _batch_size = batch_size; }

  void set_overrun(bool overrun) { _overrun = overrun; }

  void set_allow_split_request(bool allow_split_request) {
    _allow_split_request = allow_split_request;
  }

  void set_thread_init_fn(boost::function<int(void*)> init_fn,
                          void** contexts = NULL) {
    _thread_init_fn = init_fn;
    _user_thread_contexts = contexts;
  }

  void set_thread_reset_fn(boost::function<int(void*)> reset_fn) {
    _thread_reset_fn = reset_fn;
  }

  void set_thread_callback_fn(boost::function<void(const void*, void*)> cb) {
    _fn = cb;
  }

  int start(uint32_t thread_num, uint32_t init_timeout_sec = 0);
  void stop();

  static void* thread_entry(void* args);

  int work(ThreadContext<TaskT>* context);

  TaskHandler<TaskT> schedule(const void*, void*);

  bool move_task_to_batch(BatchTasks<TaskT>& batchTask);  // NOLINT

 private:
  TaskExecutor(TaskExecutor<TaskT> const& other) = delete;

  TaskExecutor& operator=(TaskExecutor<TaskT> const& other) = delete;
  /*
  TaskExecutor(TaskExecutor<TaskT> && other) = delete;

  TaskExecutor& operator=(TaskExecutor<TaskT> && other) = delete;
  */

  bool _stop;

  // can't use boost::mutex, because some stupid macro
  THREAD_MUTEX_T _mut;
  THREAD_COND_T _cond;

  std::list<TaskT*> _task_queue;

  boost::function<int(void*)> _thread_init_fn;
  boost::function<int(void*)> _thread_reset_fn;
  void** _user_thread_contexts;

  std::vector<ThreadContext<TaskT>*> _thread_contexts;

  size_t _batch_size;
  bool _overrun;
  bool _allow_split_request;

  boost::function<void(const void*, void*)> _fn;
};

// TaskExecutorVector is a singleton class.
// Each model corresponds to one TaskExecutor,
// so we need several TaskExecutors when there is more than one model.
template <typename TaskT>
class TaskExecutorVector {
 public:
  static TaskExecutorVector<TaskT>& instance() {
    static TaskExecutorVector<TaskT> singleton;
    return singleton;
  }

  void resize(int size) { _vector_executor.resize(size); }

  TaskExecutor<TaskT>& operator[](int task_index) {
    if (_vector_executor.size() <= task_index || task_index <= -1) {
      LOG(ERROR) << "_vector_executor.size() <= task_index or <= -1";
      throw "_vector_executor.size() <= task_index or <= -1";
    }
    return _vector_executor[task_index];
  }

 private:
  TaskExecutorVector() = default;
  TaskExecutorVector(const TaskExecutorVector<TaskT>& other) = delete;
  TaskExecutorVector& operator=(const TaskExecutorVector<TaskT>& other) =
      delete;
  TaskExecutorVector(TaskExecutorVector<TaskT>&& other) = delete;
  TaskExecutorVector& operator=(TaskExecutorVector<TaskT>&& other) = delete;
  std::vector<TaskExecutor<TaskT>> _vector_executor;
};
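
// A minimal usage sketch; model_num and model_index are assumed to come
// from the serving configuration:
//   TaskExecutorVector<TaskT>::instance().resize(model_num);
//   TaskExecutorVector<TaskT>::instance()[model_index].set_batch_size(32);
//   TaskExecutorVector<TaskT>::instance()[model_index].start(4 /*threads*/);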

// TaskManager is actually a wrapper of the Request in bsf.
// TaskManager's schedule() turns the Request into a TaskT,
// which is divided into several TaskMetas that are put into the
// TaskExecutor's task_queue.
// wait() is a while loop that receives the signal once the whole Task is
// done.
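
// A minimal calling sketch, assuming `in` and `out` are the InVectorT and
// OutVectorT prepared by the caller:
//   TaskManager<paddle::PaddleTensor, paddle::PaddleTensor> tm(model_index);
//   if (tm.schedule(&in, &out)) tm.wait();  // blocks until the Task is done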
template <typename InItemT, typename OutItemT>
class TaskManager {
 public:
  typedef Task<InItemT, OutItemT> TaskT;
  typedef typename TaskT::InVectorT InVectorT;
  typedef typename TaskT::OutVectorT OutVectorT;

  explicit TaskManager(uint32_t model_index)  // NOLINT
      : _model_index(model_index) {}

  ~TaskManager() { wait(); }

  bool schedule(const void* in, void* out);  // NOLINT
  void wait();

  inline void clear() { wait(); }

 private:
  TaskHandler<TaskT> _task_owned;
  uint32_t _model_index;
};  // class TaskManager

class AutoMutex {
 public:
  explicit AutoMutex(THREAD_MUTEX_T& mut) : _mut(mut) {
    THREAD_MUTEX_LOCK(&_mut);
  }

  ~AutoMutex() { THREAD_MUTEX_UNLOCK(&_mut); }

 private:
  THREAD_MUTEX_T& _mut;
};

}  // namespace bsf
}  // namespace im

// #include "core/predictor/framework/bsf-inl-tensor.h"
#include "core/predictor/framework/bsf-inl.h"