// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once
#include <memory>
#include "lite/core/memory.h"
#include "lite/core/types.h"
#include "lite/utils/macros.h"

namespace paddle {
namespace lite {

/*
 * WorkSpace is a container that helps manage the temporary memory shared
 * across kernels during serial execution.
 *
 * Due to the mobile library size limit, a complex allocator or GC algorithm
 * is not suitable here; one needs to carefully manage the workspace inside a
 * single kernel.
 *
 * NOTE
 *
 * Kernel developers should use the workspace as follows:
 *
 * - call `WorkSpace::Global().Alloc()` if needed to allocate some temporary
 * buffer.
 */
class WorkSpace {
 public:
  // Reset the workspace, and treat the workspace as empty.
  void AllocReset() { cursor_ = 0; }

  // Allocate a memory buffer.
  core::byte_t* Alloc(size_t size) {
    buffer_.ResetLazy(target_, cursor_ + size);
    auto* data = static_cast<core::byte_t*>(buffer_.data()) + cursor_;
    cursor_ += size;
    return data;
  }

  static WorkSpace& Global_Host() {
53 54
    static LITE_THREAD_LOCAL std::unique_ptr<WorkSpace> x(
        new WorkSpace(TARGET(kHost)));
Y
Yan Chunwei 已提交
55 56 57 58 59 60 61 62 63 64 65 66 67
    return *x;
  }

#if defined(LITE_WITH_X86)
  static WorkSpace& Global_X86() { return Global_Host(); }
#endif

#if defined(LITE_WITH_ARM)
  static WorkSpace& Global_ARM() { return Global_Host(); }
#endif

#if defined(LITE_WITH_CUDA)
  static WorkSpace& Global_CUDA() {
68 69
    static LITE_THREAD_LOCAL std::unique_ptr<WorkSpace> x(
        new WorkSpace(TARGET(kCUDA)));
Y
Yan Chunwei 已提交
70 71 72 73
    return *x;
  }
#endif

74 75
#if defined(LITE_WITH_MLU)
  static WorkSpace& Global_MLU() {
76 77
    static LITE_THREAD_LOCAL std::unique_ptr<WorkSpace> x(
        new WorkSpace(TARGET(kMLU)));
78 79 80 81
    return *x;
  }
#endif

Y
Yan Chunwei 已提交
82 83 84 85 86 87 88 89 90 91 92 93
 private:
  explicit WorkSpace(TargetType x) : target_(x) {}

  TargetType target_;
  Buffer buffer_;
  size_t cursor_;

  DISALLOW_COPY_AND_ASSIGN(WorkSpace);
};

}  // namespace lite
}  // namespace paddle