// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/inference/api/paddle_inference_api.h"
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/platform/enforce.h"

namespace paddle {

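// Resizes the underlying LoDTensor to the requested shape. Only input tensors
// may be reshaped, and SetName() must have been called so the variable can be
// located in the runtime scope.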
void ZeroCopyTensor::Reshape(const std::vector<int> &shape) {
  PADDLE_ENFORCE(!name_.empty(),
                 "Need to SetName first, so that the corresponding tensor can "
                 "be retrieved.");
  PADDLE_ENFORCE(input_or_output_,
                 "Can't reshape the output tensor, it is readonly");
  PADDLE_ENFORCE(scope_);
  auto *scope = static_cast<framework::Scope *>(scope_);
  auto *var = scope->FindVar(name_);
  PADDLE_ENFORCE(var, "No tensor called [%s] in the runtime scope", name_);
  auto *tensor = var->GetMutable<framework::LoDTensor>();
  tensor->Resize(framework::make_ddim(shape));
}

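// Looks up the underlying LoDTensor on first use and caches the pointer in
// tensor_ for subsequent calls.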
#define EAGER_GET_TENSOR    \
  if (!tensor_) {           \
    tensor_ = FindTensor(); \
  }                         \
  auto *tensor = static_cast<framework::LoDTensor *>(tensor_);

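// Returns a mutable data pointer, allocating memory on the requested place.
// Reshape() must be called first so that the tensor has a non-empty shape.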
template <typename T>
T *ZeroCopyTensor::mutable_data(PaddlePlace place) {
  EAGER_GET_TENSOR;
  PADDLE_ENFORCE_GT(
      tensor->numel(), 0,
      "You should call ZeroCopyTensor::Reshape(const std::vector<int> &shape)"
      "function before retrieving mutable_data from input tensor.");
  switch (static_cast<int>(place)) {
    case static_cast<int>(PaddlePlace::kCPU): {
      return tensor->mutable_data<T>(platform::CPUPlace());
    }
    case static_cast<int>(PaddlePlace::kGPU): {
      return tensor->mutable_data<T>(platform::CUDAPlace(device_));
    }
    default:
      PADDLE_THROW("Unsupported place: %d", static_cast<int>(place));
      break;
  }
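  // Unreachable: the default case above throws. Kept to avoid a
  // missing-return warning.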
  return nullptr;
}

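// Returns the tensor's raw data pointer without copying, and reports the
// place the data resides on and the number of elements.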
template <typename T>
T *ZeroCopyTensor::data(PaddlePlace *place, int *size) const {
  EAGER_GET_TENSOR;
  auto *res = tensor->data<T>();

  if (platform::is_cpu_place(tensor->place())) {
    *place = PaddlePlace::kCPU;
  } else if (platform::is_gpu_place(tensor->place())) {
    *place = PaddlePlace::kGPU;
  } else {
    *place = PaddlePlace::kUNK;
  }

  *size = tensor->numel();
  return res;
}

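// Maps the framework's element type to the corresponding PaddleDType.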
PaddleDType ZeroCopyTensor::type() const {
  EAGER_GET_TENSOR;
  auto type = tensor->type();
  if (type == framework::proto::VarType::FP32) {
    return PaddleDType::FLOAT32;
  } else if (type == framework::proto::VarType::INT64) {
    return PaddleDType::INT64;
  } else if (type == framework::proto::VarType::INT32) {
    return PaddleDType::INT32;
  } else if (type == framework::proto::VarType::UINT8) {
    return PaddleDType::UINT8;
  }
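  // Element types not listed above are reported as FLOAT32.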
  return PaddleDType::FLOAT32;
}

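// Copies numel() * sizeof(T) bytes from a host buffer into this tensor,
// targeting CPU memory or the bound GPU device depending on place_.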
template <typename T>
void ZeroCopyTensor::copy_from_cpu(const T *data) {
  EAGER_GET_TENSOR;
  PADDLE_ENFORCE_GT(
      tensor->numel(), 0,
      "You should call ZeroCopyTensor::Reshape(const std::vector<int> &shape) "
      "function before copying data from the CPU.");
  size_t ele_size = tensor->numel() * sizeof(T);

  if (place_ == PaddlePlace::kCPU) {
    auto *t_data = tensor->mutable_data<T>(platform::CPUPlace());
    std::memcpy(static_cast<void *>(t_data), data, ele_size);
  } else {
#ifdef PADDLE_WITH_CUDA
    platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance();
    platform::CUDAPlace gpu_place(device_);
    auto *t_data = tensor->mutable_data<T>(gpu_place);
    auto *dev_ctx =
        static_cast<const platform::CUDADeviceContext *>(pool.Get(gpu_place));

    memory::Copy(gpu_place, static_cast<void *>(t_data), platform::CPUPlace(),
                 data, ele_size, dev_ctx->stream());
#else
    PADDLE_THROW("Not compiled with CUDA, should not reach here.");
#endif
  }
}

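// Copies the tensor's contents into a caller-provided host buffer,
// synchronizing on the device stream when the data lives on the GPU.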
template <typename T>
void ZeroCopyTensor::copy_to_cpu(T *data) {
  EAGER_GET_TENSOR;
  auto ele_num = tensor->numel();
  auto *t_data = tensor->data<T>();
  auto t_place = tensor->place();

  if (platform::is_cpu_place(t_place)) {
    std::memcpy(static_cast<void *>(data), t_data, ele_num * sizeof(T));
  } else {
#ifdef PADDLE_WITH_CUDA
    platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance();
    auto gpu_place = boost::get<platform::CUDAPlace>(t_place);
    auto *dev_ctx =
        static_cast<const platform::CUDADeviceContext *>(pool.Get(gpu_place));
    memory::Copy(platform::CPUPlace(), static_cast<void *>(data), gpu_place,
                 t_data, ele_num * sizeof(T), dev_ctx->stream());

    cudaStreamSynchronize(dev_ctx->stream());
#else
    PADDLE_THROW("Not compile with CUDA, should not reach here.");
#endif
  }
}
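
// Explicit instantiations for the element types exposed through the
// inference API.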
template void ZeroCopyTensor::copy_from_cpu<float>(const float *data);
template void ZeroCopyTensor::copy_from_cpu<int64_t>(const int64_t *data);
template void ZeroCopyTensor::copy_from_cpu<int32_t>(const int32_t *data);
template void ZeroCopyTensor::copy_from_cpu<uint8_t>(const uint8_t *data);
template void ZeroCopyTensor::copy_to_cpu<float>(float *data);
template void ZeroCopyTensor::copy_to_cpu<int64_t>(int64_t *data);
template void ZeroCopyTensor::copy_to_cpu<int32_t>(int32_t *data);
template void ZeroCopyTensor::copy_to_cpu<uint8_t>(uint8_t *data);

template float *ZeroCopyTensor::data<float>(PaddlePlace *place,
                                            int *size) const;
template int64_t *ZeroCopyTensor::data<int64_t>(PaddlePlace *place,
                                                int *size) const;
template int32_t *ZeroCopyTensor::data<int32_t>(PaddlePlace *place,
                                                int *size) const;
template uint8_t *ZeroCopyTensor::data<uint8_t>(PaddlePlace *place,
                                                int *size) const;
template float *ZeroCopyTensor::mutable_data<float>(PaddlePlace place);
template int64_t *ZeroCopyTensor::mutable_data<int64_t>(PaddlePlace place);
template int32_t *ZeroCopyTensor::mutable_data<int32_t>(PaddlePlace place);
template uint8_t *ZeroCopyTensor::mutable_data<uint8_t>(PaddlePlace place);

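// Resolves name_ to the LoDTensor held by the matching variable in the
// runtime scope.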
void *ZeroCopyTensor::FindTensor() const {
  PADDLE_ENFORCE(!name_.empty(),
                 "Need to SetName first, so that the corresponding tensor can "
                 "be retrieved.");
  PADDLE_ENFORCE(scope_);
  auto *scope = static_cast<framework::Scope *>(scope_);
  auto *var = scope->FindVar(name_);
  PADDLE_ENFORCE(var, "No tensor called [%s] in the runtime scope", name_);
  auto *tensor = var->GetMutable<framework::LoDTensor>();
  return tensor;
}

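// Returns the tensor's dimensions as a vector of ints.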
std::vector<int> ZeroCopyTensor::shape() const {
  EAGER_GET_TENSOR;
  PADDLE_ENFORCE(tensor_, "No tensor called [%s] found in the scope", name_);
  return framework::vectorize<int>(tensor->dims());
}

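// Sets the tensor's level-of-detail (LoD) information from nested offset
// vectors.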
void ZeroCopyTensor::SetLoD(const std::vector<std::vector<size_t>> &x) {
  EAGER_GET_TENSOR;
  framework::LoD lod;
  for (auto &level : x) {
    lod.emplace_back(level);
  }
  tensor->set_lod(lod);
}

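// Returns the tensor's level-of-detail (LoD) information as nested offset
// vectors.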
std::vector<std::vector<size_t>> ZeroCopyTensor::lod() const {
  EAGER_GET_TENSOR;
  std::vector<std::vector<size_t>> res;
  for (auto &level : tensor->lod()) {
    res.emplace_back(level);
  }
  return res;
}

}  // namespace paddle