/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/framework/lod_tensor.h"
#include "paddle/framework/data_type.h"
#include "paddle/framework/framework.pb.h"

#include "paddle/memory/memcpy.h"
#include "paddle/memory/memory.h"

#include <stdint.h>
#include <string.h>
#include <algorithm>
#include <iterator>

#include <glog/logging.h>

namespace paddle {
namespace framework {

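// Pretty-print a LoD as nested braces. Each level is printed as a
// brace-enclosed, comma-terminated list of offsets; e.g. the two-level
// LoD {{0, 2, 4}, {0, 1, 3, 5, 7}} prints as {{0,2,4,}{0,1,3,5,7,}}.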
std::ostream &operator<<(std::ostream &os, const LoD &lod) {
  os << "{";
  for (auto &v : lod) {
    os << "{";
    for (auto &i : v) {
      os << i << ",";
    }
    os << "}";
  }
  os << "}";

  return os;
}

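// Print a LoDTensor's dims, LoD, and at most its first ten elements.
// Only float tensors are supported; tensors on non-CPU places are first
// copied to the CPU so their data can be read.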
std::ostream &operator<<(std::ostream &os, const LoDTensor &t) {
  PADDLE_ENFORCE(t.type().hash_code() == typeid(float).hash_code());

  if (!platform::is_cpu_place(t.place())) {
    LoDTensor tt;
    framework::Copy(t, platform::CPUPlace(), &tt);
    platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance();
    auto &dev_ctx = *pool.Get(t.place());
    dev_ctx.Wait();

    os << tt;
    return os;
  }

  os << "dim: " << t.dims() << "\n";
  os << "lod: " << t.lod() << "\n";

  // print at most the first ten elements
  int64_t size = t.numel() < 10 ? t.numel() : 10;
  for (int64_t i = 0; i < size; ++i) {
    os << t.data<float>()[i] << " ";
  }

  return os;
}

std::string LoDToString(const LoD &lod) {
  std::ostringstream stream;
  stream << lod;
  return stream.str();
}

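// Slice a LoD, keeping sequences [elem_begin, elem_end) at `level` and all
// the finer levels below it, then rebasing every level so it starts at 0.
// A worked example (values derived from the offset representation):
//   SliceInLevel({{0, 2, 4, 6}, {0, 1, 3, 5, 7, 9, 11}},
//                /*level=*/0, /*elem_begin=*/1, /*elem_end=*/2)
// keeps the second top-level sequence and its two sub-sequences, giving
//   {{0, 2}, {0, 2, 4}}.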
LoD SliceInLevel(const LoD &in, size_t level, size_t elem_begin,
                 size_t elem_end) {
  PADDLE_ENFORCE_LT(level, in.size());
  PADDLE_ENFORCE_LT(elem_end, in[level].size());

  LoD res;
  res.resize(in.size() - level);
  // copy the first level
  res[0].assign(in[level].begin() + elem_begin,
                in[level].begin() + elem_end + 1);
  for (size_t lvl = 1; lvl < res.size(); lvl++) {
    const auto &in_level = in[level + lvl];
    const auto &above_level = res[lvl - 1];
    auto &out_level = res[lvl];
    out_level.assign(in_level.begin() + above_level.front(),
                     in_level.begin() + above_level.back() + 1);
  }
  for (size_t lvl = 0; lvl < res.size(); lvl++) {
    // to make the first offset equal 0, subtract the first element from
    // all of the elements
    size_t front = res[lvl].front();
    for (auto &ele : res[lvl]) {
      ele -= front;
    }
  }
  return res;
}

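// Convert a LoD whose upper levels index into the level below them into one
// where every level stores absolute element offsets. For example,
//   ToAbsOffset({{0, 2, 4}, {0, 1, 3, 5, 7}}) == {{0, 3, 7}, {0, 1, 3, 5, 7}}
// since top-level entry 1 points at position 2 of the lower level, i.e. 3.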
LoD ToAbsOffset(const LoD &in) {
  // the lowest level stores relative offsets
  if (in.empty() || in.size() == 1) return in;
  LoD result = in;
  for (int level = result.size() - 2; level >= 0; level--) {
    for (auto &ele : result[level]) {
      ele = result[level + 1][ele];
    }
  }
  return result;
}

bool operator==(const LoD &a, const LoD &b) {
  if (a.size() != b.size()) {
    return false;
  }

  for (size_t i = 0; i < a.size(); i++) {
    const auto &a_level = a[i];
    const auto &b_level = b[i];
    if (a_level.size() != b_level.size()) {
      return false;
    }
    for (size_t j = 0; j < a_level.size(); j++) {
      if (a_level[j] != b_level[j]) {
        return false;
      }
    }
  }
  return true;
}

using LoDAndOffset = std::pair<LoD, std::pair<size_t, size_t>>;
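// Extract the sub-LoD covering sequences [start_idx, end_idx) of
// `start_level`, together with the absolute element range they span. Note
// that the returned sub-LoD stores sequence *lengths*, not offsets. A worked
// example, with lod = {{0, 2, 4}, {0, 1, 3, 5, 7}}:
//   GetSubLoDAndAbsoluteOffset(lod, 1, 2, 0) == {{{2}, {2, 2}}, {3, 7}}
// i.e. one top-level sequence of length 2, whose two sub-sequences of
// length 2 each together cover elements [3, 7).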
LoDAndOffset GetSubLoDAndAbsoluteOffset(const LoD &lod, size_t start_idx,
                                        size_t end_idx, size_t start_level) {
  LoD sub_lod;

  for (size_t level_idx = start_level; level_idx < lod.size(); ++level_idx) {
    PADDLE_ENFORCE_LE(start_idx, end_idx);
    PADDLE_ENFORCE_LT(end_idx, lod[level_idx].size());
    std::vector<size_t> level_lens;
    for (size_t i = start_idx; i < end_idx; ++i) {
      level_lens.push_back(lod[level_idx][i + 1] - lod[level_idx][i]);
    }
    sub_lod.emplace_back(level_lens);
    start_idx = lod[level_idx][start_idx];
    end_idx = lod[level_idx][end_idx];
  }

  return LoDAndOffset{sub_lod, {start_idx, end_idx}};
}

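// Append a length-based LoD to an offset-based one, level by level. Each
// length in lod_length[i] extends (*lod)[i] by one cumulative offset. For
// example, appending lod_length = {{3, 1}} to lod = {{0, 2, 5}} yields
// {{0, 2, 5, 8, 9}}. An empty destination is first seeded with a single
// zero offset per level.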
void AppendLoD(LoD *lod, const LoD &lod_length) {
  PADDLE_ENFORCE(
      lod->empty() || lod->size() == lod_length.size(),
      "The lod_length should has the same size with the appended lod.");
  if (lod->empty()) {
    // seed every level with a single zero offset
    *lod = LoD(lod_length.size(), std::vector<size_t>({0}));
  }
  for (size_t i = 0; i < lod->size(); ++i) {
    auto &level = (*lod)[i];
    for (size_t len : lod_length[i]) {
      level.push_back(level.back() + len);
    }
  }
}

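// On-disk layout written below (and consumed by DeserializeFromStream):
//   uint32_t  version (currently 0)
//   uint64_t  number of LoD levels
//   for each level: uint64_t byte size, then the raw size_t offsets
//   the Tensor payload, written by the Tensor overload of SerializeToStream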
void SerializeToStream(std::ostream &os, const LoDTensor &tensor,
                       const platform::DeviceContext &dev_ctx) {
  {  // the 1st field, uint32_t version for LoDTensor
    constexpr uint32_t version = 0;
    os.write(reinterpret_cast<const char *>(&version), sizeof(version));
  }
  {
    // the 2nd field, LoD information
    // uint64_t lod_level
    // uint64_t lod_level_1 size in bytes.
    // size_t*  lod_level_1 data
    // ...
    auto lod = tensor.lod();
    uint64_t size = lod.size();
    os.write(reinterpret_cast<const char *>(&size), sizeof(size));

    for (auto &each : lod) {
      size = each.size() * sizeof(framework::LoD::value_type::value_type);
      os.write(reinterpret_cast<const char *>(&size), sizeof(size));
      os.write(reinterpret_cast<const char *>(each.data()),
               static_cast<std::streamsize>(size));
    }
  }
  // the 3rd field, Tensor
  SerializeToStream(os, static_cast<Tensor>(tensor), dev_ctx);
}

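// Inverse of SerializeToStream above: read back the version word, the LoD
// levels, and finally the Tensor payload.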
void DeserializeFromStream(std::istream &is, LoDTensor *tensor,
                           const platform::DeviceContext &dev_ctx) {
  {
    // the 1st field, uint32_t version for LoDTensor
    uint32_t version;
    is.read(reinterpret_cast<char *>(&version), sizeof(version));
    PADDLE_ENFORCE_EQ(version, 0U, "Only version 0 is supported");
  }
  {
    // the 2nd field, LoD information
    uint64_t lod_level;
    is.read(reinterpret_cast<char *>(&lod_level), sizeof(lod_level));
    auto &lod = *tensor->mutable_lod();
    lod.resize(lod_level);
    for (uint64_t i = 0; i < lod_level; ++i) {
      uint64_t size;
      is.read(reinterpret_cast<char *>(&size), sizeof(size));
      std::vector<size_t> tmp(size / sizeof(size_t));
      is.read(reinterpret_cast<char *>(tmp.data()),
              static_cast<std::streamsize>(size));
      lod[i] = tmp;
    }
  }
  // the 3rd field, Tensor
  DeserializeFromStream(is, static_cast<Tensor *>(tensor), dev_ctx);
}

// TODO(tonyyang-svail): make this function support LoD
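// Split this tensor evenly along dimension 0, copying one slice to each
// place. For example, a tensor with dims {4, 6} split across two places
// yields two tensors with dims {2, 6}, one per place.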
std::vector<LoDTensor> LoDTensor::SplitLoDTensor(
    const std::vector<platform::Place> places) const {
  check_memory_size();
  PADDLE_ENFORCE(lod().empty(), "SplitLoDTensor does not support LoD yet");
  PADDLE_ENFORCE(dims()[0] % places.size() == 0,
                 "Batch size should be divisible by the number of places");

  std::vector<LoDTensor> lods;
  for (size_t place_idx = 0; place_idx < places.size(); ++place_idx) {
    int begin = place_idx * dims()[0] / places.size();
    int end = (place_idx + 1) * dims()[0] / places.size();

    auto src = Slice(begin, end);
    auto &dst_place = places[place_idx];
    LoDTensor dst;
    framework::Copy(src, dst_place, &dst);

    lods.emplace_back(dst);
  }

  return lods;
}

// TODO(tonyyang-svail): make this function support LoD
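// Inverse of SplitLoDTensor: stack the given tensors along dimension 0 on
// dst_place. All inputs must share dims, type, and layout; merging k
// tensors with dims {2, 6} produces dims {2 * k, 6}.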
void LoDTensor::MergeLoDTensor(
    const std::vector<const LoDTensor *> &lod_tensors,
    platform::Place dst_place) {
  PADDLE_ENFORCE(!lod_tensors.empty());
  framework::DDim new_dim = lod_tensors[0]->dims();
  std::type_index new_type = lod_tensors[0]->type();
  auto new_layout = lod_tensors[0]->layout();
  for (auto *lod : lod_tensors) {
    PADDLE_ENFORCE(new_dim == lod->dims());
    PADDLE_ENFORCE(new_type == lod->type());
    PADDLE_ENFORCE(new_layout == lod->layout());
  }
  new_dim[0] *= lod_tensors.size();
  Resize(new_dim);
  set_layout(new_layout);

  mutable_data(dst_place, new_type);
  int begin = 0;
  for (auto *src : lod_tensors) {
    int end = begin + src->dims()[0];
    auto dst = Slice(begin, end);
    framework::Copy(*src, dst_place, &dst);
    begin = end;
  }
}

}  // namespace framework
}  // namespace paddle