/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <stdint.h>
#include <string.h>
#include <algorithm>
#include <iterator>

#include "paddle/fluid/framework/data_type.h"
#include "paddle/fluid/framework/framework.pb.h"
#include "paddle/fluid/framework/lod_tensor.h"

#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/memory/memory.h"

#include "paddle/fluid/recordio/scanner.h"
#include "paddle/fluid/recordio/writer.h"

namespace paddle {
namespace framework {

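// Prints a LoD as nested braces, e.g. a two-level LoD prints as
// {{0, 2, 4}{0, 1, 3, 5, 6}}.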
std::ostream &operator<<(std::ostream &os, const LoD &lod) {
  os << "{";
  for (auto &v : lod) {
    os << "{";
    bool is_first = true;
    for (auto &i : v) {
      if (is_first) {
        os << i;
        is_first = false;
      } else {
        os << ", " << i;
      }
    }
    os << "}";
  }
  os << "}";

  return os;
}

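// Prints a LoDTensor's dims, LoD, and at most its first ten elements.
// A tensor on a non-CPU place is copied to the CPU first so that its data
// can be read; only float and int64_t element types are supported.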
std::ostream &operator<<(std::ostream &os, const LoDTensor &t) {
  if (!platform::is_cpu_place(t.place())) {
    LoDTensor tt;
    framework::TensorCopy(t, platform::CPUPlace(), &tt);
    platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance();
    auto &dev_ctx = *pool.Get(t.place());
    dev_ctx.Wait();

    os << tt;
    return os;
  }

  os << "dim: " << t.dims() << "\n";
  os << "lod: " << t.lod() << "\n";

  // only print the first ten elements
  int64_t size = t.numel() < 10 ? t.numel() : 10;
  for (int64_t i = 0; i < size; ++i) {
    if (t.type().hash_code() == typeid(float).hash_code()) {
      os << t.data<float>()[i] << " ";
    } else if (t.type().hash_code() == typeid(int64_t).hash_code()) {
      os << t.data<int64_t>()[i] << " ";
    } else {
      PADDLE_THROW("LoDTensor data type not in [float, int64_t]");
    }
  }

  return os;
}

std::string LoDToString(const LoD &lod) {
  std::ostringstream stream;
  stream << lod;
  return stream.str();
}

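// Slices the levels [level, in.size()) of `in`, keeping only the sequences
// [elem_begin, elem_end) of level `level` and the entries below them, then
// rebases every resulting level so that its first offset is 0.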
LoD SliceInLevel(const LoD &in, size_t level, size_t elem_begin,
                 size_t elem_end) {
  PADDLE_ENFORCE_LT(level, in.size());
  PADDLE_ENFORCE_LT(elem_end, in[level].size());

  LoD res;
  res.resize(in.size() - level);
  // copy the first level
  res[0].assign(in[level].begin() + elem_begin,
                in[level].begin() + elem_end + 1);
  for (size_t lvl = 1; lvl < res.size(); lvl++) {
    const auto &in_level = in[level + lvl];
    const auto &above_level = res[lvl - 1];
    auto &out_level = res[lvl];
    out_level.assign(in_level.begin() + above_level.front(),
                     in_level.begin() + above_level.back() + 1);
  }
  for (size_t lvl = 0; lvl < res.size(); lvl++) {
    // To make the first offset equal 0, subtract the first element from
    // every element in the level.
    size_t front = res[lvl].front();
    for (auto &ele : res[lvl]) {
      ele -= front;
    }
  }
  return res;
}

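// Converts a relative-offset LoD, in which each level indexes into the level
// below it, into one in which every level holds absolute offsets into the
// underlying tensor, e.g. {{0, 2, 4}, {0, 1, 3, 5, 6}} becomes
// {{0, 3, 6}, {0, 1, 3, 5, 6}}.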
LoD ToAbsOffset(const LoD &in) {
  // the lowest level stores relative offsets
  if (in.empty() || in.size() == 1) return in;
  LoD result = in;
  for (auto level = static_cast<int>(in.size() - 2); level >= 0; level--) {
    for (size_t i = 0; i < in[level].size(); ++i) {
      size_t index = in[level][i];
      result[level][i] = result[level + 1][index];
    }
  }
  return result;
}

bool operator==(const LoD &a, const LoD &b) {
  if (a.size() != b.size()) {
    return false;
  }

  for (size_t i = 0; i < a.size(); i++) {
    const auto &a_level = a[i];
    const auto &b_level = b[i];
    if (a_level.size() != b_level.size()) {
      return false;
    }
    for (size_t j = 0; j < a_level.size(); j++) {
      if (a_level[j] != b_level[j]) {
        return false;
      }
    }
  }
  return true;
}

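// Validates a relative-offset LoD; CheckAbsLoD below performs the analogous
// checks for an absolute-offset LoD (see ToAbsOffset).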
bool CheckLoD(const LoD &in, int tensor_height) {
  if (in.empty()) return true;
  for (const auto &level : in) {
    // check: each level should contain at least two offsets.
    if (level.size() < 2) return false;
    // check: the first offset (the begin offset) of each level should be 0.
    if (level.front() != 0) return false;
    // check: the offsets in each level should be non-descending.
    if (!std::is_sorted(level.begin(), level.end())) {
      LOG(INFO) << "ascending error";
      return false;
    }
  }
  // check: the lowest level's last offset should equal `tensor_height` if
  //        tensor_height > 0.
  if (tensor_height > 0 && (size_t)tensor_height != in.back().back())
    return false;

  // check: each higher level's last offset should equal the lower level's
  // size - 1.
  // NOTE: LoD stores the levels from top to bottom, so the higher level goes
  // first.
  for (size_t level = 0; level < in.size() - 1; level++) {
    if (in[level].back() != in[level + 1].size() - 1) return false;
  }
  return true;
}

bool CheckAbsLoD(const LoD &in, int tensor_height) {
  if (in.empty()) return true;
  for (const auto &level : in) {
    // check: the offsets in each level should be non-descending.
    if (!std::is_sorted(level.begin(), level.end())) {
      return false;
    }

    // check: each level should contain at least two offsets.
    if (level.size() < 2) return false;

    // check: the first offset of each level should be 0, and the last should
    // equal the height of the underlying tensor.
    if (level.front() != 0) return false;
    if (tensor_height < 0) {
      tensor_height = level.back();
    } else if ((size_t)tensor_height != level.back()) {
      return false;
    }
  }
  return true;
}

using LoDAndOffset = std::pair<LoD, std::pair<size_t, size_t>>;
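// Extracts the length-based sub-LoD that covers the sequences
// [start_idx, end_idx) of level `start_level`, together with the absolute
// [begin, end) offsets of that range in the underlying tensor.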
LoDAndOffset GetSubLoDAndAbsoluteOffset(const LoD &lod, size_t start_idx,
                                        size_t end_idx, size_t start_level) {
  LoD sub_lod;

  for (size_t level_idx = start_level; level_idx < lod.size(); ++level_idx) {
    PADDLE_ENFORCE_LE(start_idx, end_idx);
    PADDLE_ENFORCE_LT(end_idx, lod[level_idx].size());
    std::vector<size_t> level_lens;
    for (size_t i = start_idx; i < end_idx; ++i) {
      level_lens.push_back(lod[level_idx][i + 1] - lod[level_idx][i]);
    }
    sub_lod.emplace_back(level_lens);
    start_idx = lod[level_idx][start_idx];
    end_idx = lod[level_idx][end_idx];
  }

  return LoDAndOffset{sub_lod, {start_idx, end_idx}};
}

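// Appends a length-based LoD onto an offset-based one, level by level,
// e.g. appending lengths {{2}} to offsets {{0, 2}} yields {{0, 2, 4}}.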
void AppendLoD(LoD *lod, const LoD &lod_length) {
  PADDLE_ENFORCE(
      lod->empty() || lod->size() == lod_length.size(),
      "The lod_length should has the same size with the appended lod.");
  if (lod->empty()) {
    *lod = LoD(lod_length.size(), std::vector<size_t>({0}));
  }
  for (size_t i = 0; i < lod->size(); ++i) {
    auto &level = (*lod)[i];
    for (size_t len : lod_length[i]) {
      level.push_back(level.back() + len);
    }
  }
}

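// The serialized layout of a LoDTensor is: a uint32_t version number, the
// LoD (level count, then each level's byte size and raw offset data), and
// finally the Tensor payload written by TensorToStream.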
void SerializeToStream(std::ostream &os, const LoDTensor &tensor,
                       const platform::DeviceContext &dev_ctx) {
  {  // the 1st field, uint32_t version for LoDTensor
    constexpr uint32_t version = 0;
    os.write(reinterpret_cast<const char *>(&version), sizeof(version));
  }
  {
    // the 2nd field, LoD information
    // uint64_t lod_level
    // uint64_t lod_level_1 size in bytes
    // size_t*  lod_level_1 data
    // ...
    auto lod = tensor.lod();
    uint64_t size = lod.size();
    os.write(reinterpret_cast<const char *>(&size), sizeof(size));

    for (auto &each : lod) {
      size = each.size() * sizeof(framework::LoD::value_type::value_type);
      os.write(reinterpret_cast<const char *>(&size), sizeof(size));
      os.write(reinterpret_cast<const char *>(each.data()),
               static_cast<std::streamsize>(size));
    }
  }
  // the 3rd field, Tensor
  TensorToStream(os, static_cast<Tensor>(tensor), dev_ctx);
}

void DeserializeFromStream(std::istream &is, LoDTensor *tensor,
                           const platform::DeviceContext &dev_ctx) {
  {
    // the 1st field, uint32_t version for LoDTensor
    uint32_t version;
    is.read(reinterpret_cast<char *>(&version), sizeof(version));
    PADDLE_ENFORCE_EQ(version, 0U, "Only version 0 is supported");
  }
  {
    // the 2nd field, LoD information
    uint64_t lod_level;
    is.read(reinterpret_cast<char *>(&lod_level), sizeof(lod_level));
    auto &lod = *tensor->mutable_lod();
    lod.resize(lod_level);
    for (uint64_t i = 0; i < lod_level; ++i) {
      uint64_t size;
      is.read(reinterpret_cast<char *>(&size), sizeof(size));
      std::vector<size_t> tmp(size / sizeof(size_t));
      is.read(reinterpret_cast<char *>(tmp.data()),
              static_cast<std::streamsize>(size));
      lod[i] = tmp;
    }
  }
  // the 3rd field, Tensor
  TensorFromStream(is, static_cast<Tensor *>(tensor), dev_ctx);
}

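// Packs a batch of LoDTensors into a single RecordIO record: a uint32_t
// tensor count followed by each tensor serialized with SerializeToStream.
// ReadFromRecordIO below reverses the process for one record.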
void WriteToRecordIO(recordio::Writer *writer,
                     const std::vector<LoDTensor> &tensor,
                     const platform::DeviceContext &dev_ctx) {
  std::stringstream buffer;
  // write the count as a fixed-width uint32_t so the read side can rely on it
  uint32_t sz = static_cast<uint32_t>(tensor.size());
  buffer.write(reinterpret_cast<const char *>(&sz), sizeof(uint32_t));
  for (auto &each : tensor) {
    SerializeToStream(buffer, each, dev_ctx);
  }
  writer->Write(buffer.str());
}

std::vector<LoDTensor> ReadFromRecordIO(
    recordio::Scanner *scanner, const platform::DeviceContext &dev_ctx) {
  std::vector<LoDTensor> result;
  if (scanner->HasNext()) {
    std::istringstream sin(scanner->Next());
    uint32_t sz;
    sin.read(reinterpret_cast<char *>(&sz), sizeof(uint32_t));
    result.resize(sz);
    for (uint32_t i = 0; i < sz; ++i) {
      DeserializeFromStream(sin, &result[i], dev_ctx);
    }
  }
  return result;
}

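// Splits the batch across `places`, e.g. for data-parallel execution.
// Without a LoD the tensor is sliced evenly along dimension 0; with a LoD it
// is split at top-level sequence boundaries and each slice receives a
// correspondingly rebased LoD.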
std::vector<LoDTensor> LoDTensor::SplitLoDTensor(
    const std::vector<platform::Place> places) const {
  check_memory_size();
  int batch_size =
      lod().empty() ? dims()[0] : static_cast<int>(lod()[0].size()) - 1;
  size_t result_size = std::min(static_cast<size_t>(batch_size), places.size());
  size_t remainder = batch_size % places.size();

  std::vector<LoDTensor> results;
  results.reserve(result_size);

  int step_width = static_cast<int>(batch_size / result_size);
  for (size_t i = 0; i < result_size; ++i) {
    int begin = static_cast<int>(i * step_width);
    int end = static_cast<int>((i + 1) * step_width);
    if (i + 1 == places.size()) {  // last
      end += remainder;
    }

    LoDTensor dst;
    if (lod().empty()) {
      auto src = Slice(begin, end);
      auto &dst_place = places[i];
      framework::TensorCopy(src, dst_place, &dst);
    } else {
      auto lod_and_offset = GetSubLoDAndAbsoluteOffset(lod(), begin, end, 0);

      auto &offset = lod_and_offset.second;
      auto src = Slice(offset.first, offset.second);
      auto &dst_place = places[i];
      framework::TensorCopy(src, dst_place, &dst);

      LoD my_lod;
      for (auto &l : lod_and_offset.first) {
        std::vector<size_t> v{0};
        for (auto &ll : l) {
          v.push_back(ll + v.back());
        }
        my_lod.emplace_back(v);
      }
      dst.set_lod(my_lod);
    }
    results.emplace_back(dst);
  }

  return results;
}

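// The inverse of SplitLoDTensor: concatenates the inputs along dimension 0
// on `dst_place`, shifting each input's LoD offsets past the sequences
// already merged. All inputs must agree in type, layout, and per-example
// shape.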
void LoDTensor::MergeLoDTensor(
    const std::vector<const LoDTensor *> &lod_tensors,
    platform::Place dst_place) {
  PADDLE_ENFORCE(!lod_tensors.empty());

  framework::DDim new_dim = lod_tensors[0]->dims();
  std::type_index new_type = lod_tensors[0]->type();
  framework::DataLayout new_layout = lod_tensors[0]->layout();
  LoD new_lod = lod_tensors[0]->lod();
  for (size_t i = 1; i < lod_tensors.size(); ++i) {
    auto *t = lod_tensors[i];
    PADDLE_ENFORCE_EQ(new_type.hash_code(), t->type().hash_code());
    PADDLE_ENFORCE_EQ(new_layout, t->layout());

    PADDLE_ENFORCE_EQ(framework::product(new_dim) / new_dim[0],
                      framework::product(t->dims()) / t->dims()[0]);
    new_dim[0] += t->dims()[0];

    auto &lod = t->lod();
    for (size_t j = 0; j < lod.size(); ++j) {
      auto &sub_lod = new_lod[j];
      auto &offset = sub_lod.back();
      for (size_t k = 1; k < lod[j].size(); ++k) {
        sub_lod.push_back(lod[j][k] + offset);
      }
    }
  }
  Resize(new_dim);
  set_layout(new_layout);
  set_lod(new_lod);
  mutable_data(dst_place, new_type);

  int begin = 0;
  for (auto *src : lod_tensors) {
    int end = begin + src->dims()[0];
    auto dst = Slice(begin, end);
    framework::TensorCopy(*src, dst_place, &dst);
    begin = end;
  }
}

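// Converts an offset-based LoD to a length-based one by differencing
// adjacent offsets, e.g. {{0, 2, 5}} becomes {{2, 3}}.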
LoD ConvertToLengthBasedLoD(const LoD &offset_lod) {
  LoD length_lod;
  length_lod.reserve(offset_lod.size());
  for (size_t lvl = 0; lvl < offset_lod.size(); ++lvl) {
    std::vector<size_t> level;
    if (offset_lod[lvl].size() > 0) {
      level.reserve(offset_lod[lvl].size() - 1);
    }
    // idx + 1 < size() avoids size_t underflow when a level is empty
    for (size_t idx = 0; idx + 1 < offset_lod[lvl].size(); ++idx) {
      level.push_back(offset_lod[lvl][idx + 1] - offset_lod[lvl][idx]);
    }
    length_lod.push_back(level);
  }
  return length_lod;
}

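// The inverse conversion: a running sum over the lengths, prefixed with 0,
// e.g. {{2, 3}} becomes {{0, 2, 5}}.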
LoD ConvertToOffsetBasedLoD(const LoD &length_lod) {
  LoD offset_lod;
  offset_lod.reserve(length_lod.size());
  for (size_t lvl = 0; lvl < length_lod.size(); ++lvl) {
    std::vector<size_t> level;
    level.reserve(length_lod[lvl].size() + 1);
    size_t tmp = 0;
    level.push_back(tmp);
    for (size_t idx = 0; idx < length_lod[lvl].size(); ++idx) {
      tmp += length_lod[lvl][idx];
      level.push_back(tmp);
    }
    offset_lod.push_back(level);
  }
  return offset_lod;
}

}  // namespace framework
}  // namespace paddle