/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <stdint.h>
#include <string.h>
#include <algorithm>
#include <iterator>

#include "paddle/fluid/framework/data_type.h"
#include "paddle/fluid/framework/framework.pb.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/var_type.h"

#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/memory/memory.h"

#include "paddle/fluid/recordio/scanner.h"
#include "paddle/fluid/recordio/writer.h"
namespace paddle {
namespace framework {

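// A LoD ("Level of Details") is a vector of offset vectors describing how a
// batch decomposes into sequences. E.g. (illustrative, not from the original
// source): two sequences of 3 and 2 rows packed into one 5-row tensor can
// carry lod = {{0, 3, 5}}.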
std::ostream &operator<<(std::ostream &os, const LoD &lod) {
  os << "{";
  for (auto &v : lod) {
    os << "{";
    bool is_first = true;
    for (auto &i : v) {
      if (is_first) {
        os << i;
        is_first = false;
      } else {
        os << ", " << i;
      }
    }
    os << "}";
  }
  os << "}";

  return os;
}

std::ostream &operator<<(std::ostream &os, const LoDTensor &t) {
  if (!platform::is_cpu_place(t.place())) {
    LoDTensor tt;
    framework::TensorCopy(t, platform::CPUPlace(), &tt);
    platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance();
    auto &dev_ctx = *pool.Get(t.place());
    dev_ctx.Wait();

    os << tt;
    return os;
  }

  os << "dim: " << t.dims() << "\n";
  os << "lod: " << t.lod() << "\n";

  // only print first ten elements
  int64_t size = t.numel() < 10 ? t.numel() : 10;
  for (int64_t i = 0; i < size; ++i) {
    if (IsType<float>(t.type())) {
      os << t.data<float>()[i] << " ";
    } else if (IsType<int64_t>(t.type())) {
      os << t.data<int64_t>()[i] << " ";
    } else {
      PADDLE_THROW("LoDTensor data type not in [float, int64_t]");
    }
  }

  return os;
}

std::string LoDToString(const LoD &lod) {
  std::ostringstream stream;
  stream << lod;
  return stream.str();
}

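// SliceInLevel keeps the LoD levels at and below `level` for the element
// range [elem_begin, elem_end), rebased so each level starts at 0.
// E.g. (illustrative): in = {{0, 2, 4}, {0, 1, 3, 5, 9}}, level = 0,
// range [1, 2) -> {{0, 2}, {0, 2, 6}}.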
LoD SliceInLevel(const LoD &in, size_t level, size_t elem_begin,
                 size_t elem_end) {
  PADDLE_ENFORCE_LT(level, in.size());
  PADDLE_ENFORCE_LT(elem_end, in[level].size());

  LoD res;
  res.resize(in.size() - level);
  // copy the first level
  res[0].assign(in[level].begin() + elem_begin,
                in[level].begin() + elem_end + 1);
  for (size_t lvl = 1; lvl < res.size(); lvl++) {
    const auto &in_level = in[level + lvl];
    const auto &above_level = res[lvl - 1];
    auto &out_level = res[lvl];
    out_level.assign(in_level.begin() + above_level.front(),
                     in_level.begin() + above_level.back() + 1);
  }
  for (size_t lvl = 0; lvl < res.size(); lvl++) {
    // rebase each level so its first offset is 0, by subtracting the first
    // element from all elements
    size_t front = res[lvl].front();
    for (auto &ele : res[lvl]) {
      ele -= front;
    }
  }
  return res;
}

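// ToAbsOffset rewrites every upper level in terms of the bottom level's
// offsets. E.g. (illustrative): {{0, 2, 4}, {0, 1, 3, 5, 9}} becomes
// {{0, 3, 9}, {0, 1, 3, 5, 9}}.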
LoD ToAbsOffset(const LoD &in) {
  // the lowest level stores relative offsets
  if (in.empty() || in.size() == 1) return in;
  LoD result = in;
  for (auto level = static_cast<int>(in.size() - 2); level >= 0; level--) {
    for (size_t i = 0; i < in[level].size(); ++i) {
      size_t index = in[level][i];
      result[level][i] = result[level + 1][index];
    }
  }
  return result;
}

bool operator==(const LoD &a, const LoD &b) {
  if (a.size() != b.size()) {
    return false;
  }

  for (size_t i = 0; i < a.size(); i++) {
    const auto &a_level = a[i];
    const auto &b_level = b[i];
    if (a_level.size() != b_level.size()) {
      return false;
    }
    for (size_t j = 0; j < a_level.size(); j++) {
      if (a_level[j] != b_level[j]) {
        return false;
      }
    }
  }
  return true;
}

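// E.g. (illustrative): CheckLoD({{0, 2, 4}, {0, 1, 3, 5, 9}}, 9) holds: the
// top level's last offset 4 equals the lower level's size - 1, and the
// lowest level ends at the tensor height 9. The absolute form
// CheckAbsLoD({{0, 3, 9}, {0, 1, 3, 5, 9}}, 9) requires every level to end
// at the tensor height instead.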
bool CheckLoD(const LoD &in, int tensor_height) {
  if (in.empty()) return true;
  for (const auto &level : in) {
    // check: there should be at least 2 offsets in each level.
    if (level.size() < 2) return false;
    // check: the first offset (the begin offset) of each level should be 0.
    if (level.front() != 0) return false;
    // check: all the offsets in a level should be strictly ascending
    // (no duplicates allowed).
    if (std::adjacent_find(level.begin(), level.end(),
                           [](size_t a, size_t b) { return a >= b; }) !=
        level.end()) {
      LOG(INFO) << "ascending error";
      return false;
    }
  }
  // check: the lowest level's last offset should equal `tensor_height` if
  //        tensor_height > 0.
  if (tensor_height > 0 && (size_t)tensor_height != in.back().back())
    return false;

  // check: the higher level's last offset should equal the lower level's
  // size - 1.
  // NOTE: LoD stores the levels from top to bottom, so the higher level goes
  // first.
  for (size_t level = 0; level < in.size() - 1; level++) {
    if (in[level].back() != in[level + 1].size() - 1) return false;
  }
  return true;
}

bool CheckAbsLoD(const LoD &in, int tensor_height) {
  if (in.empty()) return true;
  for (const auto &level : in) {
    // check: all the offsets in a level should be strictly ascending
    // (no duplicates allowed).
    if (std::adjacent_find(level.begin(), level.end(),
                           [](size_t a, size_t b) { return a >= b; }) !=
        level.end()) {
      return false;
    }

    // check: there should be at least 2 offsets in each level.
    if (level.size() < 2) return false;

    // check: the first offset of each level should be 0, and the last should
    // be the same (the height of the underlying tensor).
    if (level.front() != 0) return false;
    if (tensor_height < 0) {
      tensor_height = level.back();
    } else if ((size_t)tensor_height != level.back()) {
      return false;
    }
  }
  return true;
}

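// GetSubLoDAndAbsoluteOffset returns the sub-LoD of elements
// [start_idx, end_idx) as per-level *lengths*, plus the absolute row range
// they cover. E.g. (illustrative): for lod = {{0, 2, 4}, {0, 1, 3, 5, 9}},
// start_idx = 1, end_idx = 2, start_level = 0, the result is {{2}, {2, 4}}
// with the absolute offset pair {3, 9}.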
using LoDAndOffset = std::pair<LoD, std::pair<size_t, size_t>>;
LoDAndOffset GetSubLoDAndAbsoluteOffset(const LoD &lod, size_t start_idx,
                                        size_t end_idx, size_t start_level) {
  LoD sub_lod;

  for (size_t level_idx = start_level; level_idx < lod.size(); ++level_idx) {
    PADDLE_ENFORCE_LE(start_idx, end_idx);
    PADDLE_ENFORCE_LT(end_idx, lod[level_idx].size());
    std::vector<size_t> level_lens;
    for (size_t i = start_idx; i < end_idx; ++i) {
      level_lens.push_back(lod[level_idx][i + 1] - lod[level_idx][i]);
    }
    sub_lod.emplace_back(level_lens);
    start_idx = lod[level_idx][start_idx];
    end_idx = lod[level_idx][end_idx];
  }

  return LoDAndOffset{sub_lod, {start_idx, end_idx}};
}

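// AppendLoD appends length-based increments to an offset-based LoD.
// E.g. (illustrative): lod = {{0, 2}} and lod_length = {{3}} yield
// {{0, 2, 5}}.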
void AppendLoD(LoD *lod, const LoD &lod_length) {
  PADDLE_ENFORCE(
      lod->empty() || lod->size() == lod_length.size(),
      "The lod_length should have the same size as the appended lod.");
  if (lod->empty()) {
    *lod = LoD(lod_length.size(), std::vector<size_t>({0}));
  }
  for (size_t i = 0; i < lod->size(); ++i) {
    auto &level = (*lod)[i];
    for (size_t len : lod_length[i]) {
      level.push_back(level.back() + len);
    }
  }
}

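// The stream layout written below is: a uint32_t version, the LoD (level
// count, then each level as a byte size followed by its raw offsets), and
// finally the plain Tensor payload.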
void SerializeToStream(std::ostream &os, const LoDTensor &tensor,
                       const platform::DeviceContext &dev_ctx) {
  {  // the 1st field, uint32_t version for LoDTensor
    constexpr uint32_t version = 0;
    os.write(reinterpret_cast<const char *>(&version), sizeof(version));
  }
  {
    // the 2nd field, LoD information
    // uint64_t lod_level
    // uint64_t lod_level_1 size in bytes
    // int*     lod_level_1 data
    // ...
    auto lod = tensor.lod();
    uint64_t size = lod.size();
    os.write(reinterpret_cast<const char *>(&size), sizeof(size));

    for (auto &each : lod) {
      size = each.size() * sizeof(framework::LoD::value_type::value_type);
      os.write(reinterpret_cast<const char *>(&size), sizeof(size));
      os.write(reinterpret_cast<const char *>(each.data()),
               static_cast<std::streamsize>(size));
    }
  }
  // the 3rd field, Tensor
  TensorToStream(os, static_cast<Tensor>(tensor), dev_ctx);
}

void DeserializeFromStream(std::istream &is, LoDTensor *tensor,
                           const platform::DeviceContext &dev_ctx) {
  {
    // the 1st field, uint32_t version for LoDTensor
    uint32_t version;
    is.read(reinterpret_cast<char *>(&version), sizeof(version));
    PADDLE_ENFORCE_EQ(version, 0U, "Only version 0 is supported");
  }
  {
    // the 2nd field, LoD information
    uint64_t lod_level;
    is.read(reinterpret_cast<char *>(&lod_level), sizeof(lod_level));
    auto &lod = *tensor->mutable_lod();
    lod.resize(lod_level);
    for (uint64_t i = 0; i < lod_level; ++i) {
      uint64_t size;
      is.read(reinterpret_cast<char *>(&size), sizeof(size));
      std::vector<size_t> tmp(size / sizeof(size_t));
      is.read(reinterpret_cast<char *>(tmp.data()),
              static_cast<std::streamsize>(size));
      lod[i] = tmp;
    }
  }
  // the 3rd field, Tensor
  TensorFromStream(is, static_cast<Tensor *>(tensor), dev_ctx);
}

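// Each RecordIO record packs a uint32_t tensor count followed by the
// serialized LoDTensors, so ReadFromRecordIO below can recover the whole
// batch from a single record.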
void WriteToRecordIO(recordio::Writer *writer,
                     const std::vector<LoDTensor> &tensor,
                     const platform::DeviceContext &dev_ctx) {
  std::stringstream buffer;
  // write the element count with a fixed 32-bit width so it matches what
  // ReadFromRecordIO reads back
  uint32_t sz = static_cast<uint32_t>(tensor.size());
  buffer.write(reinterpret_cast<const char *>(&sz), sizeof(uint32_t));
  for (auto &each : tensor) {
    SerializeToStream(buffer, each, dev_ctx);
  }
  writer->Write(buffer.str());
}

std::vector<LoDTensor> ReadFromRecordIO(
    recordio::Scanner *scanner, const platform::DeviceContext &dev_ctx) {
  std::vector<LoDTensor> result;
  if (scanner->HasNext()) {
    std::istringstream sin(scanner->Next());
    uint32_t sz;
    sin.read(reinterpret_cast<char *>(&sz), sizeof(uint32_t));
    result.resize(sz);
    for (uint32_t i = 0; i < sz; ++i) {
      DeserializeFromStream(sin, &result[i], dev_ctx);
    }
  }
  return result;
}

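// SplitLoDTensor shards the batch across `places`, slicing by top-level
// sequences when a LoD is present. The length-based sub-LoD returned by
// GetSubLoDAndAbsoluteOffset is converted back to offsets before being
// attached to each shard.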
std::vector<LoDTensor> LoDTensor::SplitLoDTensor(
    const std::vector<platform::Place> places) const {
  check_memory_size();
  int batch_size =
      lod().empty() ? dims()[0] : static_cast<int>(lod()[0].size()) - 1;
  size_t result_size = std::min(static_cast<size_t>(batch_size), places.size());
  size_t remainder = batch_size % places.size();

  std::vector<LoDTensor> results;
  results.reserve(result_size);

  int step_width = static_cast<int>(batch_size / result_size);
  for (size_t i = 0; i < result_size; ++i) {
    int begin = static_cast<int>(i * step_width);
    int end = static_cast<int>((i + 1) * step_width);
    if (i + 1 == places.size()) {  // last
      end += remainder;
    }

    LoDTensor dst;
    if (lod().empty()) {
      auto src = Slice(begin, end);
      auto &dst_place = places[i];
      framework::TensorCopy(src, dst_place, &dst);
    } else {
      auto lod_and_offset = GetSubLoDAndAbsoluteOffset(lod(), begin, end, 0);

      auto &offset = lod_and_offset.second;
      auto src = Slice(offset.first, offset.second);
      auto &dst_place = places[i];
      framework::TensorCopy(src, dst_place, &dst);

      LoD my_lod;
      for (auto &l : lod_and_offset.first) {
        std::vector<size_t> v{0};
        for (auto &ll : l) {
          v.push_back(ll + v.back());
        }
        my_lod.emplace_back(v);
      }
      dst.set_lod(my_lod);
    }
    results.emplace_back(dst);
  }

  return results;
}

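// MergeLoDTensor is the inverse of SplitLoDTensor: it concatenates the
// inputs along dim 0 and chains their LoDs. E.g. (illustrative): merging
// lods {{0, 2}} and {{0, 3}} gives {{0, 2, 5}}.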
void LoDTensor::MergeLoDTensor(
    const std::vector<const LoDTensor *> &lod_tensors,
    platform::Place dst_place) {
  PADDLE_ENFORCE(!lod_tensors.empty());

  framework::DDim new_dim = lod_tensors[0]->dims();
  std::type_index new_type = lod_tensors[0]->type();
  framework::DataLayout new_layout = lod_tensors[0]->layout();
  LoD new_lod = lod_tensors[0]->lod();
  for (size_t i = 1; i < lod_tensors.size(); ++i) {
    auto *t = lod_tensors[i];
    PADDLE_ENFORCE_EQ(new_type, t->type());
    PADDLE_ENFORCE_EQ(new_layout, t->layout());

    PADDLE_ENFORCE_EQ(framework::product(new_dim) / new_dim[0],
                      framework::product(t->dims()) / t->dims()[0]);
    new_dim[0] += t->dims()[0];

    auto &lod = t->lod();
    for (size_t j = 0; j < lod.size(); ++j) {
      auto &sub_lod = new_lod[j];
      // copy the base offset by value: push_back may reallocate and would
      // invalidate a reference into sub_lod
      size_t offset = sub_lod.back();
      for (size_t k = 1; k < lod[j].size(); ++k) {
        sub_lod.push_back(lod[j][k] + offset);
      }
    }
  }
  Resize(new_dim);
  set_layout(new_layout);
  set_lod(new_lod);
  mutable_data(dst_place, new_type);

  int begin = 0;
  for (auto *src : lod_tensors) {
    int end = begin + src->dims()[0];
    auto dst = Slice(begin, end);
    framework::TensorCopy(*src, dst_place, &dst);
    begin = end;
  }
}

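// The two conversions below are inverses of each other.
// E.g. (illustrative): offsets {0, 2, 4, 7} <-> lengths {2, 2, 3}.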
LoD ConvertToLengthBasedLoD(const LoD &offset_lod) {
  LoD length_lod;
  length_lod.reserve(offset_lod.size());
  for (size_t lvl = 0; lvl < offset_lod.size(); ++lvl) {
    std::vector<size_t> level;
    // guard against empty levels: offset_lod[lvl].size() - 1 would wrap
    // around for an unsigned zero size
    if (!offset_lod[lvl].empty()) {
      level.reserve(offset_lod[lvl].size() - 1);
      for (size_t idx = 0; idx < offset_lod[lvl].size() - 1; ++idx) {
        level.push_back(offset_lod[lvl][idx + 1] - offset_lod[lvl][idx]);
      }
    }
    length_lod.push_back(level);
  }
  return length_lod;
}

LoD ConvertToOffsetBasedLoD(const LoD &length_lod) {
  LoD offset_lod;
  offset_lod.reserve(length_lod.size());
  for (size_t lvl = 0; lvl < length_lod.size(); ++lvl) {
    std::vector<size_t> level;
    level.reserve(length_lod[lvl].size() + 1);
    size_t tmp = 0;
    level.push_back(tmp);
    for (size_t idx = 0; idx < length_lod[lvl].size(); ++idx) {
      tmp += length_lod[lvl][idx];
      level.push_back(tmp);
    }
    offset_lod.push_back(level);
  }
  return offset_lod;
}

}  // namespace framework
}  // namespace paddle