/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <stdint.h>
#include <string.h>
#include <algorithm>
#include <iterator>

#include "paddle/fluid/framework/data_type.h"
#include "paddle/fluid/framework/framework.pb.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/var_type.h"
#include "paddle/fluid/framework/version.h"

#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/memory/memory.h"

namespace paddle {
namespace framework {

std::ostream &operator<<(std::ostream &os, const LoD &lod) {
  os << "{";
  for (auto &v : lod) {
    os << "{";
    bool is_first = true;
    for (auto &i : v) {
      if (is_first) {
        os << i;
        is_first = false;
      } else {
        os << ", " << i;
      }
    }
    os << "}";
  }
  os << "}";

  return os;
}
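
// Illustrative example (values chosen for exposition, not in the original
// source): printing a two-level LoD such as {{0, 2, 4}, {0, 1, 3, 5, 7}}
// with the operator above yields "{{0, 2, 4}{0, 1, 3, 5, 7}}".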

std::ostream &operator<<(std::ostream &os, const LoDTensor &t) {
  if (t.lod().size() > 0) {
    os << "  - lod: " << t.lod() << "\n";
  }
  os << static_cast<Tensor>(t);
  return os;
}

std::string LoDToString(const LoD &lod) {
  std::ostringstream stream;
  stream << lod;
  return stream.str();
}

LoD SliceInLevel(const LoD &in, size_t level, size_t elem_begin,
                 size_t elem_end) {
  PADDLE_ENFORCE_LT(level, in.size());
  PADDLE_ENFORCE_LT(elem_begin, elem_end);
  PADDLE_ENFORCE_LT(elem_end, in[level].size());

  LoD res;
  res.resize(in.size() - level);
  // copy the first level
  res[0].assign(in[level].begin() + elem_begin,
                in[level].begin() + elem_end + 1);
  for (size_t lvl = 1; lvl < res.size(); lvl++) {
    const auto &in_level = in[level + lvl];
    const auto &above_level = res[lvl - 1];
    auto &out_level = res[lvl];
    out_level.assign(in_level.begin() + above_level.front(),
                     in_level.begin() + above_level.back() + 1);
  }
  for (size_t lvl = 0; lvl < res.size(); lvl++) {
    // subtract the first element from all offsets so that each level's
    // first offset becomes 0
    size_t front = res[lvl].front();
    for (auto &ele : res[lvl]) {
      ele -= front;
    }
  }
  return res;
}
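
// Illustrative example (values chosen for exposition, not in the original
// source): SliceInLevel({{0, 2, 4}, {0, 1, 3, 5, 7}}, /*level=*/0,
// /*elem_begin=*/1, /*elem_end=*/2) keeps the second top-level sequence and
// re-bases its offsets, yielding {{0, 2}, {0, 2, 4}}.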

LoD ToAbsOffset(const LoD &in) {
  // the lowest level already stores absolute offsets; a single-level LoD
  // therefore needs no conversion
  if (in.empty() || in.size() == 1) return in;
  LoD result = in;
  for (auto level = static_cast<int>(in.size() - 2); level >= 0; level--) {
    for (size_t i = 0; i < in[level].size(); ++i) {
      size_t index = in[level][i];
      result[level][i] = result[level + 1][index];
    }
  }
  return result;
}
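
// Illustrative example (values chosen for exposition, not in the original
// source): with in = {{0, 2, 4}, {0, 1, 3, 5, 7}}, the top level indexes the
// level below it, so ToAbsOffset maps every level to absolute row offsets:
// {{0, 3, 7}, {0, 1, 3, 5, 7}}.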

bool operator==(const LoD &a, const LoD &b) {
  if (a.size() != b.size()) {
    return false;
  }

  for (size_t i = 0; i < a.size(); i++) {
    const auto &a_level = a[i];
    const auto &b_level = b[i];
    if (a_level.size() != b_level.size()) {
      return false;
    }
    for (size_t j = 0; j < a_level.size(); j++) {
      if (a_level[j] != b_level[j]) {
        return false;
      }
    }
  }
  return true;
}

bool CheckLoD(const LoD &in, int tensor_height) {
  if (in.empty()) return true;
  for (const auto &level : in) {
    // check: each level should contain at least two offsets.
    if (level.size() < 2) return false;
    // check: the first offset(the begin offset) of each level should be 0.
    if (level.front() != 0) return false;
    // check: all the offsets in a level should be non-descending
    if (!std::is_sorted(level.begin(), level.end())) {
      return false;
    }
  }
  // check: the lowest level's last offset should equal `tensor_height` if
  //        tensor_height > 0.
  if (tensor_height > 0 && (size_t)tensor_height != in.back().back())
    return false;

  // check: each higher level's last offset should equal the next lower
  // level's size minus 1.
  // NOTE: LoD stores the levels from top to bottom, so the higher level goes
  // first.
  for (size_t level = 0; level < in.size() - 1; level++) {
    if (in[level].back() != in[level + 1].size() - 1) return false;
  }
  return true;
}
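
// Illustrative example (values chosen for exposition, not in the original
// source): for a tensor of height 7, CheckLoD({{0, 2, 4}, {0, 1, 3, 5, 7}}, 7)
// passes: every level starts at 0 and is non-descending, the lowest level
// ends at 7, and the top level's last offset (4) equals the lower level's
// size minus 1.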

bool CheckAbsLoD(const LoD &in, int tensor_height) {
  if (in.empty()) return true;
  for (const auto &level : in) {
    // check: all the offsets in a level should be strictly ascending
    // (no duplicate items allowed).
    if (std::adjacent_find(level.begin(), level.end(),
                           [](size_t a, size_t b) { return a >= b; }) !=
        level.end()) {
      return false;
    }

    // check: each level should contain at least two offsets.
    if (level.size() < 2) return false;

    // check: the first offset of each level should be 0, and the last should
    // be the same (the height of the underlying tensor).
    if (level.front() != 0) return false;
    if (tensor_height < 0) {
      tensor_height = level.back();
    } else if ((size_t)tensor_height != level.back()) {
      return false;
    }
  }
  return true;
}
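
// Illustrative example (values chosen for exposition, not in the original
// source): the absolute form of the LoD above, {{0, 3, 7}, {0, 1, 3, 5, 7}},
// satisfies CheckAbsLoD with tensor_height 7: every level is strictly
// ascending, starts at 0, and ends at the tensor height.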

using LoDAndOffset = std::pair<LoD, std::pair<size_t, size_t>>;
LoDAndOffset GetSubLoDAndAbsoluteOffset(const LoD &lod, size_t start_idx,
                                        size_t end_idx, size_t start_level) {
  LoD sub_lod;

  for (size_t level_idx = start_level; level_idx < lod.size(); ++level_idx) {
    PADDLE_ENFORCE_LE(start_idx, end_idx);
    PADDLE_ENFORCE_LT(end_idx, lod[level_idx].size());
    std::vector<size_t> level_lens;
    for (size_t i = start_idx; i < end_idx; ++i) {
      level_lens.push_back(lod[level_idx][i + 1] - lod[level_idx][i]);
    }
    sub_lod.emplace_back(level_lens);
    start_idx = lod[level_idx][start_idx];
    end_idx = lod[level_idx][end_idx];
  }

  return LoDAndOffset{sub_lod, {start_idx, end_idx}};
}
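
// Illustrative example (values chosen for exposition, not in the original
// source): with lod = {{0, 2, 4}, {0, 1, 3, 5, 7}},
// GetSubLoDAndAbsoluteOffset(lod, 1, 2, 0) returns the length-based sub-LoD
// {{2}, {2, 2}} together with the absolute row range {3, 7} in the
// underlying tensor.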

void AppendLoD(LoD *lod, const LoD &lod_length) {
  PADDLE_ENFORCE(
      lod->empty() || lod->size() == lod_length.size(),
      "The lod_length should have the same size as the appended lod.");
  if (lod->empty()) {
    *lod = LoD(lod_length.size(), std::vector<size_t>({0}));
  }
  for (size_t i = 0; i < lod->size(); ++i) {
    auto &level = (*lod)[i];
    for (size_t len : lod_length[i]) {
      level.push_back(level.back() + len);
    }
  }
}
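
// Illustrative example (values chosen for exposition, not in the original
// source): appending the length-based lod_length {{2, 3}} to lod {{0, 2}}
// extends the level to {0, 2, 4, 7}.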

void SerializeToStream(std::ostream &os, const LoDTensor &tensor,
                       const platform::DeviceContext &dev_ctx) {
  {  // the 1st field, uint32_t version for LoDTensor
    os.write(reinterpret_cast<const char *>(&kCurTensorVersion),
             sizeof(kCurTensorVersion));
  }
  {
    // the 2nd field, LoD information
    // uint64_t lod_level
    // uint64_t lod_level_1 size in bytes.
    // int*     lod_level_1 data
    // ...
    auto lod = tensor.lod();
    uint64_t size = lod.size();
    os.write(reinterpret_cast<const char *>(&size), sizeof(size));

    for (auto &each : lod) {
      size = each.size() * sizeof(framework::LoD::value_type::value_type);
      os.write(reinterpret_cast<const char *>(&size), sizeof(size));
      os.write(reinterpret_cast<const char *>(each.data()),
               static_cast<std::streamsize>(size));
    }
  }
  // the 3rd field, Tensor
  TensorToStream(os, static_cast<Tensor>(tensor), dev_ctx);
}
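
// Usage sketch (editor's illustration, not in the original source; assumes a
// CPU device context and a binary output stream are available):
//   std::ofstream fout("lod_tensor.bin", std::ios::binary);
//   platform::CPUDeviceContext cpu_ctx;
//   SerializeToStream(fout, tensor, cpu_ctx);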

void DeserializeFromStream(std::istream &is, LoDTensor *tensor,
                           const platform::DeviceContext &dev_ctx,
                           const size_t &seek,
                           const std::vector<int64_t> &shape) {
  {
    // the 1st field, uint32_t version for LoDTensor
    uint32_t version;
    is.read(reinterpret_cast<char *>(&version), sizeof(version));
    PADDLE_ENFORCE_EQ(framework::IsTensorVersionSupported(version), true,
                      platform::errors::InvalidArgument(
                          "tensor version %u is not supported.", version));
    PADDLE_ENFORCE_EQ(
        version, 0U,
        platform::errors::InvalidArgument(
            "tensor version %u is not supported, only version 0 is supported",
            version));
  }
  {
    // the 2nd field, LoD information
    uint64_t lod_level;
    is.read(reinterpret_cast<char *>(&lod_level), sizeof(lod_level));
    auto &lod = *tensor->mutable_lod();
    lod.resize(lod_level);
  }
  // the 3rd field, Tensor
  TensorFromStream(is, static_cast<Tensor *>(tensor), dev_ctx, seek, shape);
}

void DeserializeFromStream(std::istream &is, LoDTensor *tensor,
                           const platform::DeviceContext &dev_ctx) {
  {
    // the 1st field, uint32_t version for LoDTensor
    uint32_t version;
    is.read(reinterpret_cast<char *>(&version), sizeof(version));
    PADDLE_ENFORCE_EQ(framework::IsTensorVersionSupported(version), true,
                      platform::errors::InvalidArgument(
                          "tensor version %u is not supported.", version));
    PADDLE_ENFORCE_EQ(
        version, 0U,
        platform::errors::InvalidArgument(
            "tensor version %u is not supported, only version 0 is supported",
            version));
  }
  {
    // the 2nd field, LoD information
    uint64_t lod_level;
    is.read(reinterpret_cast<char *>(&lod_level), sizeof(lod_level));
    auto &lod = *tensor->mutable_lod();
    lod.resize(lod_level);
    for (uint64_t i = 0; i < lod_level; ++i) {
      uint64_t size;
      is.read(reinterpret_cast<char *>(&size), sizeof(size));
      std::vector<size_t> tmp(size / sizeof(size_t));
      is.read(reinterpret_cast<char *>(tmp.data()),
              static_cast<std::streamsize>(size));
      lod[i] = tmp;
    }
  }
  // the 3rd field, Tensor
  TensorFromStream(is, static_cast<Tensor *>(tensor), dev_ctx);
}
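
// Usage sketch (editor's illustration, not in the original source): reading
// back a tensor written by SerializeToStream above:
//   std::ifstream fin("lod_tensor.bin", std::ios::binary);
//   LoDTensor loaded;
//   platform::CPUDeviceContext cpu_ctx;
//   DeserializeFromStream(fin, &loaded, cpu_ctx);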

std::vector<LoDTensor> LoDTensor::SplitLoDTensor(
    const std::vector<platform::Place> places) const {
  PADDLE_ENFORCE_GT(places.size(), 0,
                    platform::errors::InvalidArgument(
                        "place number cannot be empty when splitting"));
  check_memory_size();
  size_t batch_size =
      lod().empty() ? static_cast<size_t>(dims()[0]) : lod()[0].size() - 1;

  // if batch_size is 0, just return places.size() copies of empty
  // tensors.
  if (batch_size == 0) {
    std::vector<LoDTensor> empty_results;
    empty_results.reserve(places.size());
    for (size_t i = 0; i < places.size(); ++i) {
      LoDTensor dst;
      dst.Resize(dims());
      dst.mutable_data(places[i], type());
      if (!lod().empty()) {
        dst.set_lod(lod());
      }
      empty_results.emplace_back(std::move(dst));
    }
    return empty_results;
  }

  auto step_width = (batch_size + places.size() - 1) / places.size();
  auto result_size = (batch_size + step_width - 1) / step_width;
  std::vector<LoDTensor> results;
  results.reserve(result_size);

  for (size_t i = 0; i < result_size; ++i) {
    auto begin = i * step_width;
    auto end = std::min<size_t>((i + 1) * step_width, batch_size);
    PADDLE_ENFORCE_LT(begin, end,
                      platform::errors::InvalidArgument(
                          "begin must be less than end, this may be a bug"));

    LoDTensor dst;
    if (lod().empty()) {
      auto src = Slice(begin, end);
      auto &dst_place = places[i];
      framework::TensorCopy(src, dst_place, &dst);
    } else {
      auto lod_and_offset = GetSubLoDAndAbsoluteOffset(lod(), begin, end, 0);

      auto &offset = lod_and_offset.second;
      auto src = Slice(offset.first, offset.second);
      auto &dst_place = places[i];
      framework::TensorCopy(src, dst_place, &dst);

      LoD my_lod;
      for (auto &l : lod_and_offset.first) {
        std::vector<size_t> v{0};
        for (auto &ll : l) {
          v.push_back(ll + v.back());
        }
        my_lod.emplace_back(v);
      }
      dst.set_lod(my_lod);
    }
    results.emplace_back(std::move(dst));
  }

  return results;
}
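
// Illustrative example (values chosen for exposition, not in the original
// source): a tensor with lod {{0, 2, 4}} split across two places yields two
// tensors, each holding one sequence: rows [0, 2) with lod {{0, 2}} and rows
// [2, 4) with lod {{0, 2}}.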

void LoDTensor::MergeLoDTensor(
    const std::vector<const LoDTensor *> &lod_tensors,
    platform::Place dst_place) {
  PADDLE_ENFORCE(!lod_tensors.empty());

  framework::DDim new_dim = lod_tensors[0]->dims();
  proto::VarType::Type new_type = proto::VarType::FP32;
  framework::DataLayout new_layout = lod_tensors[0]->layout();
  for (auto *t : lod_tensors) {
    if (t->numel() && t->IsInitialized()) {
      new_dim = t->dims();
      new_type = t->type();
      new_layout = t->layout();
      break;
    }
  }

  LoD new_lod = lod_tensors[0]->lod();

  for (size_t i = 1; i < lod_tensors.size(); ++i) {
    auto *t = lod_tensors[i];
    if (t->numel() && t->IsInitialized()) {
      PADDLE_ENFORCE_EQ(new_type, t->type());
      PADDLE_ENFORCE_EQ(new_layout, t->layout());
      PADDLE_ENFORCE_EQ(framework::product(new_dim) / new_dim[0],
                        framework::product(t->dims()) / t->dims()[0]);
      new_dim[0] += t->dims()[0];
    }

    auto &lod = t->lod();
    PADDLE_ENFORCE_EQ(new_lod.size(), lod.size());
    for (size_t j = 0; j < lod.size(); ++j) {
      auto &sub_lod = new_lod[j];
      size_t offset = sub_lod.back();
      for (size_t k = 1; k < lod[j].size(); ++k) {
        sub_lod.push_back(lod[j][k] + offset);
      }
    }
  }
  Resize(new_dim);
  set_layout(new_layout);
  set_lod(new_lod);
  mutable_data(dst_place, new_type);

  int begin = 0;
  for (auto *src : lod_tensors) {
    int end = begin + src->dims()[0];
    if (end == begin) {
      continue;
    }
    auto dst = Slice(begin, end);
    framework::TensorCopy(*src, dst_place, &dst);
    begin = end;
  }
}
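
// Illustrative example (values chosen for exposition, not in the original
// source): merging two tensors with lods {{0, 2, 4}} and {{0, 1, 3}}
// concatenates their rows and produces the merged lod {{0, 2, 4, 5, 7}}.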

LoD ConvertToLengthBasedLoD(const LoD &offset_lod) {
  LoD length_lod;
  length_lod.reserve(offset_lod.size());
  for (size_t lvl = 0; lvl < offset_lod.size(); ++lvl) {
    std::vector<size_t> level;
    if (offset_lod[lvl].size() > 0) {
      level.reserve(offset_lod[lvl].size() - 1);
    }
    // start at the second offset so an empty level does not underflow the
    // loop bound
    for (size_t idx = 1; idx < offset_lod[lvl].size(); ++idx) {
      level.push_back(offset_lod[lvl][idx] - offset_lod[lvl][idx - 1]);
    }
    length_lod.push_back(level);
  }
  return length_lod;
}
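
// Illustrative example (values chosen for exposition, not in the original
// source): ConvertToLengthBasedLoD({{0, 2, 4, 5}}) returns {{2, 2, 1}}.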

LoD ConvertToOffsetBasedLoD(const LoD &length_lod) {
  LoD offset_lod;
  offset_lod.reserve(length_lod.size());
  for (size_t lvl = 0; lvl < length_lod.size(); ++lvl) {
    std::vector<size_t> level;
    level.reserve(length_lod[lvl].size() + 1);
    size_t tmp = 0;
    level.push_back(tmp);
    for (size_t idx = 0; idx < length_lod[lvl].size(); ++idx) {
      tmp += length_lod[lvl][idx];
      level.push_back(tmp);
    }
    offset_lod.push_back(level);
  }
  return offset_lod;
}
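
// Illustrative example (values chosen for exposition, not in the original
// source): ConvertToOffsetBasedLoD({{2, 2, 1}}) returns the inverse,
// {{0, 2, 4, 5}}; the two conversions round-trip.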

}  // namespace framework
}  // namespace paddle