lod_tensor.cc
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <stdint.h>
#include <string.h>
#include <algorithm>
#include <functional>
#include <iterator>

#include "paddle/fluid/framework/data_type.h"
#include "paddle/fluid/framework/framework.pb.h"
#include "paddle/fluid/framework/lod_tensor.h"

#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/memory/memory.h"

#include "paddle/fluid/recordio/scanner.h"
#include "paddle/fluid/recordio/writer.h"

namespace paddle {
namespace framework {

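// Pretty-print a LoD (a vector of offset vectors) as nested braces, e.g. a
// two-level LoD may print as {{0, 2, 4}, {0, 1, 3, 6, 10}}.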
std::ostream &operator<<(std::ostream &os, const LoD &lod) {
  os << "{";
  for (auto &v : lod) {
    os << "{";
    bool is_first = true;
    for (auto &i : v) {
      if (is_first) {
        os << i;
        is_first = false;
      } else {
        os << ", " << i;
      }
    }
    os << "}";
  }
  os << "}";

  return os;
}

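// Pretty-print a LoDTensor: its dims, its LoD, and at most its first ten
// elements. Only float tensors are supported; tensors on other places are
// copied to CPU first.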
std::ostream &operator<<(std::ostream &os, const LoDTensor &t) {
  PADDLE_ENFORCE(t.type().hash_code() == typeid(float).hash_code());

  if (!platform::is_cpu_place(t.place())) {
    LoDTensor tt;
    framework::TensorCopy(t, platform::CPUPlace(), &tt);
    platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance();
    auto &dev_ctx = *pool.Get(t.place());
    dev_ctx.Wait();

    os << tt;
    return os;
  }

  os << "dim: " << t.dims() << "\n";
  os << "lod: " << t.lod() << "\n";

  // only print the first ten elements
  int64_t size = t.numel() < 10 ? t.numel() : 10;
  for (int64_t i = 0; i < size; ++i) {
    os << t.data<float>()[i] << " ";
  }

  return os;
}

std::string LoDToString(const LoD &lod) {
  std::ostringstream stream;
  stream << lod;
  return stream.str();
}

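// Slice sequences [elem_begin, elem_end) out of `level` of `in`, keeping all
// lower levels and renumbering every kept level to start at offset 0.
// Illustrative example (not from the source):
//   SliceInLevel({{0, 2, 4}, {0, 1, 3, 6, 10}}, 0, 1, 2)
//     == {{0, 2}, {0, 3, 7}}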
LoD SliceInLevel(const LoD &in, size_t level, size_t elem_begin,
                 size_t elem_end) {
  PADDLE_ENFORCE_LT(level, in.size());
  PADDLE_ENFORCE_LT(elem_end, in[level].size());

  LoD res;
  res.resize(in.size() - level);
  // copy the first level
  res[0].assign(in[level].begin() + elem_begin,
                in[level].begin() + elem_end + 1);
  for (size_t lvl = 1; lvl < res.size(); lvl++) {
    const auto &in_level = in[level + lvl];
    const auto &above_level = res[lvl - 1];
    auto &out_level = res[lvl];
    out_level.assign(in_level.begin() + above_level.front(),
                     in_level.begin() + above_level.back() + 1);
  }
  for (size_t lvl = 0; lvl < res.size(); lvl++) {
    // to make the first offset equal 0, subtract the first element from all
    // elements
    size_t front = res[lvl].front();
    for (auto &ele : res[lvl]) {
      ele -= front;
    }
  }
  return res;
}

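// Convert a LoD whose upper levels index into the level below into one where
// every level stores absolute offsets into the underlying tensor.
// Illustrative example (not from the source):
//   ToAbsOffset({{0, 2, 4}, {0, 3, 5, 9, 10}})
//     == {{0, 5, 10}, {0, 3, 5, 9, 10}}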
LoD ToAbsOffset(const LoD &in) {
  // the lowest level already stores absolute offsets, so a LoD with at most
  // one level needs no conversion
  if (in.empty() || in.size() == 1) return in;
  LoD result = in;
  for (auto level = static_cast<int>(in.size() - 2); level >= 0; level--) {
    for (size_t i = 0; i < in[level].size(); ++i) {
      size_t index = in[level][i];
      result[level][i] = result[level + 1][index];
    }
  }
  return result;
}

bool operator==(const LoD &a, const LoD &b) {
  if (a.size() != b.size()) {
    return false;
  }

  for (size_t i = 0; i < a.size(); i++) {
    const auto &a_level = a[i];
    const auto &b_level = b[i];
    if (a_level.size() != b_level.size()) {
      return false;
    }
    for (size_t j = 0; j < a_level.size(); j++) {
      if (a_level[j] != b_level[j]) {
        return false;
      }
    }
  }
  return true;
}

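// Validate an offset-based LoD: each level needs at least two offsets, must
// start at 0, and must be strictly ascending; the lowest level has to end at
// tensor_height (when tensor_height > 0), and each higher level's last
// offset has to index the last entry of the level below.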
bool CheckLoD(const LoD &in, int tensor_height) {
  if (in.empty()) return true;
  for (const auto &level : in) {
    // check: there should be at least 2 offsets in each level.
    if (level.size() < 2) return false;
    // check: the first offset (the begin offset) of each level should be 0.
    if (level.front() != 0) return false;
    // check: all the offsets in a level should be strictly ascending
    // (no duplicate offsets allowed).
    if (std::adjacent_find(level.begin(), level.end(),
                           std::greater_equal<size_t>()) != level.end()) {
      LOG(INFO) << "ascending error";
      return false;
    }
  }
  // check: the lowest level's last offset should equal `tensor_height` if
  //        tensor_height > 0.
  if (tensor_height > 0 && (size_t)tensor_height != in.back().back())
    return false;

  // check: each higher level's last offset should equal the lower level's
  // size - 1.
  // NOTE LoD store the levels from top to bottom, so the higher level goes
  // first.
  for (size_t level = 0; level < in.size() - 1; level++) {
    if (in[level].back() != in[level + 1].size() - 1) return false;
  }
  return true;
}

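// Validate an absolute-offset LoD (see ToAbsOffset): every level, not just
// the lowest, must run from 0 up to the height of the underlying tensor.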
bool CheckAbsLoD(const LoD &in, int tensor_height) {
  if (in.empty()) return true;
  for (const auto &level : in) {
    // check: all the offsets in a level should be strictly ascending
    // (no duplicate offsets allowed).
    if (std::adjacent_find(level.begin(), level.end(),
                           std::greater_equal<size_t>()) != level.end()) {
      return false;
    }

    // check: there should be at least 2 offsets in each level.
    if (level.size() < 2) return false;

    // check: the first offset of each level should be 0, and the last should
    // be the same (the height of the underlying tensor).
    if (level.front() != 0) return false;
    if (tensor_height < 0) {
      tensor_height = level.back();
    } else if ((size_t)tensor_height != level.back()) {
      return false;
    }
  }
  return true;
}

using LoDAndOffset = std::pair<LoD, std::pair<size_t, size_t>>;
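// Extract the sub-LoD that covers sequences [start_idx, end_idx) at
// start_level, expressed as per-level sequence lengths rather than offsets,
// together with the absolute row range the slice occupies in the underlying
// tensor. Illustrative example (not from the source):
//   GetSubLoDAndAbsoluteOffset({{0, 2, 4}, {0, 1, 3, 6, 10}}, 1, 2, 0)
//     == {{{2}, {3, 4}}, {3, 10}}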
LoDAndOffset GetSubLoDAndAbsoluteOffset(const LoD &lod, size_t start_idx,
                                        size_t end_idx, size_t start_level) {
  LoD sub_lod;

  for (size_t level_idx = start_level; level_idx < lod.size(); ++level_idx) {
    PADDLE_ENFORCE_LE(start_idx, end_idx);
    PADDLE_ENFORCE_LT(end_idx, lod[level_idx].size());
    std::vector<size_t> level_lens;
    for (size_t i = start_idx; i < end_idx; ++i) {
      level_lens.push_back(lod[level_idx][i + 1] - lod[level_idx][i]);
    }
    sub_lod.emplace_back(level_lens);
    start_idx = lod[level_idx][start_idx];
    end_idx = lod[level_idx][end_idx];
  }

  return LoDAndOffset{sub_lod, {start_idx, end_idx}};
}

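// Append a length-based LoD (as returned by GetSubLoDAndAbsoluteOffset) to
// an offset-based LoD, extending each level by the given sequence lengths.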
void AppendLoD(LoD *lod, const LoD &lod_length) {
  PADDLE_ENFORCE(
      lod->empty() || lod->size() == lod_length.size(),
      "The lod_length should have the same size as the appended lod.");
  if (lod->empty()) {
    // initialize each level with a single zero offset
    *lod = LoD(lod_length.size(), std::vector<size_t>({0}));
  }
  for (size_t i = 0; i < lod->size(); ++i) {
    auto &level = (*lod)[i];
    for (size_t len : lod_length[i]) {
      level.push_back(level.back() + len);
    }
  }
}

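// On-stream layout written below (version 0):
//   uint32_t  version
//   uint64_t  number of LoD levels
//   per level: uint64_t byte size, then that many bytes of size_t offsets
//   the Tensor itself (see TensorToStream)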
void SerializeToStream(std::ostream &os, const LoDTensor &tensor,
                       const platform::DeviceContext &dev_ctx) {
  {  // the 1st field, uint32_t version for LoDTensor
    constexpr uint32_t version = 0;
    os.write(reinterpret_cast<const char *>(&version), sizeof(version));
  }
  {
    // the 2nd field, LoD information
    // uint64_t lod_level
    // uint64_t lod_level_1 size in byte.
    // int*     lod_level_1 data
    // ...
    auto lod = tensor.lod();
    uint64_t size = lod.size();
    os.write(reinterpret_cast<const char *>(&size), sizeof(size));

    for (auto &each : lod) {
      size = each.size() * sizeof(framework::LoD::value_type::value_type);
      os.write(reinterpret_cast<const char *>(&size), sizeof(size));
      os.write(reinterpret_cast<const char *>(each.data()),
               static_cast<std::streamsize>(size));
    }
  }
  // the 3rd field, Tensor
  TensorToStream(os, static_cast<Tensor>(tensor), dev_ctx);
}

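// Inverse of SerializeToStream: reads the version (only 0 is accepted),
// the LoD levels, and the Tensor payload.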
void DeserializeFromStream(std::istream &is, LoDTensor *tensor,
                           const platform::DeviceContext &dev_ctx) {
  {
    // the 1st field, uint32_t version for LoDTensor
    uint32_t version;
    is.read(reinterpret_cast<char *>(&version), sizeof(version));
    PADDLE_ENFORCE_EQ(version, 0U, "Only version 0 is supported");
  }
  {
    // the 2nd field, LoD information
    uint64_t lod_level;
    is.read(reinterpret_cast<char *>(&lod_level), sizeof(lod_level));
    auto &lod = *tensor->mutable_lod();
    lod.resize(lod_level);
    for (uint64_t i = 0; i < lod_level; ++i) {
      uint64_t size;
      is.read(reinterpret_cast<char *>(&size), sizeof(size));
      std::vector<size_t> tmp(size / sizeof(size_t));
      is.read(reinterpret_cast<char *>(tmp.data()),
              static_cast<std::streamsize>(size));
      lod[i] = tmp;
    }
  }
  // the 3rd field, Tensor
  TensorFromStream(is, static_cast<Tensor *>(tensor), dev_ctx);
}

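// Pack a batch of LoDTensors into one RecordIO record: a uint32_t tensor
// count followed by each tensor in SerializeToStream format.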
void WriteToRecordIO(recordio::Writer *writer,
                     const std::vector<LoDTensor> &tensor,
                     const platform::DeviceContext &dev_ctx) {
  std::stringstream buffer;
  uint32_t sz = static_cast<uint32_t>(tensor.size());
  buffer.write(reinterpret_cast<const char *>(&sz), sizeof(sz));
  for (auto &each : tensor) {
    SerializeToStream(buffer, each, dev_ctx);
  }
  writer->Write(buffer.str());
}

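// Read back one record written by WriteToRecordIO; returns an empty vector
// once the scanner has no more records.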
std::vector<LoDTensor> ReadFromRecordIO(
    recordio::Scanner *scanner, const platform::DeviceContext &dev_ctx) {
  std::vector<LoDTensor> result;
  if (scanner->HasNext()) {
    std::istringstream sin(scanner->Next());
    uint32_t sz;
    sin.read(reinterpret_cast<char *>(&sz), sizeof(uint32_t));
    result.resize(sz);
    for (uint32_t i = 0; i < sz; ++i) {
      DeserializeFromStream(sin, &result[i], dev_ctx);
    }
  }
  return result;
}

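// Split the batch across `places`, by top-level sequence when a LoD is
// present and by rows otherwise; the last slice absorbs the remainder. For
// example (illustrative), a batch of 5 rows over 2 places splits into 2 + 3.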
std::vector<LoDTensor> LoDTensor::SplitLoDTensor(
    const std::vector<platform::Place> places) const {
  check_memory_size();
  int batch_size =
      lod().empty() ? dims()[0] : static_cast<int>(lod()[0].size()) - 1;
  size_t result_size = std::min(static_cast<size_t>(batch_size), places.size());
  size_t remainder = batch_size % places.size();

  std::vector<LoDTensor> results;
  results.reserve(result_size);

  int step_width = static_cast<int>(batch_size / result_size);
  for (size_t i = 0; i < result_size; ++i) {
    int begin = static_cast<int>(i * step_width);
    int end = static_cast<int>((i + 1) * step_width);
    if (i + 1 == places.size()) {  // last
      end += remainder;
    }

    LoDTensor dst;
    if (lod().empty()) {
      auto src = Slice(begin, end);
      auto &dst_place = places[i];
      framework::TensorCopy(src, dst_place, &dst);
    } else {
      auto lod_and_offset = GetSubLoDAndAbsoluteOffset(lod(), begin, end, 0);

      auto &offset = lod_and_offset.second;
      auto src = Slice(offset.first, offset.second);
      auto &dst_place = places[i];
      framework::TensorCopy(src, dst_place, &dst);

      LoD my_lod;
      for (auto &l : lod_and_offset.first) {
        std::vector<size_t> v{0};
        for (auto &ll : l) {
          v.push_back(ll + v.back());
        }
        my_lod.emplace_back(v);
      }
      dst.set_lod(my_lod);
    }
    results.emplace_back(dst);
  }

  return results;
}

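// Inverse of SplitLoDTensor: check that all inputs share type, layout, and
// per-row shape, concatenate them along dimension 0, and stitch their LoDs
// together by shifting each appended level by the running end offset.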
void LoDTensor::MergeLoDTensor(
    const std::vector<const LoDTensor *> &lod_tensors,
    platform::Place dst_place) {
  PADDLE_ENFORCE(!lod_tensors.empty());

  framework::DDim new_dim = lod_tensors[0]->dims();
  std::type_index new_type = lod_tensors[0]->type();
  framework::DataLayout new_layout = lod_tensors[0]->layout();
  LoD new_lod = lod_tensors[0]->lod();
  for (size_t i = 1; i < lod_tensors.size(); ++i) {
    auto *t = lod_tensors[i];
    PADDLE_ENFORCE_EQ(new_type.hash_code(), t->type().hash_code());
    PADDLE_ENFORCE_EQ(new_layout, t->layout());

    PADDLE_ENFORCE_EQ(framework::product(new_dim) / new_dim[0],
                      framework::product(t->dims()) / t->dims()[0]);
    new_dim[0] += t->dims()[0];

    auto &lod = t->lod();
    for (size_t j = 0; j < lod.size(); ++j) {
      auto &sub_lod = new_lod[j];
      auto &offset = sub_lod.back();
      for (size_t k = 1; k < lod[j].size(); ++k) {
        sub_lod.push_back(lod[j][k] + offset);
      }
    }
  }
  Resize(new_dim);
  set_layout(new_layout);
  set_lod(new_lod);
  mutable_data(dst_place, new_type);

  int begin = 0;
  for (auto *src : lod_tensors) {
    int end = begin + src->dims()[0];
    auto dst = Slice(begin, end);
    framework::TensorCopy(*src, dst_place, &dst);
    begin = end;
  }
}

}  // namespace framework
}  // namespace paddle