/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/platform/device/ipu/ipu_executor.h"

#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/platform/device/ipu/ipu_compiler.h"
#include "paddle/fluid/platform/device/ipu/ipu_names.h"
#include "paddle/fluid/platform/device/ipu/ipu_strategy.h"

namespace paddle {
namespace platform {
namespace ipu {

namespace {

// Get the popart prefix and paddle postfix for each weight's optimizer states
// Format: {popart_prefix, paddle_postfix}
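// Example (hypothetical adam weight whose popart tensor is named "fc_0.w_0"):
// popart "Accl1___fc_0.w_0" pairs with paddle "fc_0.w_0_moment1_0".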
std::vector<std::pair<std::string, std::string>> GetOptPrePostfix(
    const std::string &opt_type) {
  std::vector<std::pair<std::string, std::string>> pre_post_fix;
  // Weight self
  pre_post_fix.push_back(std::make_pair("", ""));

  // Weight states
  // TODO(alleng) support pair("Accl1___", "_moment1_{id!=0}")
  if (opt_type == "adam" || opt_type == "lamb" || opt_type == "adamw") {
    pre_post_fix.push_back(std::make_pair("Accl1___", "_moment1_0"));
    pre_post_fix.push_back(std::make_pair("Accl2___", "_moment2_0"));
    pre_post_fix.push_back(std::make_pair("Step___", "_beta1_pow_acc_0"));
  } else if (opt_type == "momentum") {
    pre_post_fix.push_back(std::make_pair("Accl___", "_velocity_0"));
  } else if (opt_type == "adamax") {
    pre_post_fix.push_back(std::make_pair("Accl1___", "_moment_0"));
    pre_post_fix.push_back(std::make_pair("Accl2___", "_inf_norm__0"));
    pre_post_fix.push_back(std::make_pair("Step___", "_beta1_pow_acc_0"));
  } else if (opt_type == "adagrad") {
    pre_post_fix.push_back(std::make_pair("Accl1___", "_moment_0"));
  } else if (opt_type == "adadelta") {
    pre_post_fix.push_back(std::make_pair("Accl1___", "__avg_squared_grad_0"));
    pre_post_fix.push_back(
        std::make_pair("Accl2___", "__avg_squared_update_0"));
  } else if (opt_type == "rmsprop") {
    pre_post_fix.push_back(std::make_pair("Accl1___", "_mean_square_0"));
    pre_post_fix.push_back(std::make_pair("Accl2___", "_mean_grad_0"));
    pre_post_fix.push_back(std::make_pair("Accl3___", "_momentum__0"));
  }
  return pre_post_fix;
}

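// Adapts a Paddle Tensor to popart's IArray interface. ShareDataWith aliases
// the tensor's buffer, so popart reads and writes the Paddle memory directly
// without copying.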
class PdIArray final : public popart::IArray {
 public:
  explicit PdIArray(const Tensor *tensor) {
    tensor_.ShareDataWith(*tensor);
    for (int i = 0; i < tensor->dims().size(); ++i) {
      shape_.push_back(tensor->dims().at(i));
    }
  }

 public:
  void *data() { return tensor_.data(); }
  popart::DataType dataType() const {
    return PhiDType2PopartDType(tensor_.dtype());
  }
  std::size_t rank() const { return tensor_.dims().size(); }
  int64_t dim(size_t index) const { return tensor_.dims().at(index); }
  std::size_t nelms() const {
    return std::accumulate(shape_.begin(),
                           shape_.end(),
                           static_cast<int64_t>(1),
                           std::multiplies<int64_t>());
  }
  const popart::Shape shape() const { return shape_; }

 private:
  Tensor tensor_;
  std::vector<int64_t> shape_;
};

}  // namespace

Executor::~Executor() { Reset(); }

void Executor::Prepare(const std::string &proto) {
  VLOG(10) << "enter Executor::Prepare";
  compile_only_ = GetBoolEnv("IPU_COMPILE_ONLY");

  AcquireDevice();
  executor_resources_ = std::make_unique<ExecutorResources>();

  auto art = popart::AnchorReturnType("All");
  std::map<popart::TensorId, popart::AnchorReturnType> anchor_ids;
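  // Register every graph output as an anchor so popart returns its value for
  // all batches in a step (AnchorReturnType "All").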
  for (const auto &id : compiler_resources_->outputs) {
    anchor_ids.emplace(id, art);
  }
  auto dataFlow = popart::DataFlow(ipu_strategy_->batches_per_step, anchor_ids);

  if (ipu_strategy_->is_training) {
    VLOG(10) << "Creating TrainingSession from Onnx Model...";
    auto optimizer = compiler_resources_->NewOptimizer();
    session_ = popart::TrainingSession::createFromOnnxModel(
        proto,
        dataFlow,
        compiler_resources_->loss_var,
        *optimizer,
        device_,
        popart::InputShapeInfo(),
        ipu_strategy_->popart_options,
        ipu_strategy_->popart_patterns);
  } else {
    VLOG(10) << "Creating InferenceSession from Onnx Model...";
    session_ = popart::InferenceSession::createFromOnnxModel(
        proto,
        dataFlow,
        device_,
        popart::InputShapeInfo(),
        ipu_strategy_->popart_options,
        ipu_strategy_->popart_patterns);
  }
  VLOG(10) << "Creating session from Onnx Model...done";

  if (compile_only_) {
    LOG(INFO) << "Saving the offline cache as offline_cache.popart in the "
                 "current directory.";
    VLOG(10) << "Compile only...";
    session_->compileAndExport("./offline_cache.popart");
    VLOG(10) << "Compile only...done";
    return;
  } else {
    VLOG(10) << "Preparing session device...";
    session_->prepareDevice();
    VLOG(10) << "Preparing session device...done";
  }

  SetWeightsIO();

  VLOG(10) << "Copy weights from paddle to popart...";
  WeightsFromPaddle();
  VLOG(10) << "Copy weights from paddle to popart...done";

  if (ipu_strategy_->random_seed != std::numeric_limits<std::uint64_t>::max()) {
    VLOG(10) << "Setting random seed to: " << ipu_strategy_->random_seed;
    session_->setRandomSeed(ipu_strategy_->random_seed);
  }
}

void Executor::Run(const std::vector<const Tensor *> &inputs,
                   const std::vector<Tensor *> &outputs,
                   const framework::ExecutionContext &ctx) {
  if (compile_only_) {
    LOG(INFO) << "If IPU_COMPILE_ONLY=True, skip exe.run";
    return;
  }

  VLOG(10) << "enter Executor::Run";
  // inputs
  std::map<popart::TensorId, popart::IArray &> popart_inputs;
  std::map<popart::TensorId, PdIArray> input_wrappers;
  for (size_t i = 0; i < inputs.size(); i++) {
    auto tensor_id = compiler_resources_->inputs[i];
    input_wrappers.emplace(tensor_id, PdIArray(inputs[i]));
    popart_inputs.emplace(tensor_id, input_wrappers.at(tensor_id));
  }
  // anchors
  std::map<popart::TensorId, popart::IArray &> popart_anchors;
  std::map<popart::TensorId, PdIArray> anchor_wrappers;
  for (size_t i = 0; i < outputs.size(); i++) {
    auto tensor_id = compiler_resources_->outputs[i];
    // get dims & dtype from session
    auto fetch_info = session_->getInfo(tensor_id);
    auto output_shape = fetch_info.shape();
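    // The session reports the per-micro-batch shape; prepend batches-per-step,
    // gradient-accumulation and replication factors so the host tensor can
    // hold everything popart returns in a single run.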
    if (ipu_strategy_->batches_per_step > 1) {
      output_shape.insert(output_shape.begin(),
                          ipu_strategy_->batches_per_step);
    }
    if (ipu_strategy_->popart_options.enableGradientAccumulation) {
      output_shape.insert(output_shape.begin(),
                          ipu_strategy_->popart_options.accumulationFactor);
    }
    if (ipu_strategy_->popart_options.enableReplicatedGraphs) {
      output_shape.insert(output_shape.begin(),
                          ipu_strategy_->popart_options.replicatedGraphCount);
    }

    auto *tensor = outputs[i];
    tensor->Resize(phi::make_ddim(output_shape));
    auto fetch_dtype = fetch_info.dataType();
    auto paddle_type = PopartDType2VarType(fetch_dtype);
    tensor->mutable_data(ctx.GetPlace(),
                         framework::TransToPhiDataType(paddle_type));
    anchor_wrappers.emplace(tensor_id, PdIArray(tensor));
    popart_anchors.emplace(tensor_id, anchor_wrappers.at(tensor_id));
  }
  VLOG(10) << "Prepared inputs/anchors";

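  // With a host-side LR schedule the device optimizer is refreshed each run:
  // switch to the eval optimizer when eval mode is on, otherwise push the
  // latest learning rate. This is skipped when eval is served by the
  // implicit-pipelining fwd-only program, which does not run the optimizer.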
  if (ipu_strategy_->is_training && compiler_resources_->with_lr_sched &&
      !(ipu_strategy_->popart_options.createImplicitPipeliningFwdOnlyProgram &&
        ipu_strategy_->runtime_options.enable_eval)) {
    popart::Optimizer *optimizer;
    if (ipu_strategy_->runtime_options.enable_eval) {
      VLOG(10) << "Switch optimizer to eval mode";
      optimizer = compiler_resources_->eval_optimizer.get();
    } else {
      VLOG(10) << "Update learning_rate";
216 217 218 219 220 221 222
      float new_lr;
      if (ipu_strategy_->is_dynamic) {
        new_lr = ipu_strategy_->lr;
      } else {
        new_lr =
            GetSingleVarFromScope<float>(scope_, compiler_resources_->lr_var);
      }
      VLOG(10) << "New Lr: " << new_lr;
      optimizer = compiler_resources_->UpdateOptimizer(new_lr);
    }
    auto *session = dynamic_cast<popart::TrainingSession *>(session_.get());
    session->updateOptimizerFromHost(optimizer);
  }

  popart::StepIO stepio(popart_inputs, popart_anchors);
  VLOG(10) << "Running...";
  if (ipu_strategy_->popart_options.createImplicitPipeliningFwdOnlyProgram &&
      ipu_strategy_->runtime_options.enable_eval) {
    session_->run("implicitPipeliningFwdOnly", stepio);
  } else {
    session_->run(stepio);
  }
  VLOG(10) << "Running...done";
}

void Executor::WeightsToHost() {
  if (ipu_strategy_->is_training && session_) {
    WeightsToPaddle();
  } else {
    LOG(WARNING) << "For a non-trainning graph, cannot sync weights from IPU.";
  }
}

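// Select a device in priority order: the IPU model simulator when
// POPLAR_IPUMODEL is set, an offline device for compile-only runs, a device
// chosen by popdist when distribution is enabled, or any available IPU
// hardware otherwise.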
void Executor::AcquireDevice() {
  VLOG(10) << "enter Executor::AcquireDevice";
  if (device_) {
    Detach();
    device_.reset();
  }

  bool use_ipu_model = GetBoolEnv("POPLAR_IPUMODEL");
  bool enable_distribution = ipu_strategy_->enable_distribution;
  if (use_ipu_model) {
    VLOG(10) << "Create IPU model device...";
    std::map<std::string, std::string> deviceOpts{
        {
            "numIPUs",
            std::to_string(ipu_strategy_->num_ipus),
        },
        {"tilesPerIPU", std::to_string(ipu_strategy_->tiles_per_ipu)},
        {"ipuVersion", "ipu2"},
    };
    device_ = popart::DeviceManager::createDeviceManager().createIpuModelDevice(
        deviceOpts);
    VLOG(10) << "Create IPU model device...done";
  } else if (compile_only_) {
    VLOG(10) << "Create offline device...";
    std::map<std::string, std::string> deviceOpts{
        {
            "numIPUs",
            std::to_string(ipu_strategy_->num_ipus),
        },
        {"tilesPerIPU", std::to_string(ipu_strategy_->tiles_per_ipu)},
        {"ipuVersion", "ipu2"},
    };
    device_ =
        popart::DeviceManager::createDeviceManager().createOfflineIPUDevice(
            deviceOpts);
    VLOG(10) << "Create offline device...done";
  } else if (enable_distribution) {
    VLOG(10) << "Create distribution device...";
    auto ipus_per_replica = ipu_strategy_->num_ipus /
                            ipu_strategy_->popart_options.replicatedGraphCount;
    auto device_id = popdist_get_device(ipus_per_replica);
    device_ = popart::DeviceManager::createDeviceManager().acquireDeviceById(
        device_id);
    PADDLE_ENFORCE_NOT_NULL(
        device_,
        errors::Unavailable("Can't attach IPU in distribution, ipu_num = %d.",
                            RequestIpus(ipu_strategy_->num_ipus)));
    VLOG(10) << "Create distribution device...done";
  } else {
    VLOG(10) << "Create IPU device...";
    device_ =
        popart::DeviceManager::createDeviceManager().acquireAvailableDevice(
            RequestIpus(ipu_strategy_->num_ipus));
    PADDLE_ENFORCE_NOT_NULL(
        device_,
        errors::Unavailable("Can't attach IPU, ipu_num = %d.",
                            RequestIpus(ipu_strategy_->num_ipus)));
    VLOG(10) << "Create IPU device...done";
  }
  VLOG(10) << "leave Executor::AcquireDevice";
}

void Executor::Detach() {
  if (device_ && device_->isAttached()) {
    VLOG(10) << "trying to detach IPU";
    device_->detach();
    VLOG(10) << " detached IPU";
  }
}

void Executor::Reset() {
  Detach();
  session_.reset();
  executor_resources_.reset();
}

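// Bind each Paddle weight, plus the optimizer-state variables derived from it,
// to the matching popart tensor so WeightsIO can move data in both directions
// between the host scope and the device.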
void Executor::SetWeightsIO() {
  auto opt_type = compiler_resources_->optimizer_type;
  VLOG(10) << "SetWeightsIO for " << opt_type;
  auto pre_post_fix = GetOptPrePostfix(opt_type);
  for (const auto &weight_pd : compiler_resources_->weights) {
    for (const auto &pair : pre_post_fix) {
      // pair.first : popart prefix, pair.second : paddle postfix
      auto weight_pop = compiler_resources_->tensors[weight_pd];
      auto popart_var = pair.first + weight_pop;
      auto paddle_var = weight_pd + pair.second;

      if (scope_->FindVar(paddle_var) == nullptr) {
        continue;
      }
      if (!session_->hasInfo(popart_var)) {
        continue;
      }

      VLOG(10) << "Connect paddle weight: " << paddle_var
               << " with popart weight: " << popart_var;
      auto var = scope_->GetVar(paddle_var);
      auto data_ptr = var->GetMutable<framework::LoDTensor>()->data();
      auto tensor_info = session_->getInfo(popart_var);
      executor_resources_->weights_io.insert(popart_var,
                                             {data_ptr, tensor_info});
      executor_resources_->weights_and_opt_state.emplace_back(
          std::make_pair(popart_var, paddle_var));
    }
  }
}

// align_to_popart: align dtype to popart if true, else to paddle
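// The FLOAT <-> FLOAT16 conversion is done in place, reusing the Paddle
// tensor's existing buffer (allocated as FLOAT, so large enough for either).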
void Executor::ConvertWeights(bool align_to_popart) {
  for (auto weight_pair : executor_resources_->weights_and_opt_state) {
    auto paddle_var = scope_->GetVar(weight_pair.second);
    auto paddle_var_dtype = PhiDType2PopartDType(
        paddle_var->GetMutable<framework::LoDTensor>()->dtype());

    PADDLE_ENFORCE_EQ((paddle_var_dtype == popart::DataType::FLOAT ||
                       paddle_var_dtype == popart::DataType::FLOAT16),
                      true,
                      errors::InvalidArgument(
                          "Currently, we only support FLOAT16 and FLOAT with "
                          "Paddle, but received type is %s.",
                          paddle_var_dtype));

    popart::TensorInfo info = session_->getInfo(weight_pair.first);
    auto popart_var_dtype = info.dataType();
    PADDLE_ENFORCE_EQ((popart_var_dtype == popart::DataType::FLOAT ||
                       popart_var_dtype == popart::DataType::FLOAT16),
                      true,
                      errors::InvalidArgument(
                          "Currently, we only support FLOAT16 and FLOAT with "
                          "popart, but received type is %s.",
                          popart_var_dtype));

    if (paddle_var_dtype == popart_var_dtype) {
      VLOG(10) << weight_pair.first << " and " << weight_pair.second
               << " have the same dtype : " << popart_var_dtype;
      continue;
    } else if (paddle_var_dtype == popart::DataType::FLOAT) {
      VLOG(10) << weight_pair.first << " and " << weight_pair.second
               << " have different dtype : " << popart_var_dtype;
      auto *data_ptr =
          paddle_var->GetMutable<framework::LoDTensor>()->data<float>();

      auto num_elem = info.nelms();
      if (align_to_popart) {
        std::vector<uint16_t> fp16_data;
        std::transform(data_ptr,
                       data_ptr + num_elem,
                       std::back_inserter(fp16_data),
                       [&](float elem) { return popart::floatToHalf(elem); });
        memcpy(reinterpret_cast<void *>(data_ptr),
               fp16_data.data(),
               num_elem * sizeof(float16));
      } else {
        std::vector<float> fp32_data;
        auto fp16_data_ptr = reinterpret_cast<uint16_t *>(data_ptr);
        std::transform(
            fp16_data_ptr,
            fp16_data_ptr + num_elem,
            std::back_inserter(fp32_data),
            [&](uint16_t elem) { return popart::halfToFloat(elem); });
        memcpy(reinterpret_cast<void *>(data_ptr),
               fp32_data.data(),
               num_elem * sizeof(float));
      }
    } else {
      PADDLE_THROW(
          errors::Unimplemented("Convert Paddle FLOAT16 to popart FLOAT"));
    }
  }
}

// |-----------------------------------------------------|
// | Paddle  | Popart  |             Method              |
// |-----------------------------------------------------|
// |  FLOAT  |  FLOAT  |         Paddle -> Popart        |
// |  FLOAT  | FLOAT16 | floatToHalf -> Paddle -> Popart |
// | FLOAT16 |  FLOAT  |         Unimplemented           |
// | FLOAT16 | FLOAT16 |         Paddle -> Popart        |
// |-----------------------------------------------------|
// floatToHalf -> Paddle: cast then save to paddle
// Paddle -> Popart: copy from paddle to popart
void Executor::WeightsFromPaddle() {
  ConvertWeights(true);
  session_->writeWeights(executor_resources_->weights_io);
  session_->weightsFromHost();
}

// |-----------------------------------------------------|
// | Paddle  | Popart  |             Method              |
// |-----------------------------------------------------|
// |  FLOAT  |  FLOAT  |         Popart -> Paddle        |
// |  FLOAT  | FLOAT16 | Popart -> Paddle -> halfToFloat |
// | FLOAT16 |  FLOAT  |         Unimplemented           |
// | FLOAT16 | FLOAT16 |         Popart -> Paddle        |
// |-----------------------------------------------------|
// Paddle -> halfToFloat: cast then save to paddle
// Popart -> Paddle: copy from popart to paddle
void Executor::WeightsToPaddle() {
  session_->weightsToHost();
  session_->readWeights(executor_resources_->weights_io);
  ConvertWeights(false);
}

void Executor::SaveModelToHost(const std::string &path) {
  if (session_) {
    WeightsToPaddle();
    session_->modelToHost(path);
  } else {
    LOG(WARNING) << "Model is empty";
  }
}

}  // namespace ipu
}  // namespace platform
}  // namespace paddle