/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/platform/device/ipu/ipu_executor.h"

#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/platform/device/ipu/ipu_compiler.h"
#include "paddle/fluid/platform/device/ipu/ipu_names.h"
#include "paddle/fluid/platform/device/ipu/ipu_strategy.h"

namespace paddle {
namespace platform {
namespace ipu {

namespace {

// Get the popart prefix and paddle postfix of the optimizer (weight) states
// Format: {popart_prefix, paddle_postfix}
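// e.g. for adam, assuming the popart tensor of a paddle weight "w0" is also
// named "w0", the first moment is the popart tensor "Accl1___w0" paired with
// the paddle variable "w0_moment1_0" (see SetWeightsIO below).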
std::vector<std::pair<std::string, std::string>> GetOptPrePostfix(
    const std::string &opt_type) {
  std::vector<std::pair<std::string, std::string>> pre_post_fix;
  // Weight self
  pre_post_fix.push_back(std::make_pair("", ""));

  // Weight states
  // TODO(alleng) support pair("Accl1___", "_moment1_{id!=0}")
  if (opt_type == "adam" || opt_type == "lamb" || opt_type == "adamw") {
    pre_post_fix.push_back(std::make_pair("Accl1___", "_moment1_0"));
    pre_post_fix.push_back(std::make_pair("Accl2___", "_moment2_0"));
    pre_post_fix.push_back(std::make_pair("Step___", "_beta1_pow_acc_0"));
  } else if (opt_type == "momentum") {
    pre_post_fix.push_back(std::make_pair("Accl___", "_velocity_0"));
  } else if (opt_type == "adamax") {
    pre_post_fix.push_back(std::make_pair("Accl1___", "_moment_0"));
    pre_post_fix.push_back(std::make_pair("Accl2___", "_inf_norm__0"));
    pre_post_fix.push_back(std::make_pair("Step___", "_beta1_pow_acc_0"));
  } else if (opt_type == "adagrad") {
    pre_post_fix.push_back(std::make_pair("Accl1___", "_moment_0"));
  } else if (opt_type == "adadelta") {
    pre_post_fix.push_back(std::make_pair("Accl1___", "__avg_squared_grad_0"));
    pre_post_fix.push_back(
        std::make_pair("Accl2___", "__avg_squared_update_0"));
  } else if (opt_type == "rmsprop") {
    pre_post_fix.push_back(std::make_pair("Accl1___", "_mean_square_0"));
    pre_post_fix.push_back(std::make_pair("Accl2___", "_mean_grad_0"));
    pre_post_fix.push_back(std::make_pair("Accl3___", "_momentum__0"));
  }
  return pre_post_fix;
}

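// Adapts a paddle Tensor to the popart::IArray interface. ShareDataWith keeps
// a reference to the original allocation, so popart reads and writes the
// paddle buffer directly instead of copying it.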
class PdIArray final : public popart::IArray {
 public:
  explicit PdIArray(const Tensor *tensor) {
    tensor_.ShareDataWith(*tensor);
    for (int i = 0; i < tensor->dims().size(); ++i) {
      shape_.push_back(tensor->dims().at(i));
    }
  }

 public:
  void *data() { return tensor_.data(); }
  popart::DataType dataType() const {
    return PhiDType2PopartDType(tensor_.dtype());
  }
  std::size_t rank() const { return tensor_.dims().size(); }
  int64_t dim(size_t index) const { return tensor_.dims().at(index); }
  std::size_t nelms() const {
    return std::accumulate(shape_.begin(), shape_.end(),
                           static_cast<int64_t>(1), std::multiplies<int64_t>());
  }
  const popart::Shape shape() const { return shape_; }

 private:
  Tensor tensor_;
  std::vector<int64_t> shape_;
};

}  // namespace

Executor::~Executor() {
  Detach();
  session_.reset();
  executor_resources_.reset();
}

void Executor::Prepare(const std::string &proto) {
  VLOG(10) << "enter Executor::Prepare";
  compile_only_ = GetBoolEnv("IPU_COMPILE_ONLY");

  AcquireDevice();
  executor_resources_ = std::make_unique<ExecutorResources>();

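  // Anchor every model output with AnchorReturnType("All") so popart returns
  // the result of every micro batch in a step, not just the final one.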
  auto art = popart::AnchorReturnType("All");
  std::map<popart::TensorId, popart::AnchorReturnType> anchor_ids;
  for (const auto &id : compiler_resources_->outputs) {
    anchor_ids.emplace(id, art);
  }
  auto dataFlow = popart::DataFlow(ipu_strategy_->batches_per_step, anchor_ids);

  if (ipu_strategy_->is_training) {
    VLOG(10) << "Creating TrainingSession from Onnx Model...";
    auto optimizer = compiler_resources_->NewOptimizer();
    session_ = popart::TrainingSession::createFromOnnxModel(
        proto, dataFlow, compiler_resources_->loss_var, *optimizer, device_,
        popart::InputShapeInfo(), ipu_strategy_->popart_options,
        ipu_strategy_->popart_patterns);
  } else {
    VLOG(10) << "Creating InferenceSession from Onnx Model...";
    session_ = popart::InferenceSession::createFromOnnxModel(
        proto, dataFlow, device_, popart::InputShapeInfo(),
        ipu_strategy_->popart_options, ipu_strategy_->popart_patterns);
  }
  VLOG(10) << "Creating session from Onnx Model...done";

  if (compile_only_) {
    LOG(INFO)
        << "Save the offline cache as offline_cache.popart in the current path.";
    VLOG(10) << "Compile only...";
    session_->compileAndExport("./offline_cache.popart");
    VLOG(10) << "Compile only...done";
    return;
  } else {
    VLOG(10) << "Preparing session device...";
    session_->prepareDevice();
    VLOG(10) << "Preparing session device...done";
  }

  SetWeightsIO();

  VLOG(10) << "Copy weights from paddle to popart...";
  WeightsFromPaddle();
  VLOG(10) << "Copy weights from paddle to popart...done";

  if (ipu_strategy_->random_seed != std::numeric_limits<std::uint64_t>::max()) {
    VLOG(10) << "Setting random seed to: " << ipu_strategy_->random_seed;
    session_->setRandomSeed(ipu_strategy_->random_seed);
  }
}

void Executor::Run(const std::vector<const Tensor *> &inputs,
                   const std::vector<Tensor *> &outputs,
                   const framework::ExecutionContext &ctx) {
  if (compile_only_) {
    LOG(INFO) << "If IPU_COMPILE_ONLY=True, skip exe.run";
    return;
  }

  VLOG(10) << "enter Executor::Run";
  // inputs
  std::map<popart::TensorId, popart::IArray &> popart_inputs;
  std::map<popart::TensorId, PdIArray> input_wrappers;
  for (size_t i = 0; i < inputs.size(); i++) {
    auto tensor_id = compiler_resources_->inputs[i];
    input_wrappers.emplace(tensor_id, PdIArray(inputs[i]));
    popart_inputs.emplace(tensor_id, input_wrappers.at(tensor_id));
  }
  // anchors
  std::map<popart::TensorId, popart::IArray &> popart_anchors;
  std::map<popart::TensorId, PdIArray> anchor_wrappers;
  for (size_t i = 0; i < outputs.size(); i++) {
    auto tensor_id = compiler_resources_->outputs[i];
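    // popart reports the per-step anchor shape without the leading
    // batches_per_step / accumulation / replication dimensions, so they are
    // prepended below before resizing the paddle output tensor.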
    // get dims & dtype from session
    auto fetch_info = session_->getInfo(tensor_id);
    auto output_shape = fetch_info.shape();
    if (ipu_strategy_->batches_per_step > 1) {
      output_shape.insert(output_shape.begin(),
                          ipu_strategy_->batches_per_step);
    }
    if (ipu_strategy_->popart_options.enableGradientAccumulation) {
      output_shape.insert(output_shape.begin(),
                          ipu_strategy_->popart_options.accumulationFactor);
    }
    if (ipu_strategy_->popart_options.enableReplicatedGraphs) {
      output_shape.insert(output_shape.begin(),
                          ipu_strategy_->popart_options.replicatedGraphCount);
    }

    auto *tensor = outputs[i];
    tensor->Resize(phi::make_ddim(output_shape));
    auto fetch_dtype = fetch_info.dataType();
    auto paddle_type = PopartDType2VarType(fetch_dtype);
    tensor->mutable_data(ctx.GetPlace(),
                         framework::TransToPhiDataType(paddle_type));
    anchor_wrappers.emplace(tensor_id, PdIArray(tensor));
    popart_anchors.emplace(tensor_id, anchor_wrappers.at(tensor_id));
  }
  VLOG(10) << "Prepared inputs/anchors";

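  // With an lr scheduler (or eval mode), the device-side optimizer state must
  // be refreshed from the host before this run.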
  if (ipu_strategy_->is_training && compiler_resources_->with_lr_sched &&
      !(ipu_strategy_->popart_options.createImplicitPipeliningFwdOnlyProgram &&
        ipu_strategy_->runtime_options.enable_eval)) {
    popart::Optimizer *optimizer;
    if (ipu_strategy_->runtime_options.enable_eval) {
      VLOG(10) << "Switch optimizer to eval mode";
      optimizer = compiler_resources_->eval_optimizer.get();
    } else {
      VLOG(10) << "Update learning_rate";
      auto new_lr =
          GetSingleVarFromScope<float>(scope_, compiler_resources_->lr_var);
      VLOG(10) << "New Lr: " << new_lr;
      optimizer = compiler_resources_->UpdateOptimizer(new_lr);
    }
    auto *session = dynamic_cast<popart::TrainingSession *>(session_.get());
    session->updateOptimizerFromHost(optimizer);
  }

  popart::StepIO stepio(popart_inputs, popart_anchors);
  VLOG(10) << "Running...";
  if (ipu_strategy_->popart_options.createImplicitPipeliningFwdOnlyProgram &&
      ipu_strategy_->runtime_options.enable_eval) {
    session_->run("implicitPipeliningFwdOnly", stepio);
  } else {
    session_->run(stepio);
  }
  VLOG(10) << "Running...done";
}

void Executor::WeightsToHost() {
  if (ipu_strategy_->is_training && session_) {
    WeightsToPaddle();
  } else {
    LOG(WARNING) << "For a non-trainning graph, cannot sync weights from IPU.";
  }
}

void Executor::AcquireDevice() {
  VLOG(10) << "enter Executor::AcquireDevice";
  if (device_) {
    Detach();
    device_.reset();
  }

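  // POPLAR_IPUMODEL selects poplar's host-side IPU emulator rather than real
  // hardware.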
  bool use_ipu_model = GetBoolEnv("POPLAR_IPUMODEL");
  bool enable_distribution = ipu_strategy_->enable_distribution;
  if (use_ipu_model) {
    VLOG(10) << "Create IPU model device...";
    std::map<std::string, std::string> deviceOpts{
        {"numIPUs", std::to_string(ipu_strategy_->num_ipus)},
        {"ipuVersion", "ipu2"},
    };
    device_ = popart::DeviceManager::createDeviceManager().createIpuModelDevice(
        deviceOpts);
    VLOG(10) << "Create IPU model device...done";
  } else if (compile_only_) {
    VLOG(10) << "Create offline device...";
    std::map<std::string, std::string> deviceOpts{
        {"numIPUs", std::to_string(ipu_strategy_->num_ipus)},
        {"ipuVersion", "ipu2"},
    };
    device_ =
        popart::DeviceManager::createDeviceManager().createOfflineIPUDevice(
            deviceOpts);
    VLOG(10) << "Create offline device...done";
  } else if (enable_distribution) {
    VLOG(10) << "Create distribution device...";
    auto ipus_per_replica = ipu_strategy_->num_ipus /
                            ipu_strategy_->popart_options.replicatedGraphCount;
    auto device_id = popdist_get_device(ipus_per_replica);
    device_ = popart::DeviceManager::createDeviceManager().acquireDeviceById(
        device_id);
    PADDLE_ENFORCE_NOT_NULL(
        device_,
        errors::Unavailable("Can't attach IPU in distribution, ipu_num = %d.",
                            RequestIpus(ipu_strategy_->num_ipus)));
    VLOG(10) << "Create distribution device...done";
  } else {
    VLOG(10) << "Create IPU device...";
    device_ =
        popart::DeviceManager::createDeviceManager().acquireAvailableDevice(
            RequestIpus(ipu_strategy_->num_ipus));
    PADDLE_ENFORCE_NOT_NULL(
        device_, errors::Unavailable("Can't attach IPU, ipu_num = %d.",
                                     RequestIpus(ipu_strategy_->num_ipus)));
    VLOG(10) << "Create IPU device...done";
  }
  VLOG(10) << "leave Executor::AcquireDevice";
}

void Executor::Detach() {
  if (device_ && device_->isAttached()) {
    VLOG(10) << "trying to detach IPU";
    device_->detach();
    VLOG(10) << " detached IPU";
  }
}

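// Connect each paddle weight (and its per-optimizer states, see
// GetOptPrePostfix) to the matching popart tensor through popart::WeightsIO,
// so weight transfers read and write the paddle buffers directly.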
void Executor::SetWeightsIO() {
  auto opt_type = compiler_resources_->optimizer_type;
  VLOG(10) << "SetWeightsIO for " << opt_type;
  auto pre_post_fix = GetOptPrePostfix(opt_type);
  for (const auto &weight_pd : compiler_resources_->weights) {
    for (const auto &pair : pre_post_fix) {
      // pair.first : popart prefix, pair.second : paddle postfix
      auto weight_pop = compiler_resources_->tensors[weight_pd];
      auto popart_var = pair.first + weight_pop;
      auto paddle_var = weight_pd + pair.second;

      if (scope_->FindVar(paddle_var) == nullptr) {
        continue;
      }
      if (!session_->hasInfo(popart_var)) {
        continue;
      }

      VLOG(10) << "Connect paddle weight: " << paddle_var
               << " with popart weight: " << popart_var;
      auto var = scope_->GetVar(paddle_var);
      auto data_ptr = var->GetMutable<framework::LoDTensor>()->data();
      auto tensor_info = session_->getInfo(popart_var);
      executor_resources_->weights_io.insert(popart_var,
                                             {data_ptr, tensor_info});
      executor_resources_->weights_and_opt_state.emplace_back(
          std::make_pair(popart_var, paddle_var));
    }
  }
}

// align_to_popart: align dtype to popart if true, else to paddle
void Executor::ConvertWeights(bool align_to_popart) {
  for (auto weight_pair : executor_resources_->weights_and_opt_state) {
    auto paddle_var = scope_->GetVar(weight_pair.second);
    auto paddle_var_dtype = PhiDType2PopartDType(
        paddle_var->GetMutable<framework::LoDTensor>()->dtype());

    PADDLE_ENFORCE_EQ((paddle_var_dtype == popart::DataType::FLOAT ||
                       paddle_var_dtype == popart::DataType::FLOAT16),
                      true,
                      errors::InvalidArgument(
                          "Currently, we only support FLOAT16 and FLOAT with "
                          "Paddle, but received type is %s.",
                          paddle_var_dtype));

    popart::TensorInfo info = session_->getInfo(weight_pair.first);
    auto popart_var_dtype = info.dataType();
    PADDLE_ENFORCE_EQ((popart_var_dtype == popart::DataType::FLOAT ||
                       popart_var_dtype == popart::DataType::FLOAT16),
                      true,
                      errors::InvalidArgument(
                          "Currently, we only support FLOAT16 and FLOAT with "
                          "popart, but received type is %s.",
                          popart_var_dtype));

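    // Only the FLOAT(paddle)/FLOAT16(popart) mix is implemented. The paddle
    // buffer is allocated as FLOAT, so the cast is done in place: fp32->fp16
    // packs the halves into the front of the buffer; fp16->fp32 expands them
    // back to full width.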
    if (paddle_var_dtype == popart_var_dtype) {
      VLOG(10) << weight_pair.first << " and " << weight_pair.second
               << " have the same dtype : " << popart_var_dtype;
      continue;
    } else if (paddle_var_dtype == popart::DataType::FLOAT) {
      VLOG(10) << weight_pair.first << " and " << weight_pair.second
               << " have different dtype : " << popart_var_dtype;
      auto *data_ptr =
          paddle_var->GetMutable<framework::LoDTensor>()->data<float>();

      auto num_elem = info.nelms();
      if (align_to_popart) {
        std::vector<uint16_t> fp16_data;
        std::transform(data_ptr, data_ptr + num_elem,
                       std::back_inserter(fp16_data),
                       [&](float elem) { return popart::floatToHalf(elem); });
        memcpy(reinterpret_cast<void *>(data_ptr), fp16_data.data(),
               num_elem * sizeof(float16));
      } else {
        std::vector<float> fp32_data;
        auto fp16_data_ptr = reinterpret_cast<uint16_t *>(data_ptr);
        std::transform(fp16_data_ptr, fp16_data_ptr + num_elem,
                       std::back_inserter(fp32_data), [&](uint16_t elem) {
                         return popart::halfToFloat(elem);
                       });
        memcpy(reinterpret_cast<void *>(data_ptr), fp32_data.data(),
               num_elem * sizeof(float));
      }
    } else {
      PADDLE_THROW(
          errors::Unimplemented("Convert Paddle FLOAT16 to popart FLOAT"));
    }
  }
}

// |-----------------------------------------------------|
// | Paddle  | Popart  |             Method              |
// |-----------------------------------------------------|
// |  FLOAT  |  FLOAT  |         Paddle -> Popart        |
// |  FLOAT  | FLOAT16 | floatToHalf -> Paddle -> Popart |
// | FLOAT16 |  FLOAT  |         Unimplemented           |
// | FLOAT16 | FLOAT16 |         Paddle -> Popart        |
// |-----------------------------------------------------|
// floatToHalf -> Paddle: cast then save to paddle
// Paddle -> Popart: copy from paddle to popart
void Executor::WeightsFromPaddle() {
  ConvertWeights(true);
  session_->writeWeights(executor_resources_->weights_io);
  session_->weightsFromHost();
}

// |-----------------------------------------------------|
// | Paddle  | Popart  |             Method              |
// |-----------------------------------------------------|
// |  FLOAT  |  FLOAT  |         Popart -> Paddle        |
// |  FLOAT  | FLOAT16 | Popart -> Paddle -> halfToFloat |
// | FLOAT16 |  FLOAT  |         Unimplemented           |
// | FLOAT16 | FLOAT16 |         Popart -> Paddle        |
// |-----------------------------------------------------|
// Paddle -> halfToFloat: cast then save to paddle
// Popart -> Paddle: copy from popart to paddle
void Executor::WeightsToPaddle() {
  session_->weightsToHost();
  session_->readWeights(executor_resources_->weights_io);
  ConvertWeights(false);
}

void Executor::SaveModelToHost(const std::string &path) {
  if (session_) {
    WeightsToPaddle();
    session_->modelToHost(path);
  } else {
    LOG(WARNING) << "Model is empty";
  }
}

}  // namespace ipu
}  // namespace platform
}  // namespace paddle