// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <string>
#include <unordered_set>
#include <vector>

#include "paddle/fluid/eager/api/generated/eager_generated/forwards/dygraph_functions.h"
#include "paddle/fluid/imperative/layout_autotune.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/tensor_utils.h"
namespace egr {
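// Trace a transpose that converts a 4-D tensor to `layout`; tensors of any
// other rank are returned unchanged. Going through trans_layout_ad_func keeps
// the transpose on the autograd graph.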
inline paddle::Tensor EagerTraceTransposeOp(const phi::DataLayout layout,
                                            const paddle::Tensor& in) {
  VLOG(4) << "AutoTune Transpose from " << in.layout() << " to " << layout
          << ", tensor's dim size is " << in.shape().size();
  if (in.shape().size() != 4) {
    return in;
  }
  std::vector<int> axis;
  if (layout == phi::DataLayout::NHWC) {
    axis = {0, 2, 3, 1};
  } else if (layout == phi::DataLayout::NCHW) {
    axis = {0, 3, 1, 2};
  } else {
    axis = {0, 1, 2, 3};
  }
  auto out_tensor = trans_layout_ad_func(in, axis);
  VLOG(4) << "AutoTune Transpose from " << in.layout() << " to " << layout;
  return out_tensor;
}

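// Shorthands for the two layouts tracked by the LayoutAutoTune singleton: the
// desired (tuned) layout and the model's default layout.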
inline phi::DataLayout DesiredLayout() {
  return paddle::imperative::LayoutAutoTune::Instance().GetDesiredLayout();
}

inline phi::DataLayout DefaultLayout() {
  return paddle::imperative::LayoutAutoTune::Instance().GetDefaultLayout();
}

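// Overwrite the layout recorded in out_tensor's DenseTensor meta. Only the
// metadata changes; no data is rearranged here.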
inline void UpdateLayout(paddle::Tensor* out_tensor,
                         const phi::DataLayout layout) {
  if (out_tensor->layout() != layout) {
    VLOG(4) << "Update out_tensor's layout from " << out_tensor->layout()
            << " to " << layout;
    phi::DenseTensorUtils::GetMutableMeta(
        static_cast<phi::DenseTensor*>(out_tensor->impl().get()))
        ->layout = layout;
  }
}

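// Special handling for the shape op. Its output holds the dims of a possibly
// layout-tuned input; when that input was produced in the desired layout
// (change_dim below), the dims are permuted back so callers still see shapes
// in the default layout's order.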
inline void DealWithShapeOp(paddle::Tensor* out_tensor,
                            const phi::DataLayout layout,
                            int dim_size) {
  auto des_layout = DesiredLayout();
  auto def_layout = DefaultLayout();
  int32_t* value =
      static_cast<phi::DenseTensor*>(out_tensor->impl().get())->data<int32_t>();
  bool change_dim =
      (des_layout != def_layout && layout == des_layout && dim_size == 4);
  VLOG(6) << "'Shape OP', layout autotune: True"
          << " desired_layout: " << des_layout
          << " default_layout: " << def_layout
          << " tensor layout: " << out_tensor->layout()
          << " tensor's shape size is : " << dim_size;
  // change_dim == true means the input tensor has been autotuned, so its
  // layout is already the desired_layout and the dims must be permuted back.
  std::vector<int32_t> dims;
  dims.resize(dim_size);
  for (int i = 0; i < dim_size; i++) {
    dims[i] = value[i];
  }
  auto des_str = phi::DataLayoutToString(des_layout);
  if (change_dim && des_str == "NCHW") {
    // NCHW -> NHWC
    VLOG(6) << "layout autotune get Shape from NCHW -> NHWC " << value[0] << " "
            << value[1] << " " << value[2] << " " << value[3] << " to "
            << dims[0] << " " << dims[2] << " " << dims[3] << " " << dims[1];
    value[0] = dims[0];
    value[1] = dims[2];
    value[2] = dims[3];
    value[3] = dims[1];
  } else if (change_dim && des_str == "NHWC") {
    // NHWC -> NCHW
    VLOG(6) << "layout autotune get Shape from NHWC -> NCHW " << value[0] << " "
            << value[1] << " " << value[2] << " " << value[3] << " to "
            << dims[0] << " " << dims[3] << " " << dims[1] << " " << dims[2];
    value[0] = dims[0];
    value[1] = dims[3];
    value[2] = dims[1];
    value[3] = dims[2];
  }
}

// Base transformer, used directly for layout-agnostic ops: when a final
// layout is chosen, inputs whose layout differs from it are transposed and
// the outputs are stamped with it.
class EagerLayoutTransformer {
  using Layout = phi::DataLayout;

 public:
  EagerLayoutTransformer()
      : op_name_(""), final_layout_(Layout::UNDEFINED), dim_size_(1) {}

  EagerLayoutTransformer(const EagerLayoutTransformer&) = delete;

  EagerLayoutTransformer& operator=(const EagerLayoutTransformer&) = delete;

  explicit EagerLayoutTransformer(
      const std::string& op_name,
      const paddle::small_vector<std::vector<paddle::Tensor>,
                                 kSlotSmallVectorSize>& tensors_vector,
      const Layout final_layout = Layout::UNDEFINED)
      : op_name_(op_name), final_layout_(final_layout), dim_size_(1) {
    VLOG(4) << "Agnostic op : " << op_name_ << "'s layout is " << final_layout_;
  }

  virtual ~EagerLayoutTransformer() {}

  virtual paddle::Tensor TransInTensor(const std::string& in_name,
                                       const paddle::Tensor& in) {
    // Record the input's rank; DealWithShapeOp uses it later.
    dim_size_ = in.shape().size();
    bool need_trans =
        !(final_layout_ == Layout::UNDEFINED || final_layout_ == in.layout());
    // For an agnostic op, transpose the input when its layout differs from
    // final_layout_.
    if (need_trans) {
      auto out_tensor = EagerTraceTransposeOp(final_layout_, in);
      phi::DenseTensorUtils::GetMutableMeta(
          static_cast<phi::DenseTensor*>(out_tensor.impl().get()))
          ->layout = final_layout_;
      return out_tensor;
    }
    return in;
  }

  virtual paddle::optional<paddle::Tensor> TransInTensor(
      const std::string& in_name, const paddle::optional<paddle::Tensor>& in) {
    return in ? TransInTensor(in_name, *in) : in;
  }

  virtual std::vector<paddle::Tensor> TransInTensors(
      const std::string& in_name, const std::vector<paddle::Tensor>& in) {
    return in;
  }

  virtual paddle::optional<std::vector<paddle::Tensor>> TransInTensors(
      const std::string& in_name,
      const paddle::optional<std::vector<paddle::Tensor>>& in) {
    return (in ? TransInTensors(in_name, *in) : in);
  }

  virtual void SetOutTensorLayout(std::vector<paddle::Tensor>* out_tensor) {
    bool update_layout = !(final_layout_ == Layout::UNDEFINED);
    if (update_layout) {
      for (size_t i = 0; i < out_tensor->size(); i++) {
        phi::DenseTensorUtils::GetMutableMeta(
            static_cast<phi::DenseTensor*>((*out_tensor)[i].impl().get()))
            ->layout = DesiredLayout();
      }
    }
  }

  virtual void SetOutTensorLayout(
      paddle::optional<paddle::Tensor>* out_tensor) {
    VLOG(4) << "AutoTune out tensor is optional";
  }

  virtual void SetOutTensorLayout(
      paddle::optional<std::vector<paddle::Tensor>>* out_tensor) {
    VLOG(4) << "AutoTune out tensor is optional";
  }

  virtual void SetOutTensorLayout(paddle::Tensor* out_tensor) {
    if (op_name_ == "shape") {
      return DealWithShapeOp(out_tensor, final_layout_, dim_size_);
    }
    bool need_update = !(final_layout_ == Layout::UNDEFINED);
    if (need_update) {
      UpdateLayout(out_tensor, final_layout_);
    }
  }

 protected:
  std::string op_name_;
  const Layout final_layout_;
  int dim_size_;
};

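// Heavily layout-sensitive ops run directly in the desired layout (typically
// ops with a data_format-style attribute): the inputs named in heavily_input_
// are transposed to the desired layout, the layout attribute is rewritten in
// the constructor, and outputs are stamped with the desired layout.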
class EagerHeavilyLayoutSensitiveOpTransformer : public EagerLayoutTransformer {
 public:
  explicit EagerHeavilyLayoutSensitiveOpTransformer(const std::string& op_name,
                                                    std::string* layout)
      : op_name_(op_name), desired_layout_(DesiredLayout()) {
    VLOG(4) << "Heavily op: " << op_name;
    *layout = phi::DataLayoutToString(DesiredLayout());
  }

  paddle::Tensor TransInTensor(const std::string& in_name,
                               const paddle::Tensor& in) {
    if (heavily_input_.count(in_name) != 0 && in.layout() != desired_layout_) {
      auto out_tensor = EagerTraceTransposeOp(desired_layout_, in);
      return out_tensor;
    }
    return in;
  }

  void SetOutTensorLayout(paddle::Tensor* out_tensor) {
    UpdateLayout(out_tensor, desired_layout_);
  }

  void SetOutTensorLayout(std::vector<paddle::Tensor*>* out_tensor) {
    for (size_t i = 0; i < out_tensor->size(); i++) {
      SetOutTensorLayout((*out_tensor)[i]);
    }
  }

  void SetOutTensorLayout(std::vector<paddle::Tensor>* out_tensor) {
    for (size_t i = 0; i < out_tensor->size(); i++) {
      if ((*out_tensor)[i].layout() != desired_layout_) {
        VLOG(4) << "Update out_tensor's layout from "
                << (*out_tensor)[i].layout() << " to " << desired_layout_;
        phi::DenseTensorUtils::GetMutableMeta(
            static_cast<phi::DenseTensor*>((*out_tensor)[i].impl().get()))
            ->layout = desired_layout_;
      }
    }
  }

 protected:
  std::string op_name_;
  const phi::DataLayout desired_layout_;
  std::unordered_set<std::string> heavily_input_{"x", "y", "input"};
};

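// Lightly layout-sensitive ops do not run in the tuned layout: 4-D inputs that
// still carry the desired layout are routed back to the default layout, and
// outputs are stamped with the default layout.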
class EagerLightlyLayoutSensitiveOpTransformer : public EagerLayoutTransformer {
 public:
  EagerLightlyLayoutSensitiveOpTransformer() {}
  explicit EagerLightlyLayoutSensitiveOpTransformer(
      const std::string& op_name) {
    VLOG(4) << "Lightly op : " << op_name;
    auto desired_layout = DesiredLayout();
    final_layout_ = phi::DataLayoutToString(desired_layout);
  }

  // transpose from desired to default
  paddle::Tensor TransInTensor(const std::string& in_name,
                               const paddle::Tensor& in) {
    std::string input_layout = phi::DataLayoutToString(in.layout());
    auto default_layout = DefaultLayout();
    if (final_layout_ == input_layout && in.shape().size() == 4) {
      auto out_tensor = EagerTraceTransposeOp(phi::DataLayout::UNDEFINED, in);
      phi::DenseTensorUtils::GetMutableMeta(
          static_cast<phi::DenseTensor*>(out_tensor.impl().get()))
          ->layout = default_layout;
      return out_tensor;
    }
    return in;
  }

  virtual std::vector<paddle::Tensor> TransInTensors(
      const std::string& in_name, const std::vector<paddle::Tensor>& in) {
    std::vector<paddle::Tensor> result;
    auto desired_layout = DesiredLayout();
    auto default_layout = DefaultLayout();
    for (size_t i = 0; i < in.size(); i++) {
      auto in_tensor = in[i];
      if (in_tensor.layout() == desired_layout) {
        auto out_tensor =
            EagerTraceTransposeOp(phi::DataLayout::UNDEFINED, in_tensor);
        phi::DenseTensorUtils::GetMutableMeta(
            static_cast<phi::DenseTensor*>(out_tensor.impl().get()))
            ->layout = default_layout;
        result.emplace_back(out_tensor);
      } else {
        result.emplace_back(in_tensor);
      }
    }
    return result;
  }

  void SetOutTensorLayout(paddle::Tensor* out_tensor) {
    UpdateLayout(out_tensor, DefaultLayout());
  }

  void SetOutTensorLayout(std::vector<paddle::Tensor*>* out_tensor) {
    for (size_t i = 0; i < out_tensor->size(); i++) {
      SetOutTensorLayout((*out_tensor)[i]);
    }
  }

  void SetOutTensorLayout(std::vector<paddle::Tensor>* out_tensor) {
    auto default_layout = DefaultLayout();
    for (size_t i = 0; i < out_tensor->size(); i++) {
      phi::DenseTensorUtils::GetMutableMeta(
          static_cast<phi::DenseTensor*>((*out_tensor)[i].impl().get()))
          ->layout = default_layout;
    }
  }

 protected:
  std::string final_layout_;
  std::unordered_set<std::string> heavily_input_{"x", "y", "input"};
};

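// An explicit transpose on a layout-tuned input is handled by rewriting its
// axis attribute (SetAttr) rather than inserting an extra transpose; the input
// passes through unchanged and the output gets the default layout.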
class EagerTransposeOpTransformer
    : public EagerLightlyLayoutSensitiveOpTransformer {
 public:
  EagerTransposeOpTransformer() {}
  explicit EagerTransposeOpTransformer(const std::string& op_name) {
    VLOG(4) << "AutoTuneTransformer op: " << op_name;
  }

  void SetAttr(std::vector<int>* axis, bool is_nhwc) {
    std::vector<int> perm_nchw = {0, 2, 3, 1};
    std::vector<int> perm_nhwc = {0, 3, 1, 2};
    auto perm = is_nhwc ? perm_nhwc : perm_nchw;
    (*axis)[0] = perm[(*axis)[0]];
    (*axis)[1] = perm[(*axis)[1]];
    (*axis)[2] = perm[(*axis)[2]];
    (*axis)[3] = perm[(*axis)[3]];
  }

  paddle::Tensor TransInTensor(const std::string& in_name,
                               const paddle::Tensor& in) {
    return in;
  }

  void SetOutTensorLayout(paddle::Tensor* out_tensor) {
    UpdateLayout(out_tensor, DefaultLayout());
  }
};

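// argmax on a layout-tuned input: SetAttr remaps the scalar axis to the tuned
// layout's dimension order and the output keeps the desired layout.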
class EagerArgmaxOpTransformer
    : public EagerLightlyLayoutSensitiveOpTransformer {
 public:
  EagerArgmaxOpTransformer() {}
  explicit EagerArgmaxOpTransformer(const std::string& op_name) {
    VLOG(4) << "AutoTuneTransformer op: " << op_name;
  }

  void SetAttr(paddle::experimental::Scalar* axis, bool is_nhwc) {
    std::vector<int> perm_nhwc = {0, 3, 1, 2};
    std::vector<int> perm_nchw = {0, 2, 3, 1};
    auto perm = is_nhwc ? perm_nhwc : perm_nchw;
    int axes = axis->to<int>();
    (*axis) = static_cast<paddle::experimental::Scalar>(perm[axes]);
  }

  void SetOutTensorLayout(paddle::Tensor* out_tensor) {
    UpdateLayout(out_tensor, DesiredLayout());
  }
};

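// flatten on a layout-tuned input: the tensor is consumed as-is and the output
// is stamped with the default layout.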
class EagerFlattenOpTransformer
    : public EagerLightlyLayoutSensitiveOpTransformer {
 public:
  EagerFlattenOpTransformer() {}
  explicit EagerFlattenOpTransformer(const std::string& op_name) {
    VLOG(4) << "AutoTuneTransformer op: " << op_name;
  }

  // The input is consumed as-is; no transpose is inserted for flatten.
  paddle::Tensor TransInTensor(const std::string& in_name,
                               const paddle::Tensor& in) {
    return in;
  }

  void SetOutTensorLayout(paddle::Tensor* out_tensor) {
    UpdateLayout(out_tensor, DefaultLayout());
  }
};

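// concat on layout-tuned inputs: the tensors are concatenated as-is, SetAttr
// remaps the (possibly negative) axis to the given layout, and the output
// keeps the desired layout.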
class EagerConcatOpTransformer
    : public EagerLightlyLayoutSensitiveOpTransformer {
 public:
  EagerConcatOpTransformer() {}
  explicit EagerConcatOpTransformer(const std::string& op_name) {
    VLOG(4) << "AutoTuneTransformer op : " << op_name;
  }

  void SetAttr(paddle::experimental::Scalar* axis, phi::DataLayout layout) {
    std::vector<int> perm_nhwc = {0, 3, 1, 2};
    std::vector<int> perm_nchw = {0, 2, 3, 1};
    int axes = axis->to<int>();
    axes = axes < 0 ? axes + 4 : axes;
    auto perm = (phi::DataLayout::NHWC == layout) ? perm_nhwc : perm_nchw;
    (*axis) = static_cast<paddle::experimental::Scalar>(perm[axes]);
  }

  virtual std::vector<paddle::Tensor> TransInTensors(
      const std::string& in_name, const std::vector<paddle::Tensor>& in) {
    return in;
  }

  void SetOutTensorLayout(paddle::Tensor* out_tensor) {
    UpdateLayout(out_tensor, DesiredLayout());
  }
};
}  // namespace egr