/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/framework/data_transform.h"

#include "paddle/fluid/framework/data_device_transform.h"
#include "paddle/fluid/framework/data_layout_transform.h"
#include "paddle/fluid/framework/data_type_transform.h"

namespace paddle {
namespace framework {
class Variable;
}  // namespace framework
}  // namespace paddle

#ifdef PADDLE_WITH_MKLDNN
#include "paddle/fluid/platform/mkldnn_helper.h"
#endif

namespace paddle {
namespace framework {

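// Hands the buffer of `from` over to `to` via ShareDataWith and resets `from`
// to an empty Tensor, so the chain of transforms below can reuse a single
// intermediate buffer instead of copying it at every step.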
static void PassTensorData(Tensor *from, Tensor *to) {
  to->ShareDataWith(*from);
  *from = Tensor();
}

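// Converts `input_tensor` from the layout/data type/place recorded in
// `kernel_type_for_var` to what `expected_kernel_type` requires. The three
// transforms (layout, data type, device) are applied in that order, each one
// feeding its result into the next through PassTensorData; at least one of
// them must apply, otherwise the enforce at the end of this function fires.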
void TransformData(const OpKernelType &expected_kernel_type,
                   const OpKernelType &kernel_type_for_var,
                   const Tensor &input_tensor,
                   Tensor *output_tensor) {
  bool transformed = false;
  Tensor in;
  in.ShareDataWith(input_tensor);
  Tensor out;
  const DataLayout lin = kernel_type_for_var.data_layout_;
  const DataLayout lout = expected_kernel_type.data_layout_;
  // do layout transform
  if (NeedTransformLayout(lout, lin)) {
#ifdef PADDLE_WITH_MKLDNN
    if (lin == DataLayout::kMKLDNN || lout == DataLayout::kMKLDNN) {
      PADDLE_ENFORCE_EQ(
          !(lin == DataLayout::kMKLDNN && lout == DataLayout::kMKLDNN),
          true,
          platform::errors::PreconditionNotMet(
              "No layout transform needed between two MKLDNN OPKernels."));

      if (lin != DataLayout::kMKLDNN && lout == DataLayout::kMKLDNN) {
        // Case1 - transform from Non-MKLDNN OPKernel to MKLDNN OPKernel
        // Just set layout/format. No real transform occurs

        auto out_format = platform::MKLDNNFormatForSize(in.dims().size(),
                                                        ToMKLDNNFormat(lin));
        out.ShareDataWith(input_tensor);
        // For NHWC data we need to reshape the tensors, because MKL-DNN
        // expects the dims description in NHWC order
        if (lin == DataLayout::kNHWC || lin == DataLayout::kNDHWC) {
          platform::MatchShapeToLayout(&out, lin, lout);
          // We register only NHWC, assuming the model is layout-consistent,
          // i.e. uses either NHWC or NCHW throughout
          paddle::platform::MKLDNNDeviceContext::tls()
              .set_cur_paddle_data_layout(lin);
        }
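        // Describe the shared buffer for oneDNN: dims, data type and the
        // memory format selected above.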
        dnnl::memory::desc out_mem_desc(
            vectorize(out.dims()),
            ToMKLDNNDataType(TransToProtoVarType(in.type())),
            out_format);
        out.set_mem_desc(out_mem_desc);
      } else {
        // Case2 - transform from MKLDNN OPKernel to Non-MKLDNN OPKernel
        // Do transform via MKLDNN lib
        TransDataLayoutFromMKLDNN(
            kernel_type_for_var, expected_kernel_type, in, &out);
      }
    } else {
      // Case3 - transform between Non-MKLDNN OPKernels
      TransDataLayout(kernel_type_for_var, expected_kernel_type, in, &out);
    }
#else
    // Case3 - transform between Non-MKLDNN OPKernels
    TransDataLayout(kernel_type_for_var, expected_kernel_type, in, &out);
#endif
    transformed = true;
    PassTensorData(&out, &in);
  }

  // do data type transform
  if (expected_kernel_type.data_type_ != kernel_type_for_var.data_type_) {
    TransDataType(kernel_type_for_var, expected_kernel_type, in, &out);
    transformed = true;
    PassTensorData(&out, &in);
  }

  // do device transform
  if (!platform::is_same_place(kernel_type_for_var.place_,
                               expected_kernel_type.place_)) {
    TransDataDevice(in, expected_kernel_type.place_, &out);
    transformed = true;
    PassTensorData(&out, &in);
  }

  PADDLE_ENFORCE_EQ(
      transformed,
      true,
      platform::errors::PreconditionNotMet(
          "No transform is applied for the data needs to be transformed."));
  // get output data
  output_tensor->ShareDataWith(in);
}
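// Illustrative sketch only (nothing in this file calls it): a caller that has
// already decided a transform is required could drive the two functions in
// this file roughly as follows, where `in_var` and `trans_var` are
// hypothetical Variable objects owned by the operator-preparation code:
//
//   const Tensor &in_tensor = in_var.Get<LoDTensor>();
//   Tensor out_tensor;
//   TransformData(expected_kernel_type, kernel_type_for_var, in_tensor,
//                 &out_tensor);
//   SetTensorToVariable(in_var, out_tensor, &trans_var);  // defined below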

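// Writes the transformed `tensor` into `out_var` while carrying over the
// metadata of `in_var`: LoD and layout (plus the MKL-DNN memory descriptor
// when built with MKL-DNN) for LoDTensor, and height/rows for SelectedRows.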
void SetTensorToVariable(const Variable &in_var,
                         const Tensor &tensor,
                         Variable *out_var) {
  if (in_var.IsType<LoDTensor>()) {
    auto &in_lod_tensor = in_var.Get<LoDTensor>();
    auto *tran_lod_tensor = out_var->GetMutable<LoDTensor>();
    tran_lod_tensor->set_lod(in_lod_tensor.lod());
    tran_lod_tensor->set_layout(in_lod_tensor.layout());
#ifdef PADDLE_WITH_MKLDNN
    tran_lod_tensor->set_mem_desc(in_lod_tensor.mem_desc());
#endif
    tran_lod_tensor->ShareDataWith(tensor);
  } else if (in_var.IsType<phi::SelectedRows>()) {
    auto &in_selected_rows = in_var.Get<phi::SelectedRows>();
    auto *trans_selected_rows = out_var->GetMutable<phi::SelectedRows>();
    trans_selected_rows->set_height(in_selected_rows.height());
    trans_selected_rows->set_rows(in_selected_rows.rows());
    trans_selected_rows->mutable_value()->ShareDataWith(tensor);
  } else {
    PADDLE_THROW(platform::errors::Unavailable(
        "Unsupported variable type, only supports LoDTensor or SelectedRows, "
        "but the input variable type is %s.",
        ToTypeName(in_var.Type())));
  }
}

}  // namespace framework
}  // namespace paddle