/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/framework/data_transform.h"

#include "paddle/fluid/framework/data_device_transform.h"
#include "paddle/fluid/framework/data_layout_transform.h"
#include "paddle/fluid/framework/data_type_transform.h"

namespace paddle {
namespace framework {

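// Moves the data held by `from` into `to` (shared, not copied) and resets
// `from` so it can receive the result of the next transform stage.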
static void PassTensorData(Tensor *from, Tensor *to) {
  to->ShareDataWith(*from);
  *from = Tensor();
}

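// Transforms `input_tensor` from the layout/data type/place described by
// `kernel_type_for_var` into those expected by `expected_kernel_type`.
// The three stages (layout, data type, device) run in order, each feeding
// its result into the next via PassTensorData; at least one stage must
// apply, otherwise the PADDLE_ENFORCE at the end fires.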
void TransferData(const OpKernelType &expected_kernel_type,
                  const OpKernelType &kernel_type_for_var,
                  const Tensor &input_tensor, Tensor *output_tensor) {
  bool transformed = false;
  Tensor in;
  in.ShareDataWith(input_tensor);
  Tensor out;
  DataLayout lin = kernel_type_for_var.data_layout_;
  DataLayout lout = expected_kernel_type.data_layout_;

  // do layout transform
  if (NeedTransformLayout(lout, lin)) {
    if (lin == DataLayout::kMKLDNN || lout == DataLayout::kMKLDNN) {
      PADDLE_ENFORCE(
          !(lin == DataLayout::kMKLDNN && lout == DataLayout::kMKLDNN),
          "No layout transform needed between two MKLDNN OPKernels");

      if (lin != DataLayout::kMKLDNN && lout == DataLayout::kMKLDNN) {
#ifdef PADDLE_WITH_MKLDNN
        // Case1 - transform from Non-MKLDNN OPKernel to MKLDNN OPKernel
        // Just set layout/format. No real transform occurs.

        auto out_format =
            MKLDNNFormatForSize(in.dims().size(), ToMKLDNNFormat(lin));

        out.ShareDataWith(input_tensor);
        out.set_layout(DataLayout::kMKLDNN);
        out.set_format(out_format);
#endif
      } else {
        // Case2 - transform from MKLDNN OPKernel to Non-MKLDNN OPKernel
        // Do transform via MKLDNN lib
        TransDataLayoutFromMKLDNN(kernel_type_for_var, expected_kernel_type, in,
                                  &out);
      }
    } else {
      // Case3 - transform between Non-MKLDNN OPKernels
      TransDataLayout(kernel_type_for_var, expected_kernel_type, in, &out);
    }
    transformed = true;
    PassTensorData(&out, &in);
  }

  // do data type transform
  if (expected_kernel_type.data_type_ != kernel_type_for_var.data_type_) {
    TransDataType(kernel_type_for_var, expected_kernel_type, in, &out);
    transformed = true;
    PassTensorData(&out, &in);
  }

  // do device transform
  if (!platform::is_same_place(kernel_type_for_var.place_,
                               expected_kernel_type.place_)) {
    TransDataDevice(in, expected_kernel_type.place_, &out);
    transformed = true;
    PassTensorData(&out, &in);
  }

  PADDLE_ENFORCE(transformed, "No transform was applied; please check!");
  // get output data
  output_tensor->ShareDataWith(in);
}

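// Stores `tensor` into `out_var` while preserving the metadata of `in_var`:
// LoD and layout for a LoDTensor, height and rows for SelectedRows. The
// underlying data is shared, not copied.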
void SetTensorToVariable(const Variable &in_var, const Tensor &tensor,
                         Variable *out_var) {
  if (in_var.IsType<LoDTensor>()) {
    auto &in_lod_tensor = in_var.Get<LoDTensor>();
    auto *tran_lod_tensor = out_var->GetMutable<LoDTensor>();
    tran_lod_tensor->set_lod(in_lod_tensor.lod());
    tran_lod_tensor->set_layout(in_lod_tensor.layout());
    tran_lod_tensor->ShareDataWith(tensor);
  } else if (in_var.IsType<SelectedRows>()) {
    auto &in_selected_rows = in_var.Get<SelectedRows>();
    auto *trans_selected_rows = out_var->GetMutable<SelectedRows>();
    trans_selected_rows->set_height(in_selected_rows.height());
    trans_selected_rows->set_rows(in_selected_rows.rows());
    trans_selected_rows->mutable_value()->ShareDataWith(tensor);
  } else {
    PADDLE_THROW("unknown var type");
  }
}
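
// A minimal usage sketch (hypothetical caller; in practice the operator
// framework invokes these routines when a kernel's expected place, layout or
// data type differs from the variable's). OpKernelType construction is shown
// schematically; see op_kernel_type.h for the exact signature.
//
//   OpKernelType kernel_type_for_var(proto::VarType::FP32,
//                                    platform::CPUPlace());
//   OpKernelType expected_kernel_type(proto::VarType::FP32,
//                                     platform::CUDAPlace(0));
//   Tensor transformed;
//   TransferData(expected_kernel_type, kernel_type_for_var, in_tensor,
//                &transformed);  // device transform: CPU -> GPU
//   SetTensorToVariable(in_var, transformed, &out_var);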

}  // namespace framework
}  // namespace paddle