/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/operators/lrn_op.h"
#include "paddle/fluid/platform/mkldnn_reuse.h"

namespace paddle {
namespace operators {

using paddle::framework::Tensor;
using paddle::platform::MKLDNNDeviceContext;

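// Forward LRN implemented with MKL-DNN (oneDNN): the kernel validates its
// preconditions (float data on CPUPlace) and delegates primitive creation
// and memory management to LRNMKLDNNHandler.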
template <typename T>
class LRNMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
 public:
  void Compute(const paddle::framework::ExecutionContext& ctx) const override {
    const bool is_float_type = std::is_same<T, float>::value;
    PADDLE_ENFORCE_EQ(
        is_float_type, true,
        platform::errors::PreconditionNotMet("DNNL LRN must use float data."));
    PADDLE_ENFORCE_EQ(platform::is_cpu_place(ctx.GetPlace()), true,
                      paddle::platform::errors::PreconditionNotMet(
                          "Operator DNNL LRN must use CPUPlace"));
    auto& dev_ctx =
        ctx.template device_context<platform::MKLDNNDeviceContext>();
    const auto& mkldnn_engine = dev_ctx.GetEngine();

    auto x = ctx.Input<Tensor>("X");
    auto out = ctx.Output<Tensor>("Out");
    auto mid = ctx.Output<Tensor>("MidOut");

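    // The handler caches the LRN primitive and its memory objects in the
    // device context, keyed in part by the output name, so repeated
    // executions with the same configuration reuse them.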
    platform::LRNMKLDNNHandler<T> handler(
        ctx, dev_ctx, mkldnn_engine, ctx.GetPlace(), x, ctx.OutputName("Out"));

    auto src_memory = handler.AcquireSrcMemory(x);
    auto dst_memory = handler.AcquireDstMemory(out);

    auto lrn_p = handler.AcquireForwardPrimitive();

    auto workspace_memory = handler.AcquireWorkspaceMemory(mid);
    mid->set_layout(framework::DataLayout::kMKLDNN);

    mkldnn::stream astream(mkldnn_engine);
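    // A workspace is only created when the primitive was built for training
    // (is_test == false); MKL-DNN stores in it the data needed by the
    // backward pass. For inference its descriptor is zero-sized.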
    if (!workspace_memory->get_desc().is_zero()) {
      mid->set_format(platform::GetMKLDNNFormat(*workspace_memory));
      lrn_p->execute(astream, {{MKLDNN_ARG_SRC, *src_memory},
                               {MKLDNN_ARG_DST, *dst_memory},
                               {MKLDNN_ARG_WORKSPACE, *workspace_memory}});
    } else {
      // mid has to be allocated and filled with the attribute k
      // to pass LRN unit tests
      // TODO(jczaja): Disable checking mid in unit tests (Require API change)
      mid->mutable_data<T>(ctx.GetPlace());
      auto e_mid = framework::EigenTensor<T, 4>::From(*mid);
      const float k = ctx.Attr<float>("k");
      e_mid = e_mid.constant(k);
      mid->set_format(platform::GetMKLDNNFormat(*dst_memory));

      lrn_p->execute(astream, {{MKLDNN_ARG_SRC, *src_memory},
                               {MKLDNN_ARG_DST, *dst_memory}});
    }
    astream.wait();

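    // Record the MKL-DNN layout and the memory format chosen by the
    // primitive so downstream operators can interpret the output.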
    out->set_layout(framework::DataLayout::kMKLDNN);
    out->set_format(platform::GetMKLDNNFormat(*dst_memory));
  }
};

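// Backward LRN implemented with MKL-DNN: consumes the forward input X, the
// gradient of Out and the forward workspace (MidOut), and produces the
// gradient of X.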
template <typename T>
class LRNMKLDNNGradOpKernel : public paddle::framework::OpKernel<T> {
 public:
  void Compute(const paddle::framework::ExecutionContext& ctx) const override {
    const bool is_float_type = std::is_same<T, float>::value;
    PADDLE_ENFORCE_EQ(is_float_type, true,
                      platform::errors::PreconditionNotMet(
                          "DNNL LRN GradOpKernel must use float data."));
    PADDLE_ENFORCE_EQ(platform::is_cpu_place(ctx.GetPlace()), true,
                      paddle::platform::errors::PreconditionNotMet(
                          "Operator DNNL LRNGrad must use CPUPlace"));
    PADDLE_ENFORCE_EQ(
        ctx.Attr<bool>("is_test"), false,
        platform::errors::PreconditionNotMet(
            "is_test attribute should be set to False in the training phase."));

    auto x = ctx.Input<Tensor>("X");
    auto mid = ctx.Input<Tensor>("MidOut");

    auto out_grad = ctx.Input<Tensor>(framework::GradVarName("Out"));
    auto x_grad = ctx.Output<Tensor>(framework::GradVarName("X"));

    const int n = ctx.Attr<int>("n");
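    // oneDNN normalizes alpha by the window size n inside its LRN formula,
    // while Paddle's attribute is the per-element alpha, so pre-scale it by
    // n here to keep the semantics identical.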
    const float alpha = ctx.Attr<float>("alpha") * static_cast<float>(n);
    const float beta = ctx.Attr<float>("beta");
    const float k = ctx.Attr<float>("k");

    auto& dev_ctx = ctx.template device_context<MKLDNNDeviceContext>();

    auto dims = paddle::framework::vectorize<int64_t>(x->dims());

    platform::LRNMKLDNNHandler<T> handler(dims, n, alpha, beta, k, x->format(),
                                          out_grad->format(), dev_ctx,
                                          ctx.GetPlace(), ctx.InputName("Out"));

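    // Feed the workspace produced by the forward pass (stored in MidOut)
    // back to MKL-DNN; it is required to compute the LRN gradient.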
    auto src_memory = handler.AcquireSrcMemory(x);
    auto workspace = handler.AcquireBackwardWorkspaceMemory(mid);
    auto diff_dst_memory = handler.AcquireDiffDstMemory(out_grad);
    auto diff_src_memory = handler.AcquireDiffSrcMemory(x_grad);

    auto lrn_bwd = handler.AcquireBackwardPrimitive();

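    // Execute the backward primitive: src, diff_dst and the workspace go
    // in; diff_src (the gradient of X) comes out.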
    mkldnn::stream astream(dev_ctx.GetEngine());
    lrn_bwd->execute(astream, {{MKLDNN_ARG_SRC, *src_memory},
                               {MKLDNN_ARG_DIFF_DST, *diff_dst_memory},
                               {MKLDNN_ARG_DIFF_SRC, *diff_src_memory},
                               {MKLDNN_ARG_WORKSPACE, *workspace}});
    astream.wait();

    x_grad->set_layout(framework::DataLayout::kMKLDNN);
    x_grad->set_format(platform::GetMKLDNNFormat(*diff_src_memory));
  }
};
}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;

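// As enforced by the kernels above, only float tensors on CPUPlace are
// supported.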
REGISTER_OP_KERNEL(lrn, MKLDNN, paddle::platform::CPUPlace,
                   ops::LRNMKLDNNOpKernel<float>);
REGISTER_OP_KERNEL(lrn_grad, MKLDNN, paddle::platform::CPUPlace,
                   ops::LRNMKLDNNGradOpKernel<float>);