/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/platform/mkldnn_reuse.h"
#include "paddle/phi/kernels/funcs/axis_utils.h"

namespace paddle {
namespace operators {

using paddle::framework::Tensor;
using paddle::platform::MKLDNNDeviceContext;
using paddle::platform::MKLDNNMemDesc;

using dnnl::memory;  // Note: paddle also has a "memory" namespace
using dnnl::primitive;
using dnnl::prop_kind;
using dnnl::softmax_backward;
using dnnl::softmax_forward;
using dnnl::stream;
using platform::to_void_cast;

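// Wraps the oneDNN softmax forward/backward primitive descriptors. The
// no-caching handler rebuilds them on every call from the tensors' mem_desc().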
template <typename T>
class SoftmaxMKLDNNHandler
    : public platform::MKLDNNHandlerNoCachingT<T,
                                               dnnl::softmax_forward,
                                               dnnl::softmax_backward> {
 public:
  SoftmaxMKLDNNHandler(const dnnl::engine mkldnn_engine,
                       platform::Place cpu_place,
                       const Tensor* input,
                       Tensor* output,
                       const int axis)
      : platform::MKLDNNHandlerNoCachingT<T,
                                          dnnl::softmax_forward,
                                          dnnl::softmax_backward>(mkldnn_engine,
                                                                  cpu_place) {
    PADDLE_ENFORCE_EQ(
        input->dims(),
        output->dims(),
        platform::errors::InvalidArgument(
            "The shape of input and output tensor must be identical."));

    this->AcquireForwardPrimitiveDescriptor(
        prop_kind::forward_scoring, input->mem_desc(), axis);
  }

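  // Grad-pass constructor: oneDNN's softmax backward descriptor requires the
  // forward primitive descriptor as a hint, so both are acquired here.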
  SoftmaxMKLDNNHandler(const framework::ExecutionContext& ctx,
                       const dnnl::engine mkldnn_engine,
                       platform::Place cpu_place,
                       const Tensor* out,
                       const Tensor* out_grad,
                       Tensor* in_x_grad,
                       const std::string& unique_name)
      : platform::MKLDNNHandlerNoCachingT<T,
                                          dnnl::softmax_forward,
                                          dnnl::softmax_backward>(mkldnn_engine,
                                                                  cpu_place) {
    PADDLE_ENFORCE_EQ(out_grad->dims(),
                      in_x_grad->dims(),
                      platform::errors::InvalidArgument(
                          "The shape of softmax_grad's input "
                          "and output must be identical, but shapes differ, "
                          "out_grad: %s in_grad: %s",
                          out_grad->dims(),
                          in_x_grad->dims()));

    auto dims = out_grad->dims();  // input and output share the same shape
    const int axis =
        phi::funcs::CanonicalAxis(ctx.Attr<int>("axis"), dims.size());

    this->AcquireForwardPrimitiveDescriptor(
        prop_kind::forward_scoring, out->mem_desc(), axis);
    this->AcquireBackwardPrimitiveDescriptor(
        out_grad->mem_desc(), out->mem_desc(), axis);
  }
};

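// Forward kernel: runs oneDNN softmax on X along the canonicalized axis and
// writes the result to Out.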
template <typename T>
class SoftmaxMKLDNNKernel : public paddle::framework::OpKernel<T> {
 public:
  void Compute(const paddle::framework::ExecutionContext& ctx) const override {
    auto& dev_ctx = ctx.template device_context<MKLDNNDeviceContext>();
    const auto& mkldnn_engine = dev_ctx.GetEngine();

    const Tensor* input = ctx.Input<Tensor>("X");
    Tensor* output = ctx.Output<Tensor>("Out");
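    // When X and Out share a buffer, softmax can run in place with a single
    // memory object serving as both src and dst.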
    bool is_inplaced = input->IsSharedBufferWith(*output);

    const int axis =
        phi::funcs::CanonicalAxis(ctx.Attr<int>("axis"), input->dims().size());

    SoftmaxMKLDNNHandler<T> handler(
        mkldnn_engine, ctx.GetPlace(), input, output, axis);

    auto softmax_src_memory_p = handler.AcquireSrcMemory(input);
    // For inplace execution, src and dst are the same memory object
    std::shared_ptr<dnnl::memory> softmax_dst_memory_p = nullptr;
    if (is_inplaced) {
      softmax_dst_memory_p = softmax_src_memory_p;
      output->mutable_data<T>(ctx.GetPlace());
    } else {
      softmax_dst_memory_p = handler.AcquireDstMemory(output);
    }
    auto softmax_p = handler.AcquireForwardPrimitive();

    auto& astream = paddle::platform::MKLDNNDeviceContext::tls().get_stream();
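    // execute() may run asynchronously on the stream; wait() below blocks
    // until Out is ready.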
    softmax_p->execute(astream,
                       {{DNNL_ARG_SRC, *softmax_src_memory_p},
                        {DNNL_ARG_DST, *softmax_dst_memory_p}});
    astream.wait();

    const bool is_test = ctx.Attr<bool>("is_test");
    if (!is_test) {
      T* output_data = output->mutable_data<T>(ctx.GetPlace());
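      // Training-only post-processing: clamp every output to at least
      // exp(-64), keeping the probabilities strictly positive.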
      std::for_each(output_data, &output_data[output->numel()], [](T& val) {
        val = std::max(val, static_cast<T>(exp(-64)));
      });
    }

    output->set_mem_desc(softmax_dst_memory_p->get_desc());
  }
};

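// Backward kernel: consumes Out and dOut and produces dX via the oneDNN
// softmax backward primitive (CPU only).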
template <typename T>
class SoftmaxMKLDNNGradKernel : public paddle::framework::OpKernel<T> {
 public:
  void Compute(const paddle::framework::ExecutionContext& ctx) const override {
    PADDLE_ENFORCE_EQ(platform::is_cpu_place(ctx.GetPlace()),
                      true,
                      paddle::platform::errors::PreconditionNotMet(
                          "Operator DNNL SoftmaxGrad must use CPUPlace"));
    auto& dev_ctx = ctx.template device_context<MKLDNNDeviceContext>();
    const auto& mkldnn_engine = dev_ctx.GetEngine();
    const Tensor* output = ctx.Input<Tensor>("Out");
    auto* out_grad = ctx.template Input<Tensor>(framework::GradVarName("Out"));
    auto* in_x_grad = ctx.template Output<Tensor>(framework::GradVarName("X"));

    SoftmaxMKLDNNHandler<T> handler(ctx,
                                    mkldnn_engine,
                                    ctx.GetPlace(),
                                    output,
                                    out_grad,
                                    in_x_grad,
                                    ctx.InputName("Out"));

    auto dst_memory_p = handler.AcquireDstMemory(output);
    auto diff_dst_memory_p = handler.AcquireDiffDstMemory(out_grad);
    auto diff_src_memory_p = handler.AcquireDiffSrcMemory(in_x_grad);

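    // Softmax backward computes diff_src from the forward result (dst) and
    // diff_dst, so no src memory is needed here.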
    auto softmax_bwd_p = handler.AcquireBackwardPrimitive();

    auto& astream = platform::MKLDNNDeviceContext::tls().get_stream();
    softmax_bwd_p->execute(astream,
                           {{DNNL_ARG_DST, *dst_memory_p},
                            {DNNL_ARG_DIFF_DST, *diff_dst_memory_p},
                            {DNNL_ARG_DIFF_SRC, *diff_src_memory_p}});
    astream.wait();

    in_x_grad->set_mem_desc(diff_src_memory_p->get_desc());
  }
};
}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;

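// The forward kernel is registered for float and bfloat16; the backward
// kernel supports float only.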
REGISTER_OP_KERNEL(softmax,
                   MKLDNN,
                   ::paddle::platform::CPUPlace,
                   ops::SoftmaxMKLDNNKernel<float>,
                   ops::SoftmaxMKLDNNKernel<paddle::platform::bfloat16>);
REGISTER_OP_KERNEL(softmax_grad,
                   MKLDNN,
                   ::paddle::platform::CPUPlace,
                   ops::SoftmaxMKLDNNGradKernel<float>);