/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <memory>
#include "paddle/fluid/operators/concat_op.h"
#include "paddle/fluid/platform/mkldnn_helper.h"
#include "paddle/fluid/platform/mkldnn_reuse.h"

namespace paddle {
namespace operators {

using framework::DataLayout;
using framework::Tensor;
using mkldnn::memory;
using mkldnn::primitive;
using mkldnn::concat;
using mkldnn::stream;
using platform::to_void_cast;

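// Check that every input tensor uses the MKL-DNN layout and has a defined
// memory format before the concat primitive is constructed.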
static void EnforceLayouts(const std::vector<const Tensor*> inputs) {
  for (auto* input : inputs) {
    PADDLE_ENFORCE_EQ(
        input->layout(), DataLayout::kMKLDNN,
        platform::errors::InvalidArgument("Wrong layout set for Input tensor"));
    PADDLE_ENFORCE_NE(
        input->format(), MKLDNNMemoryFormat::undef,
        platform::errors::InvalidArgument("Wrong format set for Input tensor"));
  }
}

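// Build an MKL-DNN memory descriptor from a tensor's dimensions, data type
// and current memory format.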
static memory::desc CreateMemDesc(const Tensor& input,
                                  const memory::data_type& dt) {
  const auto dims = paddle::framework::vectorize<int64_t>(input.dims());
  const auto format = input.format();
  auto mem_desc = memory::desc(dims, dt, format);
  return mem_desc;
}

static platform::CPUPlace GetCpuPlace(
    const paddle::framework::ExecutionContext& ctx) {
  auto place = ctx.GetPlace();
  PADDLE_ENFORCE(paddle::platform::is_cpu_place(place),
                 platform::errors::InvalidArgument("It must use CPUPlace."));
  return BOOST_GET_CONST(platform::CPUPlace, place);
}

static const mkldnn::engine& GetMKLDNNEngine(
    const paddle::framework::ExecutionContext& ctx) {
  auto& dev_ctx = ctx.template device_context<platform::MKLDNNDeviceContext>();
  return dev_ctx.GetEngine();
}

// From the multi-input argument, gather only the non-empty inputs
static const std::vector<const Tensor*> ReduceMultiInput(
    const std::vector<const Tensor*>& inputs) {
  std::vector<const Tensor*> reduced(inputs.size());
  auto end_it = std::copy_if(inputs.begin(), inputs.end(), reduced.begin(),
                             [](const Tensor* t) { return t->numel() > 0; });
  reduced.resize(std::distance(reduced.begin(), end_it));
  return reduced;
}

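// Build the shape part of the cache key: the full dims of the first input
// plus the leading dimension of every remaining input.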
static const std::vector<int> GetDimsForKey(
    const std::vector<const Tensor*>& inputs) {
  auto dims_key = paddle::framework::vectorize<int>(inputs[0]->dims());
  for (auto it = std::next(inputs.begin()); it != inputs.end(); ++it) {
    dims_key.push_back((*it)->dims()[0]);
  }
  return dims_key;
}

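// Creates the concat primitive descriptor, the primitive itself and the
// source/destination memories, so that all of them can be cached in the
// device context and reused on subsequent calls.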
template <typename T>
class ConcatPrimitiveFactory {
 public:
  concat::primitive_desc CreateConcatPrimDescriptor(
      const std::vector<const Tensor*> multi_input, Tensor* output,
      int concat_axis, const mkldnn::engine& mkldnn_engine,
      const memory::data_type& dt = memory::data_type::f32) {
    CreateSourcesDescriptors(multi_input, mkldnn_engine, dt);
    auto dst_desc = CreateDstMemDescriptor(output, dt);
    return concat::primitive_desc(dst_desc, concat_axis, srcs_d, mkldnn_engine);
  }

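  // Allocate the output buffer, wrap it as the destination memory and
  // instantiate the concat primitive.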
  concat CreateConcatPrimitive(const concat::primitive_desc& concat_pd,
                               Tensor* output, platform::CPUPlace place,
                               const mkldnn::engine& mkldnn_engine) {
    dst_mem = mkldnn::memory(
        concat_pd.dst_desc(), mkldnn_engine,
        output->mutable_data<T>(place, concat_pd.dst_desc().get_size()));

    return concat(concat_pd);
  }

  void SetSrcDataHandleByIndex(const std::vector<memory>& srcs, const size_t& i,
                               void* handler) {
    srcs[i].set_data_handle(handler);
  }

  void SetDstDataHandle(const memory& dst_mem, void* handler) {
    dst_mem.set_data_handle(handler);
  }

  std::vector<memory> GetSrcs() { return srcs; }

  memory GetDst() { return dst_mem.get(); }

 private:
  memory::desc CreateDstMemDescriptor(Tensor* output,
                                      const memory::data_type& dt) {
    auto dst_dims = paddle::framework::vectorize<int64_t>(output->dims());
    return memory::desc(dst_dims, dt, MKLDNNMemoryFormat::any);
  }

  void CreateSourcesDescriptors(const std::vector<const Tensor*> multi_input,
                                const mkldnn::engine& mkldnn_engine,
                                const memory::data_type& dt) {
    for (size_t i = 0; i < multi_input.size(); i++) {
      auto mem_desc = CreateMemDesc(*multi_input[i], dt);
      srcs_d.push_back(mem_desc);
      srcs.push_back(memory(mem_desc, mkldnn_engine,
                            to_void_cast(multi_input[i]->data<T>())));
    }
  }

 private:
  std::vector<memory::desc> srcs_d;
  std::vector<mkldnn::memory> srcs;
  boost::optional<mkldnn::memory> dst_mem;
};

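// Concat kernel that caches the MKL-DNN primitive and its memories in the
// device context, keyed on input shapes, input count, output name and type.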
template <typename T>
class ConcatMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
 public:
  void Compute(const paddle::framework::ExecutionContext& ctx) const override {
    // If any of concat's inputs has zero elements, it is dropped, so the
    // effective size of multi_input may be smaller than the declared input
    auto multi_input = ReduceMultiInput(ctx.MultiInput<Tensor>("X"));
    EnforceLayouts(multi_input);
    Tensor* output = ctx.Output<Tensor>("Out");
    int concat_axis = ctx.Attr<int>("axis");
    const int rank = multi_input[0]->dims().size();
    PADDLE_ENFORCE_EQ(
        concat_axis >= -rank && concat_axis < rank, true,
        platform::errors::InvalidArgument(
            "The axis is expected to be in range of [%d, %d), but got %d",
            -rank, rank, concat_axis));
    platform::MKLDNNDeviceContext::tls().log_lib_version();
    if (concat_axis < 0) {
      concat_axis = concat_axis + rank;
    }
    auto& dev_ctx =
        ctx.template device_context<paddle::platform::MKLDNNDeviceContext>();
    auto place = GetCpuPlace(ctx);

    memory::data_type dt =
        paddle::framework::ToMKLDNNDataType(multi_input[0]->type());

    ConcatPrimitiveFactory<T> prim_creator;
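    // Compose the cache key from the input dims, the number of inputs, the
    // output name and the data type; extend it with thread info when needed.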
    std::string key =
        platform::CreateKey(dev_ctx, GetDimsForKey(multi_input),
                            multi_input.size(), ctx.OutputName("Out"), dt);
    key = platform::ExtendKeyWithThreadInfoIfNeeded(dev_ctx, key);

    const std::string key_prim = key + "@concat_p";
    const std::string key_concat_pd = key + "@concat_pd";
    const std::string key_srcs = key + "@concat_srcs";
    const std::string key_dst = key + "@concat_dst";

    std::shared_ptr<concat::primitive_desc> concat_pd;
    std::shared_ptr<std::vector<memory>> srcs;
    std::shared_ptr<memory> dst_mem;
    auto concat_p = std::static_pointer_cast<concat>(dev_ctx.GetBlob(key_prim));

    const auto& mkldnn_engine = dev_ctx.GetEngine();
    if (concat_p == nullptr) {
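      // Cache miss: build the primitive descriptor, the primitive and the
      // memories, then store them in the device context for reuse.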
      concat_pd = std::make_shared<concat::primitive_desc>(
          prim_creator.CreateConcatPrimDescriptor(
              multi_input, output, concat_axis, mkldnn_engine, dt));
      concat_p = std::make_shared<concat>(prim_creator.CreateConcatPrimitive(
          *concat_pd, output, place, mkldnn_engine));
      srcs = std::make_shared<std::vector<memory>>(prim_creator.GetSrcs());
      dst_mem = std::make_shared<memory>(prim_creator.GetDst());
      dev_ctx.SetBlob(key_prim, concat_p);
      dev_ctx.SetBlob(key_concat_pd, concat_pd);
      dev_ctx.SetBlob(key_srcs, srcs);
      dev_ctx.SetBlob(key_dst, dst_mem);
    } else {
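      // Cache hit: reuse the stored primitive and rebind the data handles to
      // this iteration's input and output buffers.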
      srcs = std::static_pointer_cast<std::vector<memory>>(
          dev_ctx.GetBlob(key_srcs));
      dst_mem = std::static_pointer_cast<memory>(dev_ctx.GetBlob(key_dst));
      concat_pd = std::static_pointer_cast<concat::primitive_desc>(
          dev_ctx.GetBlob(key_concat_pd));
      for (size_t i = 0; i < multi_input.size(); i++) {
        prim_creator.SetSrcDataHandleByIndex(
            *srcs, i, to_void_cast<T>(multi_input[i]->data<T>()));
      }
      prim_creator.SetDstDataHandle(
          *dst_mem,
          output->mutable_data<T>(place, concat_pd->dst_desc().get_size()));
    }

    auto& astream = platform::MKLDNNDeviceContext::tls().get_stream();
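    // Bind every source memory to its MKLDNN_ARG_MULTIPLE_SRC slot and the
    // destination to MKLDNN_ARG_DST, then run concat on the per-thread stream.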
    std::unordered_map<int, memory> args;
    for (size_t i = 0; i < multi_input.size(); ++i) {
      args.insert({MKLDNN_ARG_MULTIPLE_SRC + i, (*srcs).at(i)});
    }
    args.insert({MKLDNN_ARG_DST, *dst_mem});

    concat_p->execute(astream, args);
    astream.wait();

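    // Record the MKL-DNN layout and format on the output tensor so downstream
    // operators can interpret its memory correctly.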
    output->set_layout(DataLayout::kMKLDNN);
    output->set_format(platform::GetMKLDNNFormat(*dst_mem));
  }
};
}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;

REGISTER_OP_KERNEL(concat, MKLDNN, ::paddle::platform::CPUPlace,
                   ops::ConcatMKLDNNOpKernel<float>,
                   ops::ConcatMKLDNNOpKernel<paddle::platform::bfloat16>,
                   ops::ConcatMKLDNNOpKernel<int8_t>,
                   ops::ConcatMKLDNNOpKernel<uint8_t>);