// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once

#include <llvm/ADT/SmallVector.h>

#include "paddle/infrt/host_context/kernel_utils.h"
#include "paddle/infrt/kernel/pten/infershaped/infershaped_kernel_launcher.h"
#include "paddle/infrt/kernel/pten/infershaped/infershaped_utils.h"
21 22 23 24 25

// This file contains an example of the infershaped ElementwiseAdd kernel.
// Some of the following code should be generated from PTEN by a script.

namespace infrt {
namespace kernel {

// Placeholder infershape function: a real implementation would derive the
// shape metadata of the output `c` from the inputs `a` and `b`.
static void ElementwiseAddInferShape(const ::pten::MetaTensor& a,
                                     const ::pten::MetaTensor& b,
                                     ::pten::MetaTensor* c) {}

// Placeholder compute function: a real implementation would compute a + b
// into `c` using the given CPU context.
static void ElementwiseAdd(const ::pten::CPUContext& /*Context*/,
                           const ::pten::DenseTensor& a,
                           const ::pten::DenseTensor& b,
                           ::pten::DenseTensor* c) {}

template <typename KernelFunc,
          KernelFunc kernel,
          typename InferShapedFunc,
          InferShapedFunc infershape>
class KernelLauncher : public InferShapedKernelLauncher {
 public:
  static const uint16_t num_input_tensors{InferShapeHelper<KernelFunc>::count};
  static const bool turn_on_infer_shape_cache{true};
  void Invoke(host_context::KernelFrame* frame) override {
    // Build the infershape KernelFrame if needed.
    // TODO(Superjomn) add unlikely here.
    if (infershape_kernel_frame_builder.IsEmpty()) {
      CreateKernelFrameForInferShape(frame);
    }
    // Re-run infershape when the shape cache is disabled or an input shape
    // has changed since the last invocation.
    if (!turn_on_infer_shape_cache || IsShapeChanged(num_input_tensors)) {
      ::infrt::host_context::KernelImpl<InferShapedFunc, infershape>::Invoke(
          &infershape_kernel_frame_builder);
      BuildInferShapeCache(num_input_tensors);
    }

    ::infrt::host_context::KernelImpl<KernelFunc, kernel>::Invoke(frame);
  }
};

template <typename KernelFunc,
          KernelFunc kernel,
          typename InferShapedFunc,
          InferShapedFunc infershape>
void KernelLauncherFunc(
    KernelLauncher<KernelFunc, kernel, InferShapedFunc, infershape> launcher,
    host_context::KernelFrame* frame) {
  launcher.Invoke(frame);
}
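
// A minimal sketch of how this launcher might be wired into the infrt kernel
// registry. It would live in a registration .cc file rather than this header;
// the registry call and the kernel name "pten.elementwise_add" below are only
// illustrative assumptions.
//
//   void RegisterExampleKernels(host_context::KernelRegistry* registry) {
//     registry->AddKernel(
//         "pten.elementwise_add",
//         std::bind(&KernelLauncherFunc<decltype(&ElementwiseAdd),
//                                       &ElementwiseAdd,
//                                       decltype(&ElementwiseAddInferShape),
//                                       &ElementwiseAddInferShape>,
//                   KernelLauncher<decltype(&ElementwiseAdd),
//                                  &ElementwiseAdd,
//                                  decltype(&ElementwiseAddInferShape),
//                                  &ElementwiseAddInferShape>(),
//                   std::placeholders::_1));
//   }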

}  // namespace kernel
}  // namespace infrt