// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once
#include <paddle/fluid/lite/cuda/blas.h>
#include <memory>
#include <vector>
#include "paddle/fluid/lite/core/target_wrapper.h"

#ifdef PADDLE_WITH_CUDA
#include "paddle/fluid/lite/cuda/cuda_utils.h"
#endif

namespace paddle {
namespace lite {

#ifdef LITE_WITH_CUDA
// Per-kernel-run device resources for CUDA. Only works with CUDA kernels.
struct CUDAContext {
  // overall information
  // Stream on which kernel computation is enqueued.
  cudaStream_t exec_stream;
  // Stream for host<->device IO, kept separate from exec_stream.
  cudaStream_t io_stream;

  // cuBLAS wrapper for FP32 math (see lite/cuda/blas.h).
  // not thread-safe, should allocate for each thread.
  std::shared_ptr<cuda::Blas<float>> blas_fp32;

  // kernel information
  // Events recorded around a kernel's inputs/outputs — presumably used to
  // order work between io_stream and exec_stream; confirm against kernel code.
  std::vector<cudaEvent_t> input_events;
  std::vector<cudaEvent_t> output_events;
};
#endif

#ifdef LITE_WITH_X86
// Placeholder context for X86 (CPU) kernels: no device-specific
// resources are needed yet, so the struct is intentionally empty.
struct X86Context {
  // overall information
  // kernel information
};
#endif

// Context for running a kernel.
// Holds the necessary resource and information.
class KernelContext {
 public:
#ifdef LITE_WITH_CUDA
56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81
  CUDAContext& AsCudaContext() {
    if (target_ != TARGET(kUnk)) {
      CHECK(target_ == TARGET(kCUDA));
    } else {
      target_ = TARGET(kCUDA);
      cuda_ctx_.reset(new CUDAContext);
    }
    return *cuda_ctx_;
  }
#endif  // LITE_WITH_CUDA

#ifdef LITE_WITH_X86
  X86Context& AsX86Context() {
    if (target_ != TARGET(kUnk)) {
      CHECK(target_ == TARGET(kX86));
    } else {
      target_ = TARGET(kX86);
      x86_ctx_.reset(new X86Context);
    }
    return *x86_ctx_;
  }
#endif  // lite_with_x86

 private:
#ifdef LITE_WITH_CUDA
  std::unique_ptr<CUDAContext> cuda_ctx_;
S
superjomn 已提交
82 83 84
#endif

#ifdef LITE_WITH_X86
85
  std::unique_ptr<X86Context> x86_ctx_;
S
superjomn 已提交
86
#endif
87 88

  TargetType target_{TARGET(kUnk)};
S
superjomn 已提交
89 90
};

}  // namespace lite
}  // namespace paddle