// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once
#include <cstddef>
#include <iostream>
#include <string>

namespace paddle {
namespace lite {

// Hardware targets a kernel can run on. kLastAsPlaceHolder is a sentinel
// used only for counting; keep it as the final enumerator.
enum class TargetType : int { kHost = 0, kX86, kCUDA, kLastAsPlaceHolder };
// Numeric precisions a kernel can compute in; same sentinel convention.
enum class PrecisionType : int { kFloat = 0, kInt8, kLastAsPlaceHolder };
// Tensor data layouts; same sentinel convention.
enum class DataLayoutType : int { kNCHW = 0, kLastAsPlaceHolder };

// Some helper macro to get a specific TargetType.
// TARGET(kX86) -> paddle::lite::TargetType::kX86
#define TARGET(item__) paddle::lite::TargetType::item__
// TARGET_VAL(kX86) -> the enumerator's underlying integer value.
#define TARGET_VAL(item__) static_cast<int>(TARGET(item__))
// Some helper macro to get a specific PrecisionType.
// PRECISION(kFloat) -> paddle::lite::PrecisionType::kFloat
#define PRECISION(item__) paddle::lite::PrecisionType::item__
// PRECISION_VAL(kFloat) -> the enumerator's underlying integer value.
#define PRECISION_VAL(item__) static_cast<int>(PRECISION(item__))
// DATALAYOUT(kNCHW) -> paddle::lite::DataLayoutType::kNCHW
#define DATALAYOUT(item__) paddle::lite::DataLayoutType::item__

/*
 * Place specifies the execution context of a Kernel or input/output for a
 * kernel. It is used to make the analysis of the MIR more clear and accurate.
 */
S
superjomn 已提交
37 38 39 40
struct Place {
  TargetType target{TARGET(kHost)};
  PrecisionType precision{PRECISION(kFloat)};
  DataLayoutType layout{DATALAYOUT(kNCHW)};
S
Superjomn 已提交
41
  short device{0};  // device ID
S
superjomn 已提交
42

S
superjomn 已提交
43 44
  Place() = default;
  Place(TargetType target, PrecisionType precision,
S
Superjomn 已提交
45 46 47 48 49 50 51
        DataLayoutType layout = DATALAYOUT(kNCHW), short device = 0)
      : target(target), precision(precision), layout(layout), device(device) {}

  bool operator==(const Place& other) const {
    return target == other.target && precision == other.precision &&
           layout == other.layout && device == other.device;
  }
S
superjomn 已提交
52
};
S
superjomn 已提交
53

S
superjomn 已提交
54
constexpr const int kNumPrecisions =
S
superjomn 已提交
55
    PRECISION_VAL(kLastAsPlaceHolder) - PRECISION_VAL(kFloat);
S
superjomn 已提交
56 57
constexpr const int kNumTargets =
    TARGET_VAL(kLastAsPlaceHolder) - TARGET_VAL(kHost);

// Human-readable target names, indexed by the enum's integer value.
static const std::string target2string[] = {"host", "x86", "cuda"};
// Returns the name of `target`; `target` must not be kLastAsPlaceHolder.
static const std::string& TargetToStr(TargetType target) {
  return target2string[static_cast<int>(target)];
}

S
superjomn 已提交
64
static const std::string precision2string[] = {"float", "int8"};
S
superjomn 已提交
65 66 67 68
static const std::string& PrecisionToStr(PrecisionType precision) {
  return precision2string[static_cast<int>(precision)];
}

// Human-readable layout names, indexed by the enum's integer value.
static const std::string datalayout2string[] = {"NCHW"};
// Returns the name of layout `x`; must not be kLastAsPlaceHolder.
static const std::string& DataLayoutToStr(DataLayoutType x) {
  return datalayout2string[static_cast<int>(x)];
}

S
superjomn 已提交
74
// Event sync for multi-stream devices like CUDA and OpenCL.
S
update  
superjomn 已提交
75
// For the devices without support of stream, leave it empty.
S
superjomn 已提交
76 77 78 79 80
template <TargetType Target>
class Event {};

// Memory copy directions.
enum class IoDirection {
  HtoH = 0,  // Host to host
  HtoD,      // Host to device
  DtoH,      // Device to host
};

// This interface should be specified by each kind of target.
// The primary template below is the host fallback: no streams, no events,
// plain heap memory, and no-op copies. Real devices (e.g. CUDA) specialize it.
template <TargetType Target>
class TargetWrapper {
 public:
  using stream_t = int;           // placeholder stream handle for the host
  using event_t = Event<Target>;  // per-target event type

  static size_t num_devices() { return 0; }
  static size_t maximum_stream() { return 0; }

  static void CreateStream(stream_t* stream) {}
  static void DestroyStream(const stream_t& stream) {}

  static void CreateEvent(event_t* event) {}
  static void DestroyEvent(const event_t& event) {}

  static void RecordEvent(const event_t& event) {}
  static void SyncEvent(const event_t& event) {}

  static void StreamSync(const stream_t& stream) {}

  // Raw allocation primitives; callers own the returned buffer and must
  // release it with Free().
  static void* Malloc(size_t size) { return new char[size]; }
  static void Free(void* ptr) { delete[] static_cast<char*>(ptr); }

  // Synchronous copy. NOTE(review): the host default is a no-op — presumably
  // real targets must specialize it; confirm before relying on it.
  static void MemcpySync(void* dst, void* src, size_t size, IoDirection dir) {}
  // Asynchronous copy; targets without stream support fall back to the
  // synchronous path.
  static void MemcpyAsync(void* dst, void* src, size_t size,
                          const stream_t& stream, IoDirection dir) {
    MemcpySync(dst, src, size, dir);
  }
};

}  // namespace lite
}  // namespace paddle