/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/platform/place.h"

#include "paddle/fluid/platform/flags.h"  // for PADDLE_DEFINE_EXPORTED_bool

PADDLE_DEFINE_EXPORTED_bool(
    benchmark, false,
    "Enable memory benchmarking: scope deletion becomes synchronous and "
    "some memory usage logs are added. CUDA devices are asynchronous by "
    "default; setting this flag to true forces ops to run in synchronous "
    "mode.");

namespace paddle {
namespace platform {

namespace detail {

// Visitor that streams a human-readable name for each Place alternative,
// e.g. "CUDAPlace(0)".
class PlacePrinter : public boost::static_visitor<> {
 public:
  explicit PlacePrinter(std::ostream &os) : os_(os) {}
  void operator()(const CPUPlace &) { os_ << "CPUPlace"; }
  void operator()(const CUDAPlace &p) {
    os_ << "CUDAPlace(" << p.device << ")";
  }
  void operator()(const XPUPlace &p) { os_ << "XPUPlace(" << p.device << ")"; }
  void operator()(const NPUPlace &p) { os_ << "NPUPlace(" << p.device << ")"; }
  void operator()(const NPUPinnedPlace &) { os_ << "NPUPinnedPlace"; }
  void operator()(const CUDAPinnedPlace &) { os_ << "CUDAPinnedPlace"; }

 private:
  std::ostream &os_;
};

}  // namespace detail

// Place-class predicates: each one visits the variant and reports whether
// it currently holds the corresponding place type.
bool is_gpu_place(const Place &p) {
  return boost::apply_visitor(IsCUDAPlace(), p);
}

bool is_xpu_place(const Place &p) {
  return boost::apply_visitor(IsXPUPlace(), p);
}

bool is_npu_place(const Place &p) {
  return boost::apply_visitor(IsNPUPlace(), p);
}

bool is_cpu_place(const Place &p) {
  return boost::apply_visitor(IsCPUPlace(), p);
}

bool is_cuda_pinned_place(const Place &p) {
  return boost::apply_visitor(IsCUDAPinnedPlace(), p);
}

bool is_npu_pinned_place(const Place &p) {
  return boost::apply_visitor(IsNPUPinnedPlace(), p);
}

// Two places are of the same class iff the variant holds the same
// alternative (e.g. any two CUDAPlaces, regardless of device id).
bool places_are_same_class(const Place &p1, const Place &p2) {
  return p1.which() == p2.which();
}

// Exact equality: same place class, and, for device places, the same
// device id.
bool is_same_place(const Place &p1, const Place &p2) {
  if (places_are_same_class(p1, p2)) {
    if (is_cpu_place(p1) || is_cuda_pinned_place(p1)) {
      return true;
    } else if (is_xpu_place(p1)) {
      return BOOST_GET_CONST(XPUPlace, p1) == BOOST_GET_CONST(XPUPlace, p2);
    } else if (is_npu_place(p1)) {
      return BOOST_GET_CONST(NPUPlace, p1) == BOOST_GET_CONST(NPUPlace, p2);
    } else {
      return BOOST_GET_CONST(CUDAPlace, p1) == BOOST_GET_CONST(CUDAPlace, p2);
    }
  } else {
    return false;
  }
}
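
// Behavior sketch, derived from the checks above:
//   is_same_place(CUDAPlace(0), CUDAPlace(0))  -> true
//   is_same_place(CUDAPlace(0), CUDAPlace(1))  -> false (same class,
//                                                        different device)
//   is_same_place(CPUPlace(), CUDAPlace(0))    -> false (different classes)
//   is_same_place(CPUPlace(), CPUPlace())      -> true  (CPUPlace carries
//                                                        no device id)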

std::ostream &operator<<(std::ostream &os, const Place &p) {
  detail::PlacePrinter printer(os);
  boost::apply_visitor(printer, p);
  return os;
}
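
// Usage sketch (assumes a CUDA-enabled build and <iostream>; the device id
// is illustrative):
//   Place place = CUDAPlace(0);
//   std::cout << place;      // prints "CUDAPlace(0)"
//   is_gpu_place(place);     // true
//   is_cpu_place(place);     // false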

}  // namespace platform
}  // namespace paddle