/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/platform/place.h"
Y
Yi Wang 已提交
16

D
dzhwinter 已提交
17 18 19 20 21 22
DEFINE_bool(benchmark, false,
            "Doing memory benchmark. It will make deleting scope synchronized, "
            "and add some memory usage logs."
            "Default cuda is asynchronous device, set to True will"
            "force op run in synchronous mode.");

Y
Yi Wang 已提交
23 24
namespace paddle {
namespace platform {
Y
Yi Wang 已提交
25 26 27

namespace detail {

L
liaogang 已提交
28
class PlacePrinter : public boost::static_visitor<> {
Y
Yi Wang 已提交
29
 public:
L
liaogang 已提交
30
  explicit PlacePrinter(std::ostream &os) : os_(os) {}
31
  void operator()(const CPUPlace &) { os_ << "CPUPlace"; }
D
dzhwinter 已提交
32 33 34
  void operator()(const CUDAPlace &p) {
    os_ << "CUDAPlace(" << p.device << ")";
  }
35
  void operator()(const XPUPlace &p) { os_ << "XPUPlace(" << p.device << ")"; }
36
  void operator()(const NPUPlace &p) { os_ << "NPUPlace(" << p.device << ")"; }
37
  void operator()(const NPUPinnedPlace &p) { os_ << "NPUPinnedPlace"; }
C
chengduoZH 已提交
38
  void operator()(const CUDAPinnedPlace &p) { os_ << "CUDAPinnedPlace"; }
Y
Yi Wang 已提交
39

Y
Yi Wang 已提交
40
 private:
Y
Yi Wang 已提交
41
  std::ostream &os_;
Y
Yi Wang 已提交
42 43
};

Y
Yi Wang 已提交
44
}  // namespace detail
Y
Yi Wang 已提交
45

Y
Yi Wang 已提交
46
bool is_gpu_place(const Place &p) {
D
dzhwinter 已提交
47
  return boost::apply_visitor(IsCUDAPlace(), p);
Y
Yi Wang 已提交
48 49
}

50 51 52 53
bool is_xpu_place(const Place &p) {
  return boost::apply_visitor(IsXPUPlace(), p);
}

54 55 56 57
bool is_npu_place(const Place &p) {
  return boost::apply_visitor(IsNPUPlace(), p);
}

C
chengduoZH 已提交
58 59 60 61 62 63 64
bool is_cpu_place(const Place &p) {
  return boost::apply_visitor(IsCPUPlace(), p);
}

bool is_cuda_pinned_place(const Place &p) {
  return boost::apply_visitor(IsCUDAPinnedPlace(), p);
}
T
tensor-tang 已提交
65

66 67 68 69
bool is_npu_pinned_place(const Place &p) {
  return boost::apply_visitor(IsNPUPinnedPlace(), p);
}

Y
Yi Wang 已提交
70
bool places_are_same_class(const Place &p1, const Place &p2) {
Y
Yu Yang 已提交
71
  return p1.which() == p2.which();
Y
Yi Wang 已提交
72 73
}

74 75
bool is_same_place(const Place &p1, const Place &p2) {
  if (places_are_same_class(p1, p2)) {
C
chengduoZH 已提交
76
    if (is_cpu_place(p1) || is_cuda_pinned_place(p1)) {
77
      return true;
78 79
    } else if (is_xpu_place(p1)) {
      return BOOST_GET_CONST(XPUPlace, p1) == BOOST_GET_CONST(XPUPlace, p2);
80 81
    } else if (is_npu_place(p1)) {
      return BOOST_GET_CONST(NPUPlace, p1) == BOOST_GET_CONST(NPUPlace, p2);
82
    } else {
83
      return BOOST_GET_CONST(CUDAPlace, p1) == BOOST_GET_CONST(CUDAPlace, p2);
84 85 86 87 88 89
    }
  } else {
    return false;
  }
}

Y
Yi Wang 已提交
90 91
std::ostream &operator<<(std::ostream &os, const Place &p) {
  detail::PlacePrinter printer(os);
L
liaogang 已提交
92 93
  boost::apply_visitor(printer, p);
  return os;
Y
Yi Wang 已提交
94 95
}

Y
Yi Wang 已提交
96 97
}  // namespace platform
}  // namespace paddle