/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/platform/place.h"
// When true, scope deletion becomes synchronous and extra memory-usage logs
// are emitted; CUDA ops, asynchronous by default, are forced to run
// synchronously so per-op memory can be measured.
// NOTE: spaces added between adjacent string literals — the originals
// concatenated into "logs.Default" and "willforce" in the help text.
DEFINE_bool(benchmark, false,
            "Doing memory benchmark. It will make deleting scope synchronized, "
            "and add some memory usage logs. "
            "Default cuda is asynchronous device, set to True will "
            "force op run in synchronous mode.");

namespace paddle {
namespace platform {
namespace detail {

// Visitor that writes a human-readable name of a Place variant to an output
// stream, e.g. "CUDAPlace(0)". Used by operator<<(std::ostream&, const Place&).
class PlacePrinter : public boost::static_visitor<> {
 public:
  explicit PlacePrinter(std::ostream &os) : os_(os) {}
  void operator()(const CPUPlace &) { os_ << "CPUPlace"; }
  void operator()(const CUDAPlace &p) {
    os_ << "CUDAPlace(" << p.device << ")";
  }
  void operator()(const XPUPlace &p) { os_ << "XPUPlace(" << p.device << ")"; }
  void operator()(const NPUPlace &p) { os_ << "NPUPlace(" << p.device << ")"; }
  // The pinned-memory place carries no device id; the parameter is left
  // unnamed (matching the CPUPlace overload) to avoid an
  // unused-parameter warning.
  void operator()(const CUDAPinnedPlace &) { os_ << "CUDAPinnedPlace"; }

 private:
  std::ostream &os_;  // not owned; must outlive this printer
};

}  // namespace detail

bool is_gpu_place(const Place &p) {
D
dzhwinter 已提交
46
  return boost::apply_visitor(IsCUDAPlace(), p);
Y
Yi Wang 已提交
47 48
}

bool is_xpu_place(const Place &p) {
  return boost::apply_visitor(IsXPUPlace(), p);
}

bool is_npu_place(const Place &p) {
  return boost::apply_visitor(IsNPUPlace(), p);
}

bool is_cpu_place(const Place &p) {
  return boost::apply_visitor(IsCPUPlace(), p);
}

bool is_cuda_pinned_place(const Place &p) {
  return boost::apply_visitor(IsCUDAPinnedPlace(), p);
}
bool places_are_same_class(const Place &p1, const Place &p2) {
Y
Yu Yang 已提交
66
  return p1.which() == p2.which();
Y
Yi Wang 已提交
67 68
}

bool is_same_place(const Place &p1, const Place &p2) {
  if (places_are_same_class(p1, p2)) {
C
chengduoZH 已提交
71
    if (is_cpu_place(p1) || is_cuda_pinned_place(p1)) {
72
      return true;
73 74
    } else if (is_xpu_place(p1)) {
      return BOOST_GET_CONST(XPUPlace, p1) == BOOST_GET_CONST(XPUPlace, p2);
75 76
    } else if (is_npu_place(p1)) {
      return BOOST_GET_CONST(NPUPlace, p1) == BOOST_GET_CONST(NPUPlace, p2);
77
    } else {
78
      return BOOST_GET_CONST(CUDAPlace, p1) == BOOST_GET_CONST(CUDAPlace, p2);
79 80 81 82 83 84
    }
  } else {
    return false;
  }
}

std::ostream &operator<<(std::ostream &os, const Place &p) {
  detail::PlacePrinter printer(os);
L
liaogang 已提交
87 88
  boost::apply_visitor(printer, p);
  return os;
Y
Yi Wang 已提交
89 90
}

}  // namespace platform
}  // namespace paddle