place.cc 2.5 KB
Newer Older
1
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
L
liaogang 已提交
2

L
Luo Tao 已提交
3 4 5
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
L
liaogang 已提交
6

L
Luo Tao 已提交
7
    http://www.apache.org/licenses/LICENSE-2.0
L
liaogang 已提交
8

L
Luo Tao 已提交
9 10 11 12 13
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
L
liaogang 已提交
14

Y
Yi Wang 已提交
15
#include "paddle/fluid/platform/place.h"
Y
Yi Wang 已提交
16

D
dzhwinter 已提交
17 18 19 20 21 22
// --benchmark: global debugging/profiling switch (default: off).
// When enabled, scope deletion becomes synchronous and extra memory-usage
// logging is emitted; CUDA, which is asynchronous by default, is forced to
// run ops in synchronous mode so timings are attributable.
// NOTE: trailing spaces inside the literals matter — adjacent string
// literals are concatenated, so without them words run together.
DEFINE_bool(benchmark, false,
            "Doing memory benchmark. It will make deleting scope synchronized, "
            "and add some memory usage logs. "
            "Default cuda is asynchronous device, set to True will "
            "force op run in synchronous mode.");

Y
Yi Wang 已提交
23 24
namespace paddle {
namespace platform {
Y
Yi Wang 已提交
25 26 27

namespace detail {

L
liaogang 已提交
28
// Visitor that streams a human-readable name for each Place alternative,
// e.g. "CPUPlace" or "CUDAPlace(0)". Used by operator<<(std::ostream&, Place).
class PlacePrinter : public boost::static_visitor<> {
 public:
  explicit PlacePrinter(std::ostream &os) : os_(os) {}
  void operator()(const CPUPlace &) { os_ << "CPUPlace"; }
  void operator()(const CUDAPlace &p) {
    os_ << "CUDAPlace(" << p.device << ")";
  }
  void operator()(const XPUPlace &p) { os_ << "XPUPlace(" << p.device << ")"; }
  // Parameter left unnamed: CUDAPinnedPlace carries no device id, so nothing
  // from it is printed (also avoids an unused-parameter warning and matches
  // the CPUPlace overload above).
  void operator()(const CUDAPinnedPlace &) { os_ << "CUDAPinnedPlace"; }

 private:
  std::ostream &os_;  // destination stream; borrowed, not owned
};

Y
Yi Wang 已提交
42
}  // namespace detail
Y
Yi Wang 已提交
43

Y
Yi Wang 已提交
44
bool is_gpu_place(const Place &p) {
D
dzhwinter 已提交
45
  return boost::apply_visitor(IsCUDAPlace(), p);
Y
Yi Wang 已提交
46 47
}

48 49 50 51
bool is_xpu_place(const Place &p) {
  return boost::apply_visitor(IsXPUPlace(), p);
}

C
chengduoZH 已提交
52 53 54 55 56 57 58
bool is_cpu_place(const Place &p) {
  return boost::apply_visitor(IsCPUPlace(), p);
}

bool is_cuda_pinned_place(const Place &p) {
  return boost::apply_visitor(IsCUDAPinnedPlace(), p);
}
T
tensor-tang 已提交
59

Y
Yi Wang 已提交
60
bool places_are_same_class(const Place &p1, const Place &p2) {
Y
Yu Yang 已提交
61
  return p1.which() == p2.which();
Y
Yi Wang 已提交
62 63
}

64 65
bool is_same_place(const Place &p1, const Place &p2) {
  if (places_are_same_class(p1, p2)) {
C
chengduoZH 已提交
66
    if (is_cpu_place(p1) || is_cuda_pinned_place(p1)) {
67
      return true;
68 69
    } else if (is_xpu_place(p1)) {
      return BOOST_GET_CONST(XPUPlace, p1) == BOOST_GET_CONST(XPUPlace, p2);
70
    } else {
71
      return BOOST_GET_CONST(CUDAPlace, p1) == BOOST_GET_CONST(CUDAPlace, p2);
72 73 74 75 76 77
    }
  } else {
    return false;
  }
}

Y
Yi Wang 已提交
78 79
std::ostream &operator<<(std::ostream &os, const Place &p) {
  detail::PlacePrinter printer(os);
L
liaogang 已提交
80 81
  boost::apply_visitor(printer, p);
  return os;
Y
Yi Wang 已提交
82 83
}

Y
Yi Wang 已提交
84 85
}  // namespace platform
}  // namespace paddle