/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/platform/place.h"
// Command-line flag: when true, scope deletion is made synchronous and extra
// memory-usage logging is emitted; ops on (normally asynchronous) CUDA
// devices are forced to run in synchronous mode so measurements are accurate.
// NOTE: trailing spaces inside the literals are required — adjacent string
// literals are concatenated verbatim by the preprocessor.
DEFINE_bool(benchmark, false,
            "Doing memory benchmark. It will make deleting scope synchronized, "
            "and add some memory usage logs. "
            "Default cuda is asynchronous device, set to True will "
            "force op run in synchronous mode.");

namespace paddle {
namespace platform {

namespace detail {

// Visitor that streams a human-readable name for whichever alternative the
// Place variant currently holds (e.g. "CUDAPlace(0)"). Used by the
// operator<< overload defined later in this file.
class PlacePrinter : public boost::static_visitor<> {
 public:
  explicit PlacePrinter(std::ostream &os) : os_(os) {}
  void operator()(const CPUPlace &) { os_ << "CPUPlace"; }
  void operator()(const CUDAPlace &p) {
    // CUDAPlace is the only alternative with per-instance state (device id).
    os_ << "CUDAPlace(" << p.device << ")";
  }
  // Parameter intentionally unnamed (matches the CPUPlace overload):
  // CUDAPinnedPlace carries no state worth printing, and naming it would
  // trigger -Wunused-parameter.
  void operator()(const CUDAPinnedPlace &) { os_ << "CUDAPinnedPlace"; }

 private:
  std::ostream &os_;  // Not owned; must outlive this printer.
};

}  // namespace detail
bool is_gpu_place(const Place &p) {
D
dzhwinter 已提交
44
  return boost::apply_visitor(IsCUDAPlace(), p);
Y
Yi Wang 已提交
45 46
}

bool is_cpu_place(const Place &p) {
  return boost::apply_visitor(IsCPUPlace(), p);
}

bool is_cuda_pinned_place(const Place &p) {
  return boost::apply_visitor(IsCUDAPinnedPlace(), p);
}
bool places_are_same_class(const Place &p1, const Place &p2) {
Y
Yu Yang 已提交
56
  return p1.which() == p2.which();
Y
Yi Wang 已提交
57 58
}

bool is_same_place(const Place &p1, const Place &p2) {
  if (places_are_same_class(p1, p2)) {
C
chengduoZH 已提交
61
    if (is_cpu_place(p1) || is_cuda_pinned_place(p1)) {
62 63 64 65 66 67 68 69 70
      return true;
    } else {
      return boost::get<CUDAPlace>(p1) == boost::get<CUDAPlace>(p2);
    }
  } else {
    return false;
  }
}

std::ostream &operator<<(std::ostream &os, const Place &p) {
  detail::PlacePrinter printer(os);
L
liaogang 已提交
73 74
  boost::apply_visitor(printer, p);
  return os;
Y
Yi Wang 已提交
75 76
}

}  // namespace platform
}  // namespace paddle