/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <string.h>  // for strdup
#include <algorithm>
#include <stdexcept>
#include <string>

#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/platform/cpu_helper.h"
#include "paddle/fluid/platform/cpu_info.h"
#ifdef PADDLE_WITH_CUDA
#include "paddle/fluid/platform/cuda_device_guard.h"
#endif
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/platform/init.h"
#include "paddle/fluid/platform/place.h"
#include "paddle/fluid/string/piece.h"

DEFINE_int32(paddle_num_threads, 1,
             "Number of threads for each paddle instance.");

namespace paddle {
namespace framework {

std::once_flag gflags_init_flag;
std::once_flag p2p_init_flag;

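// Initialize gflags from the given arguments, exactly once per process.
// A dummy argv[0] is prepended because gflags expects the first entry to be
// the program name. The copied argv array is never freed; this is a small,
// one-time allocation.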
void InitGflags(std::vector<std::string> argv) {
  std::call_once(gflags_init_flag, [&]() {
    argv.insert(argv.begin(), "dummy");
    int argc = argv.size();
    char **arr = new char *[argv.size()];
    std::string line;
    for (size_t i = 0; i < argv.size(); i++) {
      arr[i] = &argv[i][0];
      line += argv[i];
      line += ' ';
    }
    google::ParseCommandLineFlags(&argc, &arr, true);
    VLOG(10) << "Init commandline: " << line;
  });
}

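// Enable CUDA peer-to-peer access between every ordered pair of the given
// devices, exactly once per process. Pairs that cannot reach each other only
// trigger a warning, and the whole call is a no-op in non-CUDA builds.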
void InitP2P(std::vector<int> devices) {
#ifdef PADDLE_WITH_CUDA
  std::call_once(p2p_init_flag, [&]() {
    int count = devices.size();
    for (int i = 0; i < count; ++i) {
      for (int j = 0; j < count; ++j) {
        if (devices[i] == devices[j]) continue;
        int can_access = -1;
        PADDLE_ENFORCE(
            cudaDeviceCanAccessPeer(&can_access, devices[i], devices[j]),
            "Failed to test P2P access.");
        if (can_access != 1) {
          LOG(WARNING) << "Cannot enable P2P access from " << devices[i]
                       << " to " << devices[j];
        } else {
          platform::CUDADeviceGuard guard(devices[i]);
          cudaDeviceEnablePeerAccess(devices[j], 0);
        }
      }
    }
  });
#endif
}

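// Initialize every device visible at runtime. When built with CUDA but no
// GPU is found, fall back to CPU-only initialization.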
void InitDevices(bool init_p2p) {
  /* Init all available devices by default. */
  std::vector<int> devices;
#ifdef PADDLE_WITH_CUDA
  try {
    int count = platform::GetCUDADeviceCount();
    for (int i = 0; i < count; ++i) {
      devices.push_back(i);
    }
  } catch (const std::exception &exp) {
    LOG(WARNING) << "Compiled with WITH_GPU, but no GPU found in runtime.";
  }
#endif
  InitDevices(init_p2p, devices);
}

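// Initialize device contexts for the given GPU ids plus the CPU place,
// optionally enabling P2P access, and verify that the CPU instruction set
// assumed at compile time is actually available at runtime.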
void InitDevices(bool init_p2p, const std::vector<int> devices) {
  std::vector<platform::Place> places;
  int count = 0;
#ifdef PADDLE_WITH_CUDA
  try {
    count = platform::GetCUDADeviceCount();
  } catch (const std::exception &exp) {
    LOG(WARNING) << "Compiled with WITH_GPU, but no GPU found in runtime.";
  }
#endif

  for (size_t i = 0; i < devices.size(); ++i) {
    if (devices[i] >= count || devices[i] < 0) {
      LOG(WARNING) << "Invalid devices id.";
      continue;
    }
    places.emplace_back(platform::CUDAPlace(devices[i]));
  }
  if (init_p2p) {
    InitP2P(devices);
  }
  places.emplace_back(platform::CPUPlace());
  platform::DeviceContextPool::Init(places);
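// Honor FLAGS_paddle_num_threads unless this is an MKL-DNN build.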
#ifndef PADDLE_WITH_MKLDNN
  platform::SetNumThreads(FLAGS_paddle_num_threads);
#endif

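// The runtime CPU capability checks below are compiled only on platforms
// other than Windows and macOS.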
#if !defined(_WIN32) && !defined(__APPLE__) && !defined(__OSX__)
  if (platform::jit::MayIUse(platform::jit::avx)) {
#ifndef __AVX__
    LOG(WARNING) << "AVX is available, Please re-compile on local machine";
#endif
  }

// Print an informative fatal error when the compile-time and runtime CPU
// instruction sets mismatch.
#define AVX_GUIDE(compiletime, runtime)                                   \
  LOG(FATAL)                                                              \
      << "This version was compiled with " #compiletime                  \
         " instructions; you may encounter an illegal instruction error" \
         " when running on your local CPU machine. Please reinstall the" \
         " " #runtime " version or compile from source code."

#ifdef __AVX512F__
  if (!platform::jit::MayIUse(platform::jit::avx512f)) {
    if (platform::jit::MayIUse(platform::jit::avx2)) {
      AVX_GUIDE(AVX512, AVX2);
    } else if (platform::jit::MayIUse(platform::jit::avx)) {
      AVX_GUIDE(AVX512, AVX);
    } else {
      AVX_GUIDE(AVX512, NonAVX);
    }
  }
#endif

#ifdef __AVX2__
  if (!platform::jit::MayIUse(platform::jit::avx2)) {
    if (platform::jit::MayIUse(platform::jit::avx)) {
      AVX_GUIDE(AVX2, AVX);
    } else {
      AVX_GUIDE(AVX2, NonAVX);
    }
  }
#endif

#ifdef __AVX__
  if (!platform::jit::MayIUse(platform::jit::avx)) {
    AVX_GUIDE(AVX, NonAVX);
  }
#endif
#undef AVX_GUIDE

#endif
}

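// Initialize Google logging with the given program name and install glog's
// failure signal handler.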
void InitGLOG(const std::string &prog_name) {
  // glog does not keep its own copy of argv[0], so use strdup to allocate a
  // string that outlives this call.
  google::InitGoogleLogging(strdup(prog_name.c_str()));
  google::InstallFailureSignalHandler();
}

}  // namespace framework
}  // namespace paddle