/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <string.h>  // for strdup
#include <algorithm>
#include <memory>
#include <set>
#include <stdexcept>
#include <string>

#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/platform/cpu_helper.h"
#include "paddle/fluid/platform/cpu_info.h"
#include "paddle/fluid/string/split.h"
#ifdef PADDLE_WITH_CUDA
#include "paddle/fluid/platform/cuda_device_guard.h"
#include "paddle/fluid/platform/dynload/cupti.h"
#endif
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/platform/init.h"
#include "paddle/fluid/platform/place.h"
#include "paddle/fluid/string/piece.h"

DEFINE_int32(paddle_num_threads, 1,
             "Number of threads for each paddle instance.");
DEFINE_int32(multiple_of_cupti_buffer_size, 1,
             "Multiple of the CUPTI device buffer size. If timestamps are "
             "dropped while profiling, try increasing this value.");
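// These flags can be passed on the command line of any binary that calls
// InitGflags, e.g. (hypothetical values):
//   --paddle_num_threads=8 --multiple_of_cupti_buffer_size=4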

namespace paddle {
namespace framework {

std::once_flag gflags_init_flag;
std::once_flag p2p_init_flag;

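// Parses gflags-style command-line flags exactly once per process;
// subsequent calls are no-ops.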
void InitGflags(std::vector<std::string> argv) {
  std::call_once(gflags_init_flag, [&]() {
    FLAGS_logtostderr = true;
    // gflags expects argv[0] to be the program name, so prepend a dummy one.
    argv.insert(argv.begin(), "dummy");
    int argc = argv.size();
    char **arr = new char *[argv.size()];
    std::string line;
    for (size_t i = 0; i < argv.size(); i++) {
      arr[i] = &argv[i][0];
      line += argv[i];
      line += ' ';
    }
    google::ParseCommandLineFlags(&argc, &arr, true);
    VLOG(1) << "Init commandline: " << line;
  });
}
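
// A typical (hypothetical) call site, forwarding a process's arguments:
//   paddle::framework::InitGflags({"--paddle_num_threads=4"});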

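// Enables CUDA peer-to-peer access between every pair of the given devices
// (once per process); pairs that cannot reach each other only log a warning.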
void InitP2P(std::vector<int> devices) {
#ifdef PADDLE_WITH_CUDA
  std::call_once(p2p_init_flag, [&]() {
    int count = devices.size();
    for (int i = 0; i < count; ++i) {
      for (int j = 0; j < count; ++j) {
        if (devices[i] == devices[j]) continue;
        int can_access = -1;
        PADDLE_ENFORCE(
            cudaDeviceCanAccessPeer(&can_access, devices[i], devices[j]),
            "Failed to test P2P access.");
        if (can_access != 1) {
          LOG(WARNING) << "Cannot enable P2P access from " << devices[i]
                       << " to " << devices[j];
        } else {
          platform::CUDADeviceGuard guard(devices[i]);
          cudaDeviceEnablePeerAccess(devices[j], 0);
        }
      }
    }
  });
#endif
}

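// Scales the CUPTI activity buffer sizes by
// FLAGS_multiple_of_cupti_buffer_size to reduce dropped profiler timestamps.
// A no-op when the flag is 1 or CUPTI support is not compiled in.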
void InitCupti() {
#ifdef PADDLE_WITH_CUPTI
  if (FLAGS_multiple_of_cupti_buffer_size == 1) return;
  size_t attrValue = 0, attrValueSize = sizeof(size_t);
#define MULTIPLY_ATTR_VALUE(attr)                                 \
  {                                                               \
    PADDLE_ENFORCE(!platform::dynload::cuptiActivityGetAttribute( \
        attr, &attrValueSize, &attrValue));                       \
    attrValue *= FLAGS_multiple_of_cupti_buffer_size;             \
    LOG(WARNING) << "Set " #attr " " << attrValue << " bytes";    \
    PADDLE_ENFORCE(!platform::dynload::cuptiActivitySetAttribute( \
        attr, &attrValueSize, &attrValue));                       \
  }
  MULTIPLY_ATTR_VALUE(CUPTI_ACTIVITY_ATTR_DEVICE_BUFFER_SIZE);
  MULTIPLY_ATTR_VALUE(CUPTI_ACTIVITY_ATTR_DEVICE_BUFFER_SIZE_CDP);
#if CUDA_VERSION >= 9000
  MULTIPLY_ATTR_VALUE(CUPTI_ACTIVITY_ATTR_PROFILING_SEMAPHORE_POOL_SIZE);
#endif
#undef MULTIPLY_ATTR_VALUE
#endif
}

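// Initializes all devices available at runtime: every selected GPU (if any)
// plus the CPU, optionally enabling P2P access between the GPUs.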
void InitDevices(bool init_p2p) {
  // CUPTI attributes must be set before any CUDA context is created
  // (see the CUPTI documentation on CUpti_ActivityAttribute).
  InitCupti();
  /* Initialize all available devices by default. */
  std::vector<int> devices;
#ifdef PADDLE_WITH_CUDA
  try {
    // Use the user-specified GPUs in single-node multi-process mode.
    devices = platform::GetSelectedDevices();
  } catch (const std::exception &exp) {
    LOG(WARNING) << "Compiled with WITH_GPU, but no GPU found at runtime.";
  }
#endif
  InitDevices(init_p2p, devices);
}

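// Initializes the given GPU devices plus the CPU place, sets up the device
// context pool and the temporary allocator, and aborts with guidance if the
// binary's compile-time instruction set (AVX/AVX2/AVX512F) exceeds what the
// local CPU supports.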
void InitDevices(bool init_p2p, const std::vector<int> devices) {
  std::vector<platform::Place> places;

  for (size_t i = 0; i < devices.size(); ++i) {
    // In multi-process multi-GPU mode we may get gpuid = 7
    // while the visible device count is 1.
    if (devices[i] < 0) {
      LOG(WARNING) << "Invalid device id.";
      continue;
    }

    places.emplace_back(platform::CUDAPlace(devices[i]));
  }
  if (init_p2p) {
    InitP2P(devices);
  }
  places.emplace_back(platform::CPUPlace());
  platform::DeviceContextPool::Init(places);
  platform::DeviceTemporaryAllocator::Init();

#ifndef PADDLE_WITH_MKLDNN
  platform::SetNumThreads(FLAGS_paddle_num_threads);
#endif

#if !defined(_WIN32) && !defined(__APPLE__) && !defined(__OSX__)
  if (platform::MayIUse(platform::avx)) {
#ifndef __AVX__
    LOG(WARNING) << "AVX is available, please re-compile on the local machine.";
#endif
  }

// Emit guidance when the compile-time and runtime CPU instruction sets
// mismatch.
#define AVX_GUIDE(compiletime, runtime)                                \
  LOG(FATAL)                                                           \
      << "This version was compiled with " #compiletime               \
         " instructions, so you may encounter an illegal-instruction" \
         " error on this CPU. Please reinstall the " #runtime         \
         " version or compile from source code."

#ifdef __AVX512F__
  if (!platform::MayIUse(platform::avx512f)) {
    if (platform::MayIUse(platform::avx2)) {
      AVX_GUIDE(AVX512, AVX2);
    } else if (platform::MayIUse(platform::avx)) {
      AVX_GUIDE(AVX512, AVX);
    } else {
      AVX_GUIDE(AVX512, NonAVX);
    }
  }
#endif

#ifdef __AVX2__
  if (!platform::MayIUse(platform::avx2)) {
    if (platform::MayIUse(platform::avx)) {
      AVX_GUIDE(AVX2, AVX);
    } else {
      AVX_GUIDE(AVX2, NonAVX);
    }
  }
#endif

#ifdef __AVX__
  if (!platform::MayIUse(platform::avx)) {
    AVX_GUIDE(AVX, NonAVX);
  }
#endif
#undef AVX_GUIDE

#endif
}

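// Initializes Google logging with the given program name and, except on
// Windows, installs glog's failure signal handler.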
void InitGLOG(const std::string &prog_name) {
  // glog does not keep a copy of argv[0] internally, so use strdup to
  // allocate a string that outlives this call.
  google::InitGoogleLogging(strdup(prog_name.c_str()));
#ifndef _WIN32
  google::InstallFailureSignalHandler();
#endif
}

}  // namespace framework
}  // namespace paddle