/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <fstream>
#include <string>

#include "paddle/fluid/platform/cpu_helper.h"
#include "paddle/fluid/platform/cpu_info.h"
#include "paddle/fluid/platform/npu_info.h"
#include "paddle/fluid/string/split.h"
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
#include "paddle/fluid/platform/cuda_device_guard.h"
#endif
#ifdef PADDLE_WITH_CUDA
#include "paddle/fluid/platform/dynload/cupti.h"
#endif
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/platform/init.h"
#include "paddle/fluid/platform/place.h"

#ifdef PADDLE_WITH_XPU
#include "paddle/fluid/platform/xpu/xpu_header.h"
#include "paddle/fluid/platform/xpu/xpu_info.h"
#endif

#ifdef WITH_WIN_DUMP_DBG
#include <stdio.h>
#include <time.h>
#include <windows.h>

#include "DbgHelp.h"
#endif

DECLARE_int32(paddle_num_threads);
DEFINE_int32(multiple_of_cupti_buffer_size, 1,
             "Multiple of the CUPTI device buffer size. If the timestamps have "
             "been dropped when you are profiling, try increasing this value.");

49 50 51 52
namespace paddle {
namespace platform {

void ParseCommandLineFlags(int argc, char **argv, bool remove) {
53
  ::GFLAGS_NAMESPACE::ParseCommandLineFlags(&argc, &argv, remove);
54 55 56 57 58
}

}  // namespace platform
}  // namespace paddle

D
dzhwinter 已提交
59 60 61
namespace paddle {
namespace framework {

62 63 64 65
#ifdef _WIN32
#define strdup _strdup
#endif

D
dzhwinter 已提交
66
std::once_flag gflags_init_flag;
67
std::once_flag glog_init_flag;
68
std::once_flag npu_init_flag;
D
dzhwinter 已提交
69

70
bool InitGflags(std::vector<std::string> args) {
71
  bool successed = false;
D
dzhwinter 已提交
72
  std::call_once(gflags_init_flag, [&]() {
C
chengduo 已提交
73
    FLAGS_logtostderr = true;
L
Leo Chen 已提交
74 75 76 77 78
    // NOTE(zhiqiu): dummy is needed, since the function
    // ParseNewCommandLineFlags in gflags.cc starts processing
    // commandline strings from idx 1.
    // The reason is, it assumes that the first one (idx 0) is
    // the filename of executable file.
79 80
    args.insert(args.begin(), "dummy");
    std::vector<char *> argv;
D
dzhwinter 已提交
81
    std::string line;
82 83 84 85
    int argc = args.size();
    for (auto &arg : args) {
      argv.push_back(const_cast<char *>(arg.data()));
      line += arg;
D
dzhwinter 已提交
86 87
      line += ' ';
    }
L
Leo Chen 已提交
88 89
    VLOG(1) << "Before Parse: argc is " << argc
            << ", Init commandline: " << line;
90 91

    char **arr = argv.data();
92
    ::GFLAGS_NAMESPACE::ParseCommandLineFlags(&argc, &arr, true);
93
    successed = true;
94 95

    VLOG(1) << "After Parse: argc is " << argc;
D
dzhwinter 已提交
96
  });
97
  return successed;
D
dzhwinter 已提交
98 99
}

#ifdef PADDLE_WITH_CUDA
// Scale the CUPTI activity buffer sizes by
// FLAGS_multiple_of_cupti_buffer_size so fewer timestamps are dropped while
// profiling. Must be called before any CUDA context is created (see CUPTI
// documentation about CUpti_ActivityAttribute).
void InitCupti() {
#ifdef PADDLE_WITH_CUPTI
  if (FLAGS_multiple_of_cupti_buffer_size == 1) return;
  size_t attrValue = 0, attrValueSize = sizeof(size_t);
// Read attribute `attr`, multiply it by the flag, and write it back.
#define MULTIPLY_ATTR_VALUE(attr)                                            \
  {                                                                          \
    PADDLE_ENFORCE_EQ(                                                       \
        !platform::dynload::cuptiActivityGetAttribute(attr, &attrValueSize,  \
                                                      &attrValue),           \
        true, platform::errors::Unavailable("Get cupti attribute failed.")); \
    attrValue *= FLAGS_multiple_of_cupti_buffer_size;                        \
    LOG(WARNING) << "Set " #attr " " << attrValue << " byte";                \
    PADDLE_ENFORCE_EQ(                                                       \
        !platform::dynload::cuptiActivitySetAttribute(attr, &attrValueSize,  \
                                                      &attrValue),           \
        true, platform::errors::Unavailable("Set cupti attribute failed.")); \
  }
  MULTIPLY_ATTR_VALUE(CUPTI_ACTIVITY_ATTR_DEVICE_BUFFER_SIZE);
  MULTIPLY_ATTR_VALUE(CUPTI_ACTIVITY_ATTR_DEVICE_BUFFER_SIZE_CDP);
#if CUDA_VERSION >= 9000
  MULTIPLY_ATTR_VALUE(CUPTI_ACTIVITY_ATTR_PROFILING_SEMAPHORE_POOL_SIZE);
#endif
#undef MULTIPLY_ATTR_VALUE
#endif
}
#endif
127

128
void InitDevices() {
129 130 131
// CUPTI attribute should be set before any CUDA context is created (see CUPTI
// documentation about CUpti_ActivityAttribute).
#ifdef PADDLE_WITH_CUDA
132
  InitCupti();
133
#endif
134
  /*Init all available devices by default */
135
  std::vector<int> devices;
136
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
D
dzhwinter 已提交
137
  try {
138 139
    // use user specified GPUs in single-node multi-process mode.
    devices = platform::GetSelectedDevices();
D
dzhwinter 已提交
140 141
  } catch (const std::exception &exp) {
    LOG(WARNING) << "Compiled with WITH_GPU, but no GPU found in runtime.";
142
  }
143 144 145 146 147 148 149 150
#endif
#ifdef PADDLE_WITH_XPU
  try {
    // use user specified XPUs in single-node multi-process mode.
    devices = platform::GetXPUSelectedDevices();
  } catch (const std::exception &exp) {
    LOG(WARNING) << "Compiled with WITH_XPU, but no XPU found in runtime.";
  }
151 152 153 154 155 156 157 158 159 160 161
#endif
#ifdef PADDLE_WITH_ASCEND_CL
  // NOTE(zhiqiu): use singleton to explicitly init and finalize ACL
  platform::AclInstance::Instance();  // NOLINT
  try {
    // use user specified XPUs in single-node multi-process mode.
    devices = platform::GetSelectedNPUDevices();
  } catch (const std::exception &exp) {
    LOG(WARNING)
        << "Compiled with PADDLE_WITH_ASCEND_CL, but no NPU found in runtime.";
  }
D
dzhwinter 已提交
162
#endif
163
  InitDevices(devices);
D
dzhwinter 已提交
164 165
}

166
void InitDevices(const std::vector<int> devices) {
167 168 169
  std::vector<platform::Place> places;

  for (size_t i = 0; i < devices.size(); ++i) {
170 171 172
    // In multi process multi gpu mode, we may have gpuid = 7
    // but count = 1.
    if (devices[i] < 0) {
173 174 175
      LOG(WARNING) << "Invalid devices id.";
      continue;
    }
176

177
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
178
    places.emplace_back(platform::CUDAPlace(devices[i]));
179 180 181
#endif
#ifdef PADDLE_WITH_XPU
    places.emplace_back(platform::XPUPlace(devices[i]));
182 183 184
#endif
#ifdef PADDLE_WITH_ASCEND_CL
    places.emplace_back(platform::NPUPlace(devices[i]));
185
#endif
186 187
  }
  places.emplace_back(platform::CPUPlace());
188
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
189 190
  places.emplace_back(platform::CUDAPinnedPlace());
#endif
191
  platform::DeviceContextPool::Init(places);
Q
qingqing01 已提交
192

193
#ifndef PADDLE_WITH_MKLDNN
T
tensor-tang 已提交
194
  platform::SetNumThreads(FLAGS_paddle_num_threads);
195
#endif
T
tensor-tang 已提交
196

T
tensor-tang 已提交
197
#if !defined(_WIN32) && !defined(__APPLE__) && !defined(__OSX__)
T
tensor-tang 已提交
198
  if (platform::MayIUse(platform::avx)) {
T
tensor-tang 已提交
199 200 201 202
#ifndef __AVX__
    LOG(WARNING) << "AVX is available, Please re-compile on local machine";
#endif
  }
203 204

// Throw some informations when CPU instructions mismatch.
205 206 207 208 209 210
#define AVX_GUIDE(compiletime, runtime)                                  \
  PADDLE_THROW(platform::errors::Unavailable(                            \
      "This version is compiled on higher instruction(" #compiletime     \
      ") system, you may encounter illegal instruction error running on" \
      " your local CPU machine. Please reinstall the " #runtime          \
      " version or compile from source code."))
211 212

#ifdef __AVX512F__
T
tensor-tang 已提交
213 214
  if (!platform::MayIUse(platform::avx512f)) {
    if (platform::MayIUse(platform::avx2)) {
215
      AVX_GUIDE(AVX512, AVX2);
T
tensor-tang 已提交
216
    } else if (platform::MayIUse(platform::avx)) {
217 218 219 220 221
      AVX_GUIDE(AVX512, AVX);
    } else {
      AVX_GUIDE(AVX512, NonAVX);
    }
  }
T
tensor-tang 已提交
222
#endif
223 224

#ifdef __AVX2__
T
tensor-tang 已提交
225 226
  if (!platform::MayIUse(platform::avx2)) {
    if (platform::MayIUse(platform::avx)) {
227 228 229 230
      AVX_GUIDE(AVX2, AVX);
    } else {
      AVX_GUIDE(AVX2, NonAVX);
    }
T
tensor-tang 已提交
231 232
  }
#endif
233 234

#ifdef __AVX__
T
tensor-tang 已提交
235
  if (!platform::MayIUse(platform::avx)) {
236
    AVX_GUIDE(AVX, NonAVX);
T
tensor-tang 已提交
237
  }
238 239
#endif
#undef AVX_GUIDE
T
tensor-tang 已提交
240 241

#endif
242 243
}

#ifndef _WIN32
// Description Quoted from
// https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/signal.h.html
const struct {
  const char *name;
  const char *error_string;
} SignalErrorStrings[] = {
    {"SIGSEGV", "Segmentation fault"},
    {"SIGILL", "Illegal instruction"},
    {"SIGFPE", "Erroneous arithmetic operation"},
    {"SIGABRT", "Process abort signal"},
    {"SIGBUS", "Access to an undefined portion of a memory object"},
    {"SIGTERM", "Termination signal"},
};

// Return true iff `str` begins with `prefix`.
bool StartsWith(const char *str, const char *prefix) {
  const size_t prefix_len = strlen(prefix);
  if (strlen(str) < prefix_len) {
    return false;
  }
  return strncmp(str, prefix, prefix_len) == 0;
}

// Map a glog failure line to a human-readable description by scanning it
// for any of the known signal names above.
const char *ParseSignalErrorString(const std::string &str) {
  for (const auto &entry : SignalErrorStrings) {
    if (str.find(entry.name) != std::string::npos) {
      return entry.error_string;
    }
  }
  return "Unknown signal";
}

// Handle SIGSEGV, SIGILL, SIGFPE, SIGABRT, SIGBUS, and SIGTERM.
276
void SignalHandle(const char *data, int size) {
C
chengduo 已提交
277
  try {
278 279
    // NOTE1: The glog FailureSignalHandler dumped messages
    //   are deal with line by line
280
    auto signal_msg_dunmer_ptr = SignalMessageDumper::Instance().Get();
281 282 283
    // NOTE2: we only deal with the time info ane signal info,
    //   the stack trace will generated by paddle self
    if (StartsWith(data, "*** Aborted at")) {
284
      *signal_msg_dunmer_ptr << "\n  [TimeInfo: " << std::string(data, size - 1)
285
                             << "]\n";
286 287 288 289 290
    } else if (StartsWith(data, "***")) {
      std::string signal_info(data, size - 1);
      std::string useless_substr("; stack trace:");
      size_t start_pos = signal_info.rfind(useless_substr);
      signal_info.replace(start_pos, useless_substr.length(), "");
291
      *signal_msg_dunmer_ptr << "  [SignalInfo: " << signal_info << "]\n";
292 293 294

      // NOTE3: Final singal error message print.
      // Here does not throw an exception,
295
      // otherwise it will casue "terminate called recursively"
296
      std::ostringstream sout;
297 298 299 300 301 302 303 304 305 306 307
      sout << "\n\n--------------------------------------\n";
      sout << "C++ Traceback (most recent call last):";
      sout << "\n--------------------------------------\n";
      auto traceback = platform::GetCurrentTraceBackString(/*for_signal=*/true);
      if (traceback.empty()) {
        sout
            << "No stack trace in paddle, may be caused by external reasons.\n";
      } else {
        sout << traceback;
      }

308 309 310 311 312 313 314
      sout << "\n----------------------\nError Message "
              "Summary:\n----------------------\n";
      sout << platform::errors::Fatal(
                  "`%s` is detected by the operating system.",
                  ParseSignalErrorString(signal_info))
                  .to_string();
      std::cout << sout.str() << (*signal_msg_dunmer_ptr).str() << std::endl;
315
    }
C
chengduo 已提交
316
  } catch (...) {
317 318 319
    // Since the program has already triggered a system error,
    // no further processing is required here, glog FailureSignalHandler
    // will Kill program by the default signal handler
C
chengduo 已提交
320 321 322 323
  }
}
#endif

#ifdef WITH_WIN_DUMP_DBG
// Signature of dbghelp.dll's MiniDumpWriteDump, resolved at runtime so the
// DLL is only loaded when a crash actually happens.
typedef BOOL(WINAPI *MINIDUMP_WRITE_DUMP)(
    IN HANDLE hProcess, IN DWORD ProcessId, IN HANDLE hFile,
    IN MINIDUMP_TYPE DumpType,
    IN CONST PMINIDUMP_EXCEPTION_INFORMATION ExceptionParam,
    OPTIONAL IN PMINIDUMP_USER_STREAM_INFORMATION UserStreamParam,
    OPTIONAL IN PMINIDUMP_CALLBACK_INFORMATION CallbackParam OPTIONAL);

// Write a minidump of the current process (with the faulting exception
// context) to `lpstrDumpFilePathName`. Best-effort: silently gives up if
// the file cannot be created or dbghelp.dll is unavailable, since we are
// already inside a crash handler.
void CreateDumpFile(LPCSTR lpstrDumpFilePathName,
                    EXCEPTION_POINTERS *pException) {
  HANDLE hDumpFile = CreateFile(lpstrDumpFilePathName, GENERIC_WRITE, 0, NULL,
                                CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL);
  if (hDumpFile == INVALID_HANDLE_VALUE) {
    return;  // cannot create the dump file; nothing more we can do
  }
  MINIDUMP_EXCEPTION_INFORMATION dumpInfo;
  dumpInfo.ExceptionPointers = pException;
  dumpInfo.ThreadId = GetCurrentThreadId();
  dumpInfo.ClientPointers = TRUE;
  HMODULE hDbgHelp = LoadLibrary("DBGHELP.DLL");
  if (hDbgHelp != NULL) {
    MINIDUMP_WRITE_DUMP MiniDumpWriteDump_ =
        (MINIDUMP_WRITE_DUMP)GetProcAddress(hDbgHelp, "MiniDumpWriteDump");
    if (MiniDumpWriteDump_ != NULL) {
      MiniDumpWriteDump_(GetCurrentProcess(), GetCurrentProcessId(), hDumpFile,
                         MiniDumpWithPrivateReadWriteMemory, &dumpInfo, NULL,
                         NULL);
    }
  }
  CloseHandle(hDumpFile);
}

// Top-level SEH filter: dump C:\PaddleYYYYMMDD-HHMMSS.dmp, then let the
// process terminate (EXCEPTION_EXECUTE_HANDLER).
LONG ApplicationCrashHandler(EXCEPTION_POINTERS *pException) {
  time_t time_seconds = time(0);
  struct tm now_time;
  localtime_s(&now_time, &time_seconds);

  char buf[1024];
  sprintf_s(buf, "C:\\Paddle%04d%02d%02d-%02d%02d%02d.dmp",
            1900 + now_time.tm_year, 1 + now_time.tm_mon, now_time.tm_mday,
            now_time.tm_hour, now_time.tm_min, now_time.tm_sec);

  CreateDumpFile(buf, pException);
  return EXCEPTION_EXECUTE_HANDLER;
}
#endif

Y
Yang Yu 已提交
363
void InitGLOG(const std::string &prog_name) {
364
  std::call_once(glog_init_flag, [&]() {
365 366 367 368 369 370
// glog will not hold the ARGV[0] inside.
// Use strdup to alloc a new string.
#ifdef WITH_WIN_DUMP_DBG
    SetUnhandledExceptionFilter(
        (LPTOP_LEVEL_EXCEPTION_FILTER)ApplicationCrashHandler);
#endif
371
    google::InitGoogleLogging(strdup(prog_name.c_str()));
C
chengduo 已提交
372
#ifndef _WIN32
373 374
    google::InstallFailureSignalHandler();
    google::InstallFailureWriter(&SignalHandle);
C
chengduo 已提交
375
#endif
376
  });
Y
Yang Yu 已提交
377 378
}

}  // namespace framework
}  // namespace paddle