/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <csignal>
#include <fstream>
#include <string>

#include "paddle/fluid/platform/cpu_helper.h"
#include "paddle/fluid/string/split.h"
#include "paddle/phi/backends/cpu/cpu_info.h"
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
#include "paddle/fluid/platform/cuda_device_guard.h"
#include "paddle/fluid/platform/device/gpu/gpu_info.h"
#endif
#ifdef PADDLE_WITH_CUDA
#include "paddle/fluid/platform/dynload/cupti.h"
#endif
#include "paddle/fluid/platform/device/device_wrapper.h"
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/platform/init.h"
#include "paddle/fluid/platform/os_info.h"
#include "paddle/fluid/platform/place.h"

#ifdef PADDLE_WITH_XPU
#include "paddle/fluid/platform/device/xpu/xpu_header.h"
#include "paddle/fluid/platform/device/xpu/xpu_info.h"
#endif

#ifdef WITH_WIN_DUMP_DBG
#include <stdio.h>
#include <time.h>
#ifndef NOMINMAX
#define NOMINMAX  // msvc max/min macro conflict with std::min/max
#endif
#include <windows.h>

#include "DbgHelp.h"
#endif

#ifdef PADDLE_WITH_IPU
#include "paddle/fluid/platform/device/ipu/ipu_info.h"
#endif

#include "paddle/fluid/ir/dialect/pd_dialect.h"
#include "paddle/fluid/memory/allocation/allocator_facade.h"
#include "paddle/fluid/memory/memory.h"
#include "paddle/fluid/platform/flags.h"
#include "paddle/ir/core/builtin_dialect.h"
#include "paddle/ir/core/ir_context.h"
#include "paddle/ir/core/program.h"
#include "paddle/phi/common/memory_utils.h"
#include "paddle/phi/core/custom_kernel.h"

PHI_DECLARE_int32(paddle_num_threads);
PADDLE_DEFINE_EXPORTED_int32(
    multiple_of_cupti_buffer_size,
    1,
    "Multiple of the CUPTI device buffer size. If the timestamps have "
    "been dropped when you are profiling, try increasing this value.");

namespace paddle {
namespace framework {

#ifdef _WIN32
#define strdup _strdup
#endif

std::once_flag gflags_init_flag;
std::once_flag glog_init_flag;
std::once_flag memory_method_init_flag;

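// Parse the given arguments with gflags, at most once per process.
// Returns true on the call that actually performed the parsing and
// false on every subsequent call.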
bool InitGflags(std::vector<std::string> args) {
  bool succeeded = false;
  std::call_once(gflags_init_flag, [&]() {
    FLAGS_logtostderr = true;
    // NOTE(zhiqiu): dummy is needed, since the function
    // ParseNewCommandLineFlags in gflags.cc starts processing
    // commandline strings from idx 1.
    // The reason is, it assumes that the first one (idx 0) is
    // the filename of executable file.
    args.insert(args.begin(), "dummy");
    std::vector<char *> argv;
    std::string line;
    int argc = args.size();
    for (auto &arg : args) {
      argv.push_back(const_cast<char *>(arg.data()));
      line += arg;
      line += ' ';
    }
    VLOG(1) << "Before Parse: argc is " << argc
            << ", Init commandline: " << line;

    char **arr = argv.data();
    ::GFLAGS_NAMESPACE::AllowCommandLineReparsing();
    ::GFLAGS_NAMESPACE::ParseCommandLineFlags(&argc, &arr, true);
    succeeded = true;

    VLOG(1) << "After Parse: argc is " << argc;
  });
  return succeeded;
}

#ifdef PADDLE_WITH_CUDA
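// Scale the CUPTI activity buffer sizes by
// FLAGS_multiple_of_cupti_buffer_size so that profiling timestamps are not
// dropped on busy devices. Must run before any CUDA context is created.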
void InitCupti() {
#ifdef PADDLE_WITH_CUPTI
  if (FLAGS_multiple_of_cupti_buffer_size == 1) return;
  size_t attrValue = 0, attrValueSize = sizeof(size_t);
#define MULTIPLY_ATTR_VALUE(attr)                                      \
  {                                                                    \
    PADDLE_ENFORCE_EQ(                                                 \
        !platform::dynload::cuptiActivityGetAttribute(                 \
            attr, &attrValueSize, &attrValue),                         \
        true,                                                          \
        platform::errors::Unavailable("Get cupti attribute failed.")); \
    attrValue *= FLAGS_multiple_of_cupti_buffer_size;                  \
    LOG(WARNING) << "Set " #attr " " << attrValue << " byte";          \
    PADDLE_ENFORCE_EQ(                                                 \
        !platform::dynload::cuptiActivitySetAttribute(                 \
            attr, &attrValueSize, &attrValue),                         \
        true,                                                          \
        platform::errors::Unavailable("Set cupti attribute failed.")); \
  }
  MULTIPLY_ATTR_VALUE(CUPTI_ACTIVITY_ATTR_DEVICE_BUFFER_SIZE);
  MULTIPLY_ATTR_VALUE(CUPTI_ACTIVITY_ATTR_DEVICE_BUFFER_SIZE_CDP);
#if CUDA_VERSION >= 9000
  MULTIPLY_ATTR_VALUE(CUPTI_ACTIVITY_ATTR_PROFILING_SEMAPHORE_POOL_SIZE);
#endif
#undef MULTIPLY_ATTR_VALUE
#endif
}
#endif

#ifdef PADDLE_WITH_CUSTOM_DEVICE
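// dlopen() every shared library found under library_dir and register the
// custom runtimes and custom kernels that those libraries provide.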
void LoadCustomDevice(const std::string &library_dir) {
  LOG(INFO) << "Try loading custom device libs from: [" << library_dir << "]";
  std::vector<std::string> libs = phi::ListAllLibraries(library_dir);
  for (const auto &lib_path : libs) {
    auto dso_handle = dlopen(lib_path.c_str(), RTLD_NOW);
    PADDLE_ENFORCE_NOT_NULL(
        dso_handle,
        platform::errors::InvalidArgument(
            "Fail to open library: %s with error: %s", lib_path, dlerror()));

    phi::LoadCustomRuntimeLib(lib_path, dso_handle);
  }
  phi::CustomKernelMap::Instance().RegisterCustomKernels();
  LOG(INFO) << "Finished in LoadCustomDevice with libs_path: [" << library_dir
            << "]";
}
#endif

static std::once_flag init_devices_flag;

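// Discover all devices visible to this process (GPU/XPU/IPU, depending on
// the build flags) and initialize their device contexts. Runs at most once.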
void InitDevices() {
  std::call_once(init_devices_flag, []() {
    // set name at the entry point of Paddle
    platform::SetCurrentThreadName("MainThread");
// CUPTI attribute should be set before any CUDA context is created (see CUPTI
// documentation about CUpti_ActivityAttribute).
#ifdef PADDLE_WITH_CUDA
    InitCupti();
#endif
    /* Init all available devices by default. */
    std::vector<int> devices;
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
    try {
      // use user specified GPUs in single-node multi-process mode.
      devices = platform::GetSelectedDevices();
    } catch (const std::exception &exp) {
      LOG(WARNING) << "Compiled with WITH_GPU, but no GPU found in runtime.";
    }
#endif
#ifdef PADDLE_WITH_XPU
    try {
      // use user specified XPUs in single-node multi-process mode.
      devices = platform::GetXPUSelectedDevices();
    } catch (const std::exception &exp) {
      LOG(WARNING) << "Compiled with WITH_XPU, but no XPU found in runtime.";
    }
#endif
#ifdef PADDLE_WITH_IPU
    try {
      // use user specified IPUs.
      devices = platform::GetSelectedIPUDevices();
    } catch (const std::exception &exp) {
      LOG(WARNING)
          << "Compiled with PADDLE_WITH_IPU, but no IPU found in runtime.";
    }
#endif
    InitDevices(devices);
  });
}

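// Initialize device contexts for an explicit list of device ids. A CPUPlace
// is always appended; a CUDAPinnedPlace is added for CUDA/HIP builds, and
// custom-device places are added when CUSTOM_DEVICE_ROOT is set.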
void InitDevices(const std::vector<int> devices) {
  ir::IrContext *ctx = ir::IrContext::Instance();
  ctx->GetOrRegisterDialect<paddle::dialect::PaddleDialect>();

  std::vector<platform::Place> places;

  for (auto device : devices) {
    // In multi process multi gpu mode, we may have gpuid = 7
    // but count = 1.
    if (device < 0) {
      LOG(WARNING) << "Invalid device id.";
      continue;
    }

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
    places.emplace_back(platform::CUDAPlace(device));
#endif
#ifdef PADDLE_WITH_XPU
    places.emplace_back(platform::XPUPlace(device));
#endif
#ifdef PADDLE_WITH_IPU
    places.emplace_back(platform::IPUPlace(device));
#endif
  }
  places.emplace_back(platform::CPUPlace());
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  places.emplace_back(platform::CUDAPinnedPlace());
#endif
#ifdef PADDLE_WITH_CUSTOM_DEVICE
  const char *custom_kernel_root_p = std::getenv("CUSTOM_DEVICE_ROOT");
  if (!custom_kernel_root_p) {
    VLOG(3) << "Env [CUSTOM_DEVICE_ROOT] is not set.";
  } else {
    std::string custom_kernel_root(custom_kernel_root_p);
    if (!custom_kernel_root.empty()) {
      LOG(INFO) << "ENV [CUSTOM_DEVICE_ROOT]=" << custom_kernel_root;
240 241
      LoadCustomDevice(custom_kernel_root);

      auto device_types = phi::DeviceManager::GetAllCustomDeviceTypes();
      for (auto &dev_type : device_types) {
        auto device_list = phi::DeviceManager::GetSelectedDeviceList(dev_type);
        LOG(INFO) << "CustomDevice: " << dev_type
                  << ", visible devices count: " << device_list.size();
        for (auto &dev_id : device_list) {
          places.push_back(platform::CustomPlace(dev_type, dev_id));
        }
      }
    } else {
      VLOG(3) << "ENV [CUSTOM_DEVICE_ROOT] is empty.";
    }
  }
#endif
  platform::DeviceContextPool::Init(places);

#ifndef PADDLE_WITH_MKLDNN
  platform::SetNumThreads(FLAGS_paddle_num_threads);
#endif
}

#ifndef _WIN32
// Description Quoted from
// https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/signal.h.html
const struct {  // NOLINT
  int signal_number;
  const char *name;
  const char *error_string;
} SignalErrorStrings[] = {
    {SIGSEGV, "SIGSEGV", "Segmentation fault"},
    {SIGILL, "SIGILL", "Illegal instruction"},
    {SIGFPE, "SIGFPE", "Erroneous arithmetic operation"},
    {SIGABRT, "SIGABRT", "Process abort signal"},
    {SIGBUS, "SIGBUS", "Access to an undefined portion of a memory object"},
    {SIGTERM, "SIGTERM", "Termination signal"},
};

bool StartsWith(const char *str, const char *prefix) {
  size_t len_prefix = strlen(prefix);
  size_t len_str = strlen(str);
  return len_str < len_prefix ? false : memcmp(prefix, str, len_prefix) == 0;
}

const char *ParseSignalErrorString(const std::string &str) {
  for (const auto &SignalErrorString : SignalErrorStrings) {
    if (std::string::npos != str.find(SignalErrorString.name)) {
      return SignalErrorString.error_string;
    }
  }
  return "Unknown signal";
}

// Handle SIGSEGV, SIGILL, SIGFPE, SIGABRT, SIGBUS, and SIGTERM.
void SignalHandle(const char *data, int size) {
  try {
    // NOTE1: The messages dumped by glog's FailureSignalHandler
    //   are handled line by line
    auto signal_msg_dumper_ptr = SignalMessageDumper::Instance().Get();
    // NOTE2: we only deal with the time info and signal info here;
    //   the stack trace will be generated by paddle itself
    if (StartsWith(data, "*** Aborted at")) {
      *signal_msg_dumper_ptr << "\n  [TimeInfo: " << std::string(data, size - 1)
                             << "]\n";
    } else if (StartsWith(data, "***")) {
      std::string signal_info(data, size - 1);
      std::string useless_substr("; stack trace:");
      size_t start_pos = signal_info.rfind(useless_substr);
      signal_info.replace(start_pos, useless_substr.length(), "");
      *signal_msg_dumper_ptr << "  [SignalInfo: " << signal_info << "]\n";

      // NOTE3: Final signal error message print.
      // We do not throw an exception here, otherwise it would cause
      // "terminate called recursively".
      std::ostringstream sout;
      sout << "\n\n--------------------------------------\n";
      sout << "C++ Traceback (most recent call last):";
      sout << "\n--------------------------------------\n";
      auto traceback = platform::GetCurrentTraceBackString(/*for_signal=*/true);
      if (traceback.empty()) {
        sout
            << "No stack trace in paddle, may be caused by external reasons.\n";
      } else {
        sout << traceback;
      }

      sout << "\n----------------------\nError Message "
              "Summary:\n----------------------\n";
      sout << platform::errors::Fatal(
                  "`%s` is detected by the operating system.",
                  ParseSignalErrorString(signal_info))
                  .to_string();
      std::cout << sout.str() << (*signal_msg_dumper_ptr).str() << std::endl;
    }
  } catch (...) {
    // Since the program has already triggered a system error,
    // no further processing is required here; the glog FailureSignalHandler
    // will kill the program via the default signal handler
  }
}
#endif  // _WIN32

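// Restore the default disposition (SIG_DFL) for every signal listed in
// SignalErrorStrings, undoing the glog failure handlers installed in
// InitGLOG.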
void DisableSignalHandler() {
#ifndef _WIN32
  for (const auto &SignalErrorString : SignalErrorStrings) {
    int signal_number = SignalErrorString.signal_number;
    struct sigaction sig_action;
    memset(&sig_action, 0, sizeof(sig_action));
    sigemptyset(&sig_action.sa_mask);
    sig_action.sa_handler = SIG_DFL;
    sigaction(signal_number, &sig_action, nullptr);
  }
#endif
}

#ifdef WITH_WIN_DUMP_DBG
typedef BOOL(WINAPI *MINIDUMP_WRITE_DUMP)(
    IN HANDLE hProcess,
    IN DWORD ProcessId,
    IN HANDLE hFile,
    IN MINIDUMP_TYPE DumpType,
    IN CONST PMINIDUMP_EXCEPTION_INFORMATION ExceptionParam,
    OPTIONAL IN PMINIDUMP_USER_STREAM_INFORMATION UserStreamParam,
    OPTIONAL IN PMINIDUMP_CALLBACK_INFORMATION CallbackParam OPTIONAL);
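
// Write a minidump of the current process to lpstrDumpFilePathName using
// DbgHelp's MiniDumpWriteDump, including private read/write memory.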
void CreateDumpFile(LPCSTR lpstrDumpFilePathName,
                    EXCEPTION_POINTERS *pException) {
  HANDLE hDumpFile = CreateFile(lpstrDumpFilePathName,
                                GENERIC_WRITE,
                                0,
                                nullptr,
                                CREATE_ALWAYS,
                                FILE_ATTRIBUTE_NORMAL,
                                nullptr);
  MINIDUMP_EXCEPTION_INFORMATION dumpInfo;
  dumpInfo.ExceptionPointers = pException;
  dumpInfo.ThreadId = GetCurrentThreadId();
  dumpInfo.ClientPointers = TRUE;
  MINIDUMP_WRITE_DUMP MiniDumpWriteDump_;
  HMODULE hDbgHelp = LoadLibrary("DBGHELP.DLL");
  MiniDumpWriteDump_ =
      (MINIDUMP_WRITE_DUMP)GetProcAddress(hDbgHelp, "MiniDumpWriteDump");
  MiniDumpWriteDump_(GetCurrentProcess(),
                     GetCurrentProcessId(),
                     hDumpFile,
                     MiniDumpWithPrivateReadWriteMemory,
                     &dumpInfo,
                     nullptr,
                     nullptr);
  CloseHandle(hDumpFile);
}

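// Unhandled-exception filter: write a timestamped .dmp file under C:\ and
// return EXCEPTION_EXECUTE_HANDLER so the default termination proceeds.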
LONG ApplicationCrashHandler(EXCEPTION_POINTERS *pException) {
  time_t time_seconds = time(0);
  struct tm now_time;
  localtime_s(&now_time, &time_seconds);

  char buf[1024];
  sprintf_s(buf,
            "C:\\Paddle%04d%02d%02d-%02d%02d%02d.dmp",
            1900 + now_time.tm_year,
            1 + now_time.tm_mon,
            now_time.tm_mday,
            now_time.tm_hour,
            now_time.tm_min,
            now_time.tm_sec);

  CreateDumpFile(buf, pException);
  return EXCEPTION_EXECUTE_HANDLER;
}
#endif

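// Initialize glog exactly once. On Windows builds with WITH_WIN_DUMP_DBG this
// also installs the crash-dump filter above; on POSIX it installs glog's
// failure signal handler wired to SignalHandle.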
void InitGLOG(const std::string &prog_name) {
  std::call_once(glog_init_flag, [&]() {
// glog will not hold a copy of ARGV[0], so use strdup to allocate a new
// string that outlives this call.
#ifdef WITH_WIN_DUMP_DBG
    SetUnhandledExceptionFilter(
        (LPTOP_LEVEL_EXCEPTION_FILTER)ApplicationCrashHandler);
#endif
    google::InitGoogleLogging(strdup(prog_name.c_str()));
#ifndef _WIN32
    google::InstallFailureSignalHandler();
    google::InstallFailureWriter(&SignalHandle);
#endif
  });
}

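// Wire the fluid memory allocation, copy, and device-context facilities into
// phi::MemoryUtils so that phi code can allocate and copy through the fluid
// implementations.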
void InitMemoryMethod() {
  std::call_once(memory_method_init_flag, [&]() {
    auto &memory_utils = phi::MemoryUtils::Instance();
    auto memory_method = std::make_unique<phi::MemoryInterface>();
    memory_method->alloc = paddle::memory::Alloc;
    memory_method->alloc_with_stream = paddle::memory::Alloc;
    memory_method->alloc_shared = paddle::memory::AllocShared;
    memory_method->alloc_shared_with_stream = paddle::memory::AllocShared;
    memory_method->in_same_stream = paddle::memory::InSameStream;
    memory_method->allocation_deleter =
        paddle::memory::allocation::Allocator::AllocationDeleter;
#if defined(PADDLE_WITH_CUSTOM_DEVICE) || defined(PADDLE_WITH_CUDA) || \
    defined(PADDLE_WITH_HIP)
    memory_method->copy_with_stream =
        paddle::memory::Copy<phi::Place, phi::Place>;
#endif
    memory_method->copy = paddle::memory::Copy<phi::Place, phi::Place>;
    memory_method->device_memory_stat_current_value =
        paddle::memory::DeviceMemoryStatCurrentValue;
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
    memory_method->gpu_memory_usage = paddle::platform::GpuMemoryUsage;
#endif
    memory_method->emplace_device_contexts =
        paddle::platform::EmplaceDeviceContexts;
    memory_method->init_devices = InitDevices;
    memory_utils.Init(std::move(memory_method));
  });
}

}  // namespace framework
}  // namespace paddle