Commit 368c1860 authored by Megvii Engine Team

fix(mgb/jit): find cuda include path correctly

GitOrigin-RevId: 5ced4206a7e0e884fdffc7b09c52196564c01eae
Parent b04e0466
/**
* \file src/core/impl/utils/cuda_helper.cpp
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
#include "megbrain/common.h"
#include "megbrain/exception.h"
#include "megbrain/utils/cuda_helper.h"
#include <set>
#include <fstream>
#include <string>
#include <sstream>
using namespace mgb;
#ifdef WIN32
#include <io.h>
#include <windows.h>
#else
#include <unistd.h>
#include <dlfcn.h>
#endif
#ifndef PATH_MAX
#define PATH_MAX 4096
#endif
#ifdef WIN32
#define F_OK 0
#define RTLD_LAZY 0
#define RTLD_GLOBAL 0
#define RTLD_NOLOAD 0
#define RTLD_DI_ORIGIN 0
#define access(a, b) false
#define SPLITER ';'
#define PATH_SPLITER '\\'
#define ENV_PATH "Path"
#define NVCC_EXE "nvcc.exe"
void* dlopen(const char* file, int) {
    return static_cast<void*>(LoadLibrary(file));
}

int dlinfo(void* handle, int request, char* path) {
    if (GetModuleFileName((HMODULE)handle, path, PATH_MAX))
        return 0;
    else
        return -1;
}

void* dlerror() {
    const char* errmsg = "dlerror is not available on Windows";
    return const_cast<char*>(errmsg);
}

void* dlsym(void* handle, char* name) {
    FARPROC symbol = GetProcAddress((HMODULE)handle, name);
    return reinterpret_cast<void*>(symbol);
}

int check_file_exist(const char* path, int mode) {
    return _access(path, mode);
}
#else
#define SPLITER ':'
#define PATH_SPLITER '/'
#define ENV_PATH "PATH"
#define NVCC_EXE "nvcc"
int check_file_exist(const char* path, int mode) {
    return access(path, mode);
}
#endif
std::vector<std::string> split_env(const char* env) {
    std::string e(env);
    std::istringstream stream(e);
    std::vector<std::string> ret;
    std::string path;
    while (std::getline(stream, path, SPLITER)) {
        ret.emplace_back(path);
    }
    return ret;
}
//! Find file_name under each directory listed in the environment variables
//! given by envs. Optional intermediate directories (itmedias) between the
//! environment path and file_name are also searched. Returns the first
//! existing path, or an empty string if none is found.
std::string find_file_in_envs_with_intmd(
        const std::vector<std::string>& envs, const std::string& file_name,
        const std::vector<std::string>& itmedias = {}) {
    for (auto&& env : envs) {
        auto ret = getenv(env.c_str());
        if (ret) {
            for (auto&& path : split_env(ret)) {
                auto file_path = std::string(path) + PATH_SPLITER + file_name;
                if (!check_file_exist(file_path.c_str(), F_OK)) {
                    return file_path;
                }
                if (!itmedias.empty()) {
                    for (auto&& inter_path : itmedias) {
                        file_path = std::string(path) + PATH_SPLITER +
                                    inter_path + PATH_SPLITER + file_name;
                        if (!check_file_exist(file_path.c_str(), F_OK)) {
                            return file_path;
                        }
                    }
                }
            }
        }
    }
    return std::string{};
}
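
//! find nvcc via the PATH environment variable and return the directory
//! containing it, including the trailing path separator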
std::string get_nvcc_root_path() {
    auto nvcc_root_path = find_file_in_envs_with_intmd({ENV_PATH}, NVCC_EXE);
    if (nvcc_root_path.empty()) {
        mgb_throw(MegBrainError,
                  "nvcc not found; add nvcc to your PATH environment variable");
    } else {
        auto idx = nvcc_root_path.rfind(PATH_SPLITER);
        return nvcc_root_path.substr(0, idx + 1);
    }
}
std::vector<std::string> mgb::get_cuda_include_path() {
#if MGB_CUDA
    std::vector<std::string> paths;

    // 1. use CUDA_BIN_PATH
    auto cuda_path = getenv("CUDA_BIN_PATH");
    if (cuda_path) {
        paths.emplace_back(std::string(cuda_path) + PATH_SPLITER + "include");
        paths.emplace_back(std::string(cuda_path) + PATH_SPLITER + ".." +
                           PATH_SPLITER + "include");
    }

    // 2. use the nvcc path
    auto nvcc_path = get_nvcc_root_path();
    auto cudart_header_path = nvcc_path + ".." + PATH_SPLITER + "include" +
                              PATH_SPLITER + "cuda_runtime.h";
    //! double check that path_to_nvcc/../include/cuda_runtime.h exists
    auto ret = check_file_exist(cudart_header_path.c_str(), F_OK);
    if (ret == 0) {
        paths.emplace_back(nvcc_path + "..");
        paths.emplace_back(nvcc_path + ".." + PATH_SPLITER + "include");
    }

    // 3. use the libcudart.so library path
    char cuda_lib_path[PATH_MAX];
    auto handle = dlopen("libcudart.so", RTLD_GLOBAL | RTLD_LAZY);
    if (handle != nullptr) {
        mgb_assert(dlinfo(handle, RTLD_DI_ORIGIN, cuda_lib_path) != -1, "%s",
                   dlerror());
        paths.emplace_back(std::string(cuda_lib_path) + PATH_SPLITER + ".." +
                           PATH_SPLITER + "include");
    }

    mgb_assert(paths.size() > 0,
               "can't find the CUDA include path; check your CUDA environment "
               "and try one of these solutions: "
               "1. set CUDA_BIN_PATH to the CUDA home path; "
               "2. add the nvcc path to PATH; "
               "3. add the libcudart.so path to LD_LIBRARY_PATH");
    return paths;
#else
    mgb_throw(MegBrainError, "cuda disabled at compile time");
#endif
}
\ No newline at end of file
/**
* \file src/core/include/megbrain/utils/cuda_helper.h
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
#pragma once
#include <string>
#include <vector>
namespace mgb {
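
//! get the CUDA include directories; candidates are collected from the
//! CUDA_BIN_PATH environment variable, the directory of the nvcc executable
//! found in PATH, and the location of the loaded libcudart library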
std::vector<std::string> get_cuda_include_path();
} // namespace mgb
\ No newline at end of file
/**
* \file src/core/test/utils/cuda_helper.cpp
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
#include "megbrain_build_config.h"
#include "megbrain/test/helper.h"
#include "megbrain/utils/cuda_helper.h"
#if MGB_CUDA
TEST(TestUtils, TestCudaIncludePath) {
    auto paths = mgb::get_cuda_include_path();
    int available = 0;
    for (auto path : paths) {
        FILE* file = fopen((path + "/cuda.h").c_str(), "r");
        if (file) {
            available++;
            fclose(file);
        }
    }
    mgb_assert(available, "no available cuda include path found!");
}
\ No newline at end of file
@@ -10,11 +10,13 @@
*/
#include "./compiler_cuda.h"
#include <cstdio>
#include "./codegen_cuda.h"
#include "megbrain/common.h"
#include "megbrain/comp_node_env.h"
#include "megbrain/jit/param_elem_visitor.h"
#include "megbrain/jit/utils.h"
#include "megbrain/utils/persistent_cache.h"
#include "megbrain/utils/timer.h"
@@ -29,23 +31,6 @@ using namespace jit;
namespace {
std::string NVRTCCompile(const std::string& code, int cap_major,
                         int cap_minor) {
    auto get_cuda_include_opts = []() {
        auto cuda_path = getenv("CUDA_BIN_PATH");
        if (cuda_path) {
            std::string path1 = std::string("-I") + cuda_path + "/include";
            std::string path2 = std::string("-I") + cuda_path + "/../include";
            return std::vector<std::string>{path1, path2};
        } else {
            char cuda_lib_path[PATH_MAX];
            auto handle = dlopen("libcudart.so",
                                 RTLD_GLOBAL | RTLD_LAZY | RTLD_NOLOAD);
            mgb_assert(handle != nullptr, "%s", dlerror());
            mgb_assert(dlinfo(handle, RTLD_DI_ORIGIN, &cuda_lib_path) != -1,
                       "%s", dlerror());
            return std::vector<std::string>{std::string("-I") + cuda_lib_path +
                                            "/../include"};
        }
    };
    static std::vector<std::string> cuda_include_opts = get_cuda_include_opts();
    auto arch_opt =
......
@@ -16,6 +16,10 @@
#include "megbrain/utils/debug.h"
#include "megbrain/jit/utils.h"
#if MGB_CUDA
#include "megbrain/utils/cuda_helper.h"
#endif
#include <atomic>
#ifdef __linux__
@@ -251,6 +255,19 @@ std::string jit::next_kernel_name() {
    return "fusion" + std::to_string(cnt.fetch_add(1));
}
std::vector<std::string> mgb::jit::get_cuda_include_opts() {
#if MGB_CUDA
    std::vector<std::string> opts;
    auto paths = mgb::get_cuda_include_path();
    for (auto path : paths) {
        opts.emplace_back("-I" + path);
    }
    return opts;
#else
    mgb_throw(MegBrainError, "cuda disabled at compile time");
#endif  // MGB_CUDA
}
#endif // MGB_JIT
// vim: syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}}
@@ -136,6 +136,8 @@ public:
//! in this process
std::string next_kernel_name();
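
//! return the CUDA include directories from mgb::get_cuda_include_path()
//! as "-I" compiler options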
std::vector<std::string> get_cuda_include_opts();
} // namespace jit
} // namespace mgb
......