未验证 提交 2bae75ed 编写于 作者: H huangjiyi 提交者: GitHub

[Phi decouple] remove dependence on "paddle/fluid/platform/device/xpu/xxx.h" in phi (#48420)

* rm fluid “xpu_header.h” deps in phi

* move part of xpu_op_list.h from fluid to phi

* add fluid xpu_op_list deps

* add glog deps for xpu_op_list in phi

* fix PR-CI-Kunlun
上级 d3f52efd
......@@ -24,7 +24,13 @@ cc_library(
cc_library(
xpu_op_list
SRCS xpu_op_list.cc
DEPS gflags glog enforce xpulib device_context op_kernel_type)
DEPS gflags
glog
enforce
xpulib
device_context
op_kernel_type
phi_xpu_op_list)
cc_library(
xpu_resource_pool
SRCS xpu_resource_pool.cc
......
......@@ -49,31 +49,6 @@ static void tokenize(const std::string& ops,
op_set->insert(ops.substr(beg));
}
// Returns true iff `op_name` appears in the comma-separated
// XPU_BLACK_LIST environment variable. The list is parsed lazily,
// exactly once, on the first call.
//
// NOTE: the previous implementation used hand-rolled double-checked
// locking on a plain (non-atomic) bool `inited`; reading that flag
// outside the mutex while another thread writes it under the mutex is
// a data race and therefore undefined behavior under the C++11 memory
// model. A function-local static initialized by an immediately-invoked
// lambda is guaranteed thread-safe by the standard ("magic statics")
// and needs no mutex at all.
bool is_in_xpu_black_list(const std::string& op_name) {
  static const std::unordered_set<std::string> xpu_black_list = [] {
    std::unordered_set<std::string> black_list;
    if (const char* env = std::getenv("XPU_BLACK_LIST")) {
      tokenize(std::string(env), ',', &black_list);
    }
    VLOG(3) << "XPU Black List: ";
    for (const auto& op : black_list) {
      VLOG(3) << op << " ";
    }
    return black_list;
  }();
  return xpu_black_list.count(op_name) > 0;
}
#ifdef PADDLE_WITH_XPU_KP
bool is_xpu_kp_support_op(const std::string& op_name,
const pOpKernelType& type) {
......
......@@ -15,6 +15,7 @@ limitations under the License. */
#include <unordered_map>
#include "paddle/fluid/framework/op_kernel_type.h"
#include "paddle/phi/backends/xpu/xpu_op_list.h"
namespace paddle {
namespace platform {
......@@ -25,7 +26,7 @@ using XPUOpListMap =
std::unordered_map<std::string, std::vector<vartype::Type>>;
bool is_xpu_support_op(const std::string& op_name, const pOpKernelType& type);
bool is_in_xpu_black_list(const std::string& op_name);
using phi::backends::xpu::is_in_xpu_black_list;
#ifdef PADDLE_WITH_XPU_KP
bool is_xpu_kp_support_op(const std::string& op_name,
......
......@@ -16,6 +16,7 @@ if(WITH_GPU OR WITH_ROCM)
endif()
if(WITH_XPU)
add_subdirectory(xpu)
list(APPEND BACKENDS_SRCS xpu/xpu_context.cc xpu/xpu_info.cc)
endif()
......
# Phi-side XPU op-list target. Its source (xpu_op_list.cc) only needs
# glog (for VLOG), so phi no longer depends on the fluid xpu_op_list
# target — which is the point of this decoupling PR.
cc_library(
phi_xpu_op_list
SRCS xpu_op_list.cc
DEPS glog)
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef PADDLE_WITH_XPU
#include "paddle/phi/backends/xpu/xpu_op_list.h"
#include <glog/logging.h>
#include <mutex>
#include <string>
#include <unordered_set>
namespace phi {
namespace backends {
namespace xpu {
// ops_string contains op_list(e.g., 'mul,mul_grad'), parse the op string and
// insert op to op set
// Split `ops` (e.g. "mul,mul_grad") on `delim` and insert every piece
// into `op_set`. Empty pieces produced by leading, trailing, or
// adjacent delimiters are inserted as well, matching the original
// parsing behavior.
static void tokenize(const std::string& ops,
char delim,
std::unordered_set<std::string>* op_set) {
std::string::size_type start = 0;
std::string::size_type pos = ops.find(delim);
while (pos != std::string::npos) {
op_set->insert(ops.substr(start, pos - start));
start = pos + 1;
pos = ops.find(delim, start);
}
// Final segment (or the whole string when no delimiter was found).
op_set->insert(ops.substr(start));
}
// Returns true iff `op_name` appears in the comma-separated
// XPU_BLACK_LIST environment variable. The list is parsed lazily,
// exactly once, on the first call.
//
// NOTE: the previous implementation used hand-rolled double-checked
// locking on a plain (non-atomic) bool `inited`; reading that flag
// outside the mutex while another thread writes it under the mutex is
// a data race and therefore undefined behavior under the C++11 memory
// model. A function-local static initialized by an immediately-invoked
// lambda is guaranteed thread-safe by the standard ("magic statics")
// and needs no mutex at all.
bool is_in_xpu_black_list(const std::string& op_name) {
  static const std::unordered_set<std::string> xpu_black_list = [] {
    std::unordered_set<std::string> black_list;
    if (const char* env = std::getenv("XPU_BLACK_LIST")) {
      tokenize(std::string(env), ',', &black_list);
    }
    VLOG(3) << "XPU Black List: ";
    for (const auto& op : black_list) {
      VLOG(3) << op << " ";
    }
    return black_list;
  }();
  return xpu_black_list.count(op_name) > 0;
}
} // namespace xpu
} // namespace backends
} // namespace phi
#endif
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
// Only meaningful in XPU (Kunlun) builds; the whole interface is
// compiled out otherwise.
#ifdef PADDLE_WITH_XPU
#include <string>
namespace phi {
namespace backends {
namespace xpu {
// Returns true iff `op_name` appears in the comma-separated
// XPU_BLACK_LIST environment variable (parsed once, on first call,
// by the implementation in xpu_op_list.cc).
bool is_in_xpu_black_list(const std::string& op_name);
} // namespace xpu
} // namespace backends
} // namespace phi
#endif
......@@ -19,7 +19,7 @@ if(WITH_XPU)
cc_library(
kernel_factory
SRCS kernel_factory.cc
DEPS phi_enforce convert_utils xpu_op_list)
DEPS phi_enforce convert_utils phi_xpu_op_list)
else()
cc_library(
kernel_factory
......
......@@ -17,7 +17,7 @@
#include "glog/logging.h"
#include "paddle/phi/core/enforce.h"
#if defined(PADDLE_WITH_XPU) && !defined(PADDLE_WITH_XPU_KP)
#include "paddle/fluid/platform/device/xpu/xpu_op_list.h"
#include "paddle/phi/backends/xpu/xpu_op_list.h"
#include "paddle/phi/core/compat/convert_utils.h"
#endif
#include "paddle/phi/core/compat/op_utils.h"
......@@ -151,7 +151,7 @@ KernelResult KernelFactory::SelectKernelOrThrowError(
#if defined(PADDLE_WITH_XPU) && !defined(PADDLE_WITH_XPU_KP)
VLOG(6) << "fluid_op_name: " << TransToFluidOpName(kernel_name);
if ((FLAGS_enable_api_kernel_fallback && kernel_iter == iter->second.end()) ||
paddle::platform::is_in_xpu_black_list(TransToFluidOpName(kernel_name))
phi::backends::xpu::is_in_xpu_black_list(TransToFluidOpName(kernel_name))
#else
if ((FLAGS_enable_api_kernel_fallback && kernel_iter == iter->second.end())
#endif
......
......@@ -16,13 +16,11 @@
#include "paddle/phi/backends/xpu/enforce_xpu.h"
#include "paddle/phi/backends/xpu/xpu_context.h"
#include "paddle/phi/backends/xpu/xpu_header.h"
#include "paddle/phi/common/float16.h"
#include "paddle/phi/core/enforce.h"
#include "paddle/phi/core/kernel_registry.h"
// See Note [ Why still include the fluid headers? ]
#include "paddle/fluid/platform/device/xpu/xpu_header.h"
namespace phi {
template <typename T, typename Context>
......
......@@ -15,8 +15,8 @@
#include "paddle/phi/kernels/nonzero_kernel.h"
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/platform/device/xpu/xpu_header.h"
#include "paddle/phi/backends/xpu/xpu_context.h"
#include "paddle/phi/backends/xpu/xpu_header.h"
#include "paddle/phi/core/kernel_registry.h"
namespace phi {
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册