Unverified Commit 06b32b38 authored by QingshuChen, committed by GitHub

add xpu_support op function (#48606)

*test=kunlun
Parent 06a92c50
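In substance, this commit retires the fluid-side helpers is_xpu_support_op(op, OpKernelType) and is_xpu_kp_support_op(op, OpKernelType) and routes every call site through a single phi-level is_xpu_support_op(fluid_op_name, phi::DataType), which also folds in the is_in_xpu_black_list test. A minimal, self-contained sketch of the call-site pattern before and after (the stub types and body below are illustrative; the real signatures live in the headers this commit touches):

#include <string>

// Stubs standing in for the real Paddle declarations (illustration only).
namespace phi { enum class DataType { FLOAT32 }; }
namespace paddle { namespace platform {
inline bool is_xpu_support_op(const std::string& /*op*/, phi::DataType /*t*/) {
  return true;  // stub body; the real lookup is in phi/backends/xpu/xpu_op_list.cc
}
}}  // namespace paddle::platform

// Before: two conjuncts at every call site --
//   is_xpu_support_op(type_, kernel_type) && !is_in_xpu_black_list(type_)
// After: one call; the black-list test now lives inside is_xpu_support_op.
int main() {
  bool ok = paddle::platform::is_xpu_support_op("mul", phi::DataType::FLOAT32);
  return ok ? 0 : 1;
}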
......@@ -1319,9 +1319,10 @@ bool OperatorWithKernel::SupportXPU() const {
op_kernels.end(),
[this](OpKernelMap::const_reference kern_pair) {
return platform::is_xpu_place(kern_pair.first.place_) &&
paddle::platform::is_xpu_support_op(type_,
kern_pair.first) &&
!paddle::platform::is_in_xpu_black_list(type_);
paddle::platform::is_xpu_support_op(
type_,
framework::TransToPhiDataType(
kern_pair.first.data_type_));
});
}
}
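The SupportXPU() hunk above only swaps the predicate body inside the any_of-style scan of the op's kernel map; the scan itself is unchanged. A self-contained sketch of that shape, with std types standing in for OpKernelMap (all names illustrative):

#include <algorithm>
#include <map>
#include <string>
#include <utility>

int main() {
  // Toy stand-in for OpKernelMap: each key pairs a place name with a dtype id.
  std::map<std::pair<std::string, int>, int> op_kernels{
      {{"xpu", /*fp32*/ 5}, 1}, {{"cpu", /*fp32*/ 5}, 2}};
  bool support_xpu = std::any_of(
      op_kernels.begin(), op_kernels.end(), [](const auto& kern_pair) {
        // Mirrors the new predicate: place check && dtype-level support check.
        bool is_xpu_place = kern_pair.first.first == "xpu";
        bool dtype_supported = kern_pair.first.second == 5;
        return is_xpu_place && dtype_supported;
      });
  return support_xpu ? 0 : 1;
}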
......@@ -1409,8 +1410,8 @@ bool OperatorWithKernel::SupportsKernelType(
#if defined(PADDLE_WITH_XPU) && !defined(PADDLE_WITH_XPU_KP)
if (paddle::platform::is_xpu_place(kernel_type.place_)) {
return kernel_iter != kernels.end() &&
paddle::platform::is_xpu_support_op(type_, kernel_type) &&
!paddle::platform::is_in_xpu_black_list(type_);
paddle::platform::is_xpu_support_op(
type_, framework::TransToPhiDataType(kernel_type.data_type_));
}
#endif
......@@ -1418,7 +1419,8 @@ bool OperatorWithKernel::SupportsKernelType(
if (paddle::platform::is_xpu_place(kernel_type.place_)) {
bool use_xpu_kp_kernel_rt =
FLAGS_run_kp_kernel &&
paddle::platform::is_xpu_kp_support_op(type_, kernel_type);
paddle::platform::is_xpu_support_op(
type_, framework::TransToPhiDataType(kernel_type.data_type_));
bool use_xpu_kp_kernel_debug =
paddle::platform::is_in_xpu_kpwhite_list(type_);
bool is_xpu_kp_support = (use_xpu_kp_kernel_rt || use_xpu_kp_kernel_debug);
......@@ -1428,8 +1430,8 @@ bool OperatorWithKernel::SupportsKernelType(
return kernels.find(tmp_kernel_type) != kernels.end();
}
return kernel_iter != kernels.end() &&
paddle::platform::is_xpu_support_op(type_, kernel_type) &&
!paddle::platform::is_in_xpu_black_list(type_);
paddle::platform::is_xpu_support_op(
type_, framework::TransToPhiDataType(kernel_type.data_type_));
}
#endif
......@@ -1591,7 +1593,8 @@ void OperatorWithKernel::RunImpl(const Scope& scope,
if (paddle::platform::is_xpu_place(kernel_type_->place_)) {
bool use_xpu_kp_kernel_rt =
FLAGS_run_kp_kernel &&
paddle::platform::is_xpu_kp_support_op(type_, *kernel_type_);
paddle::platform::is_xpu_support_op(
type_, framework::TransToPhiDataType(kernel_type_->data_type_));
bool use_xpu_kp_kernel_debug =
paddle::platform::is_in_xpu_kpwhite_list(type_);
if (use_xpu_kp_kernel_rt) {
......@@ -1668,7 +1671,8 @@ void OperatorWithKernel::RunImpl(const Scope& scope,
if (paddle::platform::is_xpu_place(kernel_type_->place_)) {
bool use_xpu_kp_kernel_rt =
FLAGS_run_kp_kernel &&
paddle::platform::is_xpu_kp_support_op(type_, *kernel_type_);
paddle::platform::is_xpu_support_op(
type_, framework::TransToPhiDataType(kernel_type_->data_type_));
bool use_xpu_kp_kernel_debug =
paddle::platform::is_in_xpu_kpwhite_list(type_);
if (use_xpu_kp_kernel_rt) {
......@@ -1709,14 +1713,15 @@ void OperatorWithKernel::RunImpl(const Scope& scope,
#if defined(PADDLE_WITH_XPU)
bool is_xpu_unsupport =
paddle::platform::is_xpu_place(kernel_type_->place_) &&
!paddle::platform::is_xpu_support_op(type_, *kernel_type_.get()) ||
paddle::platform::is_in_xpu_black_list(type_);
!paddle::platform::is_xpu_support_op(
type_, framework::TransToPhiDataType(kernel_type_->data_type_));
#endif
#ifdef PADDLE_WITH_XPU_KP
bool use_xpu_kp_kernel_rt =
paddle::platform::is_xpu_place(kernel_type_->place_) &&
FLAGS_run_kp_kernel &&
paddle::platform::is_xpu_kp_support_op(type_, *kernel_type_);
paddle::platform::is_xpu_support_op(
type_, framework::TransToPhiDataType(kernel_type_->data_type_));
bool use_xpu_kp_kernel_debug =
paddle::platform::is_xpu_place(kernel_type_->place_) &&
paddle::platform::is_in_xpu_kpwhite_list(type_);
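One subtlety in the is_xpu_unsupport hunk above: the old expression is_xpu_place(...) && !is_xpu_support_op(...) || is_in_xpu_black_list(type_) parses as (a && !b) || c, so a black-listed op read as XPU-unsupported even when the kernel was not on an XPU place. Folding the black list into is_xpu_support_op removes the stray || term. A minimal check of that precedence (standard C++ only, values illustrative):

#include <cassert>

int main() {
  bool on_xpu = false, supported = true, blacklisted = true;
  // Old form: && binds tighter than ||, so this is
  // (on_xpu && !supported) || blacklisted.
  bool old_form = on_xpu && !supported || blacklisted;
  assert(old_form);  // true even though we are not on an XPU place
  // New form: the black-list test lives inside the support query,
  // so the whole result stays gated on the place check.
  bool support_op = supported && !blacklisted;  // what is_xpu_support_op now returns
  bool new_form = on_xpu && !support_op;
  assert(!new_form);
  return 0;
}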
......@@ -2051,8 +2056,9 @@ void OperatorWithKernel::ChooseKernel(const ExecutionContext& ctx) const {
#if defined(PADDLE_WITH_XPU) && !defined(PADDLE_WITH_XPU_KP)
if (platform::is_xpu_place(expected_kernel_key.place_) &&
(kernel_iter == kernels.end() ||
!paddle::platform::is_xpu_support_op(type_, expected_kernel_key) ||
paddle::platform::is_in_xpu_black_list(type_))) {
!paddle::platform::is_xpu_support_op(
type_,
framework::TransToPhiDataType(expected_kernel_key.data_type_)))) {
VLOG(3) << "fluid missing XPU kernel: " << type_
<< ", expected_kernel_key:" << expected_kernel_key
<< ", fallbacking to CPU one!";
......@@ -2065,7 +2071,9 @@ void OperatorWithKernel::ChooseKernel(const ExecutionContext& ctx) const {
if (paddle::platform::is_xpu_place(expected_kernel_key.place_)) {
bool use_xpu_kp_kernel_rt =
FLAGS_run_kp_kernel &&
paddle::platform::is_xpu_kp_support_op(type_, expected_kernel_key);
paddle::platform::is_xpu_support_op(
type_,
framework::TransToPhiDataType(expected_kernel_key.data_type_));
bool use_xpu_kp_kernel_debug =
paddle::platform::is_in_xpu_kpwhite_list(type_);
if (use_xpu_kp_kernel_rt) {
......@@ -2093,9 +2101,8 @@ void OperatorWithKernel::ChooseKernel(const ExecutionContext& ctx) const {
<< ", using_kernel_key:" << expected_kernel_key;
}
}
bool is_xpu_unsupport =
(!paddle::platform::is_xpu_support_op(type_, expected_kernel_key) ||
paddle::platform::is_in_xpu_black_list(type_));
bool is_xpu_unsupport = (!paddle::platform::is_xpu_support_op(
type_, framework::TransToPhiDataType(expected_kernel_key.data_type_)));
if (!is_xpu_kp_support &&
(kernel_iter == kernels.end() || is_xpu_unsupport)) {
VLOG(3) << "fluid missing XPU kernel: " << type_
......
......@@ -255,9 +255,9 @@ PreparedOp PrepareImpl(
#if defined(PADDLE_WITH_XPU)
bool is_xpu_unsupport =
paddle::platform::is_xpu_place(expected_kernel_key.place_) &&
!paddle::platform::is_xpu_support_op(op.Type(),
expected_kernel_key) ||
paddle::platform::is_in_xpu_black_list(op.Type());
!paddle::platform::is_xpu_support_op(
op.Type(),
framework::TransToPhiDataType(expected_kernel_key.data_type_));
#endif
#ifdef PADDLE_WITH_MLU
......@@ -292,8 +292,10 @@ PreparedOp PrepareImpl(
#ifdef PADDLE_WITH_XPU_KP
if (paddle::platform::is_xpu_place(expected_kernel_key.place_)) {
bool use_xpu_kp_kernel_rt =
FLAGS_run_kp_kernel && paddle::platform::is_xpu_kp_support_op(
op.Type(), expected_kernel_key);
FLAGS_run_kp_kernel &&
paddle::platform::is_xpu_support_op(
op.Type(),
framework::TransToPhiDataType(expected_kernel_key.data_type_));
bool use_xpu_kp_kernel_debug =
paddle::platform::is_in_xpu_kpwhite_list(op.Type());
if (use_xpu_kp_kernel_rt) {
......@@ -368,7 +370,9 @@ PreparedOp PrepareImpl(
bool use_xpu_kp_kernel_rt =
paddle::platform::is_xpu_place(expected_kernel_key.place_) &&
FLAGS_run_kp_kernel &&
paddle::platform::is_xpu_kp_support_op(op.Type(), expected_kernel_key);
paddle::platform::is_xpu_support_op(
op.Type(),
framework::TransToPhiDataType(expected_kernel_key.data_type_));
bool use_xpu_kp_kernel_debug =
paddle::platform::is_xpu_place(expected_kernel_key.place_) &&
paddle::platform::is_in_xpu_kpwhite_list(op.Type());
......
......@@ -15,25 +15,14 @@ limitations under the License. */
#include <string>
#include <unordered_set>
#include "paddle/fluid/platform/device/xpu/xpu1_op_list.h"
#include "paddle/fluid/platform/device/xpu/xpu2_op_list.h"
#include "paddle/fluid/framework/convert_utils.h"
#include "paddle/fluid/platform/device/xpu/xpu_info.h"
#include "paddle/fluid/platform/device/xpu/xpu_op_kpfirst_list.h"
#include "paddle/phi/backends/xpu/xpu_op_list.h"
namespace paddle {
namespace platform {
bool is_xpu_support_op(const std::string& op_name, const pOpKernelType& type) {
auto v = get_xpu_version(type.place_.device);
auto& ops = (v == phi::backends::xpu::XPUVersion::XPU1) ? get_kl1_ops()
: get_kl2_ops();
if (ops.find(op_name) != ops.end() &&
ops[op_name].find(type) != ops[op_name].end()) {
return true;
}
return false;
}
// ops_string contains op_list(e.g., 'mul,mul_grad'), parse the op string and
// insert op to op set
static void tokenize(const std::string& ops,
......@@ -50,18 +39,6 @@ static void tokenize(const std::string& ops,
}
#ifdef PADDLE_WITH_XPU_KP
bool is_xpu_kp_support_op(const std::string& op_name,
const pOpKernelType& type) {
auto v = get_xpu_version(type.place_.device);
auto& ops = (v == phi::backends::xpu::XPUVersion::XPU1) ? get_kl1_ops()
: get_kp_ops();
if (ops.find(op_name) != ops.end() &&
ops[op_name].find(type) != ops[op_name].end()) {
return true;
}
return false;
}
bool is_in_xpu_kpwhite_list(const std::string& op_name) {
static bool inited = false;
static std::unordered_set<std::string> xpu_kpwhite_list;
......@@ -88,49 +65,37 @@ bool is_in_xpu_kpwhite_list(const std::string& op_name) {
}
#endif
#ifdef PADDLE_WITH_XPU_KP
std::vector<vartype::Type> get_xpu_kp_op_support_type(
const std::string& op_name, phi::backends::xpu::XPUVersion version) {
std::vector<vartype::Type> res;
auto& ops = version == phi::backends::xpu::XPUVersion::XPU1 ? get_kl1_ops()
: get_kp_ops();
if (ops.find(op_name) != ops.end()) {
XPUKernelSet& type_set = ops[op_name];
for (auto& item : type_set) {
res.push_back(item.data_type_);
}
}
return res;
}
#endif
std::vector<vartype::Type> get_xpu_op_support_type(
const std::string& op_name, phi::backends::xpu::XPUVersion version) {
auto& ops = version == phi::backends::xpu::XPUVersion::XPU1
? phi::backends::xpu::get_kl1_ops()
: phi::backends::xpu::get_kl2_ops();
std::vector<vartype::Type> res;
auto& ops = version == phi::backends::xpu::XPUVersion::XPU1 ? get_kl1_ops()
: get_kl2_ops();
if (ops.find(op_name) != ops.end()) {
XPUKernelSet& type_set = ops[op_name];
for (auto& item : type_set) {
res.push_back(item.data_type_);
auto& dtypes = ops[op_name];
for (auto& type : dtypes) {
res.push_back(static_cast<vartype::Type>(phi::TransToProtoVarType(type)));
}
}
return res;
}
XPUOpListMap get_xpu_op_list(phi::backends::xpu::XPUVersion version) {
auto& ops = version == phi::backends::xpu::XPUVersion::XPU1
? phi::backends::xpu::get_kl1_ops()
: phi::backends::xpu::get_kl2_ops();
XPUOpListMap res;
auto& ops = version == phi::backends::xpu::XPUVersion::XPU1 ? get_kl1_ops()
: get_kl2_ops();
for (auto& op : ops) {
std::vector<vartype::Type> op_vartypes;
std::vector<vartype::Type> op_types;
for (auto& item : op.second) {
op_vartypes.push_back(item.data_type_);
op_types.push_back(
static_cast<vartype::Type>(phi::TransToProtoVarType(item)));
}
res[op.first] = std::move(op_vartypes);
res[op.first] = std::move(op_types);
}
return res;
}
} // namespace platform
} // namespace paddle
#endif
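Both get_xpu_op_support_type and get_xpu_op_list now read phi::DataType from the shared phi tables and convert back to the legacy proto::VarType::Type only at this fluid-facing boundary, via phi::TransToProtoVarType. A self-contained analogue of that edge conversion, with stand-in enums rather than Paddle's real types and values:

#include <string>
#include <unordered_map>
#include <unordered_set>
#include <vector>

// Stand-ins for phi::DataType and proto::VarType::Type (illustration only).
enum class PhiDataType { FLOAT16, FLOAT32 };
enum ProtoVarType { FP16, FP32 };

// Analogue of phi::TransToProtoVarType, applied only at the boundary.
ProtoVarType TransToProtoVarTypeSketch(PhiDataType t) {
  return t == PhiDataType::FLOAT16 ? FP16 : FP32;
}

using OpMap = std::unordered_map<std::string, std::unordered_set<PhiDataType>>;

std::vector<ProtoVarType> get_op_support_type_sketch(const OpMap& ops,
                                                     const std::string& op_name) {
  std::vector<ProtoVarType> res;
  auto it = ops.find(op_name);
  if (it == ops.end()) return res;
  for (auto t : it->second) {
    res.push_back(TransToProtoVarTypeSketch(t));  // convert at the edge
  }
  return res;
}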
......@@ -21,22 +21,17 @@ limitations under the License. */
namespace paddle {
namespace platform {
using pOpKernelType = paddle::framework::OpKernelType;
using vartype = paddle::framework::proto::VarType;
using XPUOpListMap =
std::unordered_map<std::string, std::vector<vartype::Type>>;
bool is_xpu_support_op(const std::string& op_name, const pOpKernelType& type);
using phi::backends::xpu::is_in_xpu_black_list;
using phi::backends::xpu::is_xpu_support_op;
#ifdef PADDLE_WITH_XPU_KP
bool is_xpu_kp_support_op(const std::string& op_name,
const pOpKernelType& type);
bool is_in_xpu_kpwhite_list(const std::string& op_name);
std::vector<vartype::Type> get_xpu_kp_op_support_type(
const std::string& op_name, phi::backends::xpu::XPUVersion version);
#endif
using vartype = paddle::framework::proto::VarType;
using XPUOpListMap =
std::unordered_map<std::string, std::vector<vartype::Type>>;
std::vector<vartype::Type> get_xpu_op_support_type(
const std::string& op_name, phi::backends::xpu::XPUVersion version);
XPUOpListMap get_xpu_op_list(phi::backends::xpu::XPUVersion version);
......
......@@ -456,7 +456,7 @@ void BindPlace(pybind11::module &m) { // NOLINT
#ifdef PADDLE_WITH_XPU_KP
m.def("get_xpu_device_op_support_types",
[](const std::string &op_name, phi::backends::xpu::XPUVersion version) {
return platform::get_xpu_kp_op_support_type(op_name, version);
return platform::get_xpu_op_support_type(op_name, version);
});
#else
m.def("get_xpu_device_op_support_types",
......
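With get_xpu_kp_op_support_type removed, the KP build of get_xpu_device_op_support_types now binds the same merged query as the non-KP build. A stand-alone pybind11 sketch of such a binding (the module name and stub body are hypothetical; the real binding lives in Paddle's pybind layer):

#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include <string>
#include <vector>

namespace py = pybind11;

// Stub standing in for platform::get_xpu_op_support_type (illustration only).
static std::vector<int> get_xpu_op_support_type(const std::string& op_name,
                                                int xpu_version) {
  return {4, 5};  // e.g. proto VarType codes the op supports
}

PYBIND11_MODULE(xpu_op_list_demo, m) {  // hypothetical demo module
  m.def("get_xpu_device_op_support_types",
        [](const std::string& op_name, int version) {
          return get_xpu_op_support_type(op_name, version);
        });
}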
cc_library(
phi_xpu_op_list
SRCS xpu_op_list.cc
SRCS xpu_op_list.cc xpu1_op_list.cc xpu2_op_list.cc
DEPS glog)
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef PADDLE_WITH_XPU
#include <string>
#include <unordered_map>
#include <unordered_set>
#include "paddle/phi/backends/xpu/xpu_op_list.h"
namespace phi {
namespace backends {
namespace xpu {
XPUOpMap& get_kl1_ops() {
// ops supported on KL1, keyed by op_name and data_type
static XPUOpMap s_xpu1_kernels{
// AddMore
};
PD_THROW("get_kl1_ops unsupported");
return s_xpu1_kernels;
}
} // namespace xpu
} // namespace backends
} // namespace phi
#endif
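Note that the new phi-side get_kl1_ops() ships an intentionally empty table and throws before returning it, so any KL1 lookup fails loudly rather than silently reporting no support. A standard-C++ analogue of that guard, with PD_THROW replaced by a plain exception for illustration:

#include <stdexcept>
#include <string>
#include <unordered_map>
#include <unordered_set>

using XPUKernelSetSketch = std::unordered_set<int>;
using XPUOpMapSketch = std::unordered_map<std::string, XPUKernelSetSketch>;

XPUOpMapSketch& get_kl1_ops_sketch() {
  static XPUOpMapSketch s_xpu1_kernels{};  // intentionally empty
  // Analogue of PD_THROW("get_kl1_ops unsupported"): reaching here is an error.
  throw std::runtime_error("get_kl1_ops unsupported");
  return s_xpu1_kernels;  // unreachable; mirrors the shape of the real function
}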
This diff is collapsed.
......@@ -10,11 +10,11 @@ See the License for the specific language governing permissions and
limitations under the License. */
#ifdef PADDLE_WITH_XPU
#include "paddle/phi/backends/xpu/xpu_op_list.h"
#include <glog/logging.h>
#include <mutex>
#include <string>
#include <unordered_set>
#include "paddle/phi/backends/xpu/xpu_info.h"
namespace phi {
namespace backends {
......@@ -35,7 +35,7 @@ static void tokenize(const std::string& ops,
op_set->insert(ops.substr(beg));
}
bool is_in_xpu_black_list(const std::string& op_name) {
bool is_in_xpu_black_list(const std::string& fluid_op_name) {
static bool inited = false;
static std::unordered_set<std::string> xpu_black_list;
static std::mutex s_mtx;
......@@ -54,7 +54,20 @@ bool is_in_xpu_black_list(const std::string& op_name) {
}
}
}
if (xpu_black_list.find(op_name) != xpu_black_list.end()) {
if (xpu_black_list.find(fluid_op_name) != xpu_black_list.end()) {
return true;
}
return false;
}
bool is_xpu_support_op(const std::string& fluid_op_name,
const phi::DataType type) {
if (is_in_xpu_black_list(fluid_op_name)) return false;
auto v = get_xpu_version(0);
auto& ops = (v == phi::backends::xpu::XPUVersion::XPU1) ? get_kl1_ops()
: get_kl2_ops();
if (ops.find(fluid_op_name) != ops.end() &&
ops[fluid_op_name].find(type) != ops[fluid_op_name].end()) {
return true;
}
return false;
......
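The new is_xpu_support_op above is the single gate the earlier call sites now rely on: black list first, then a dtype lookup in the per-version op table. A self-contained analogue using only std types (the real tables, get_xpu_version, and phi::DataType come from phi; names here are sketch-only):

#include <iostream>
#include <string>
#include <unordered_map>
#include <unordered_set>

enum class DataType { FLOAT32, FLOAT16, INT64 };  // stand-in for phi::DataType

using KernelSet = std::unordered_set<DataType>;
using OpMap = std::unordered_map<std::string, KernelSet>;

static OpMap& get_kl2_ops_sketch() {  // stand-in for get_kl2_ops()
  static OpMap ops{{"mul", {DataType::FLOAT32, DataType::FLOAT16}}};
  return ops;
}

static bool is_in_black_list_sketch(const std::string& op) {
  static const std::unordered_set<std::string> black{"banned_op"};
  return black.count(op) > 0;
}

// Mirrors the new contract: black-listed ops are unsupported for every dtype.
bool is_support_op_sketch(const std::string& op, DataType dtype) {
  if (is_in_black_list_sketch(op)) return false;
  auto& ops = get_kl2_ops_sketch();
  auto it = ops.find(op);
  return it != ops.end() && it->second.count(dtype) > 0;
}

int main() {
  std::cout << std::boolalpha
            << is_support_op_sketch("mul", DataType::FLOAT32) << '\n'        // true
            << is_support_op_sketch("mul", DataType::INT64) << '\n'          // false
            << is_support_op_sketch("banned_op", DataType::FLOAT32) << '\n'; // false
}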
......@@ -12,12 +12,23 @@ limitations under the License. */
#ifdef PADDLE_WITH_XPU
#include <string>
#include <unordered_map>
#include <unordered_set>
#include "paddle/phi/common/data_type.h"
namespace phi {
namespace backends {
namespace xpu {
bool is_in_xpu_black_list(const std::string& op_name);
using XPUKernelSet = std::unordered_set<phi::DataType>;
using XPUOpMap = std::unordered_map<std::string, XPUKernelSet>;
XPUOpMap& get_kl1_ops();
XPUOpMap& get_kl2_ops();
bool is_in_xpu_black_list(const std::string& fluid_op_name);
bool is_xpu_support_op(const std::string& fluid_op_name,
const phi::DataType type);
} // namespace xpu
} // namespace backends
......
......@@ -151,7 +151,8 @@ KernelResult KernelFactory::SelectKernelOrThrowError(
#if defined(PADDLE_WITH_XPU) && !defined(PADDLE_WITH_XPU_KP)
VLOG(6) << "fluid_op_name: " << TransToFluidOpName(kernel_name);
if ((FLAGS_enable_api_kernel_fallback && kernel_iter == iter->second.end()) ||
phi::backends::xpu::is_in_xpu_black_list(TransToFluidOpName(kernel_name))
!phi::backends::xpu::is_xpu_support_op(TransToFluidOpName(kernel_name),
kernel_key.dtype())
#else
if ((FLAGS_enable_api_kernel_fallback && kernel_iter == iter->second.end())
#endif
......
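In SelectKernelOrThrowError, the CPU-fallback branch now fires either when fallback is enabled and the kernel is missing, or when the (fluid op name, dtype) pair is not XPU-supported, which after this commit also covers black-listed ops. A minimal truth check of that predicate (function and variable names illustrative):

#include <cassert>

bool should_fall_back_to_cpu(bool fallback_enabled, bool kernel_missing,
                             bool xpu_supported) {
  return (fallback_enabled && kernel_missing) || !xpu_supported;
}

int main() {
  // Black-listed op: is_xpu_support_op reports false, so we always fall back.
  assert(should_fall_back_to_cpu(false, false, false));
  // Supported op with a registered kernel: stay on XPU.
  assert(!should_fall_back_to_cpu(true, false, true));
  return 0;
}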