未验证 提交 be9cb946 编写于 作者: S Sonder 提交者: GitHub

Remove has_structed_kerenl and has_fluid_kernel (#56779)

* remove has_structed_kerenl and has_fluid_kernel

* add test_fused_layernorm_op to STATIC_BUILD_TESTS list

* open static_build flag

* remove distributed_fused_lamb_init from StaticBuildBlackList

* use initialized replacing IsInitialized

* recover codes

* delete useless codes

* close the flag
上级 201480d5
...@@ -47,8 +47,6 @@ std::set<std::string> StaticBuildBlackList = {
     "cinn_launch" /*: to handle subgraph infermeta*/,
     "run_program" /*: to handle scope output*/,
     "sparse_sparse_coo_tensor" /*: to handle sparse output*/,
-    "shuffle_batch",
-    "shuffle_batch_grad",
     "distributed_fused_lamb_init"};
namespace paddle {
...@@ -60,8 +58,6 @@ bool BlockCanBeStaticBuilt(const framework::BlockDesc& block) {
  // is_operator_base = (kernelCode >> 6) & 1
  // is_custom_op = (kernelCode >> 5) & 1
  // use_mkldnn = (kernelCode >> 4) & 1
- // has_fluid_kernel = (kernelCode >> 3) & 1
- // has_structed_kernel = (kernelCode >> 2) & 1
  using KernelCode = int8_t;
  std::set<std::pair<std::string, KernelCode>> invalid_ops;
  for (auto& op : block.AllOps()) {
...@@ -81,13 +77,11 @@ bool BlockCanBeStaticBuilt(const framework::BlockDesc& block) {
       use_mkldnn = attr.index() == 1 ? PADDLE_GET_CONST(int, attr)
                                      : PADDLE_GET_CONST(bool, attr);
     }
-    bool has_fluid_kernel = OperatorWithKernel::AllOpKernels().count(op_type);
     bool has_structured_kernel =
         phi::KernelFactory::Instance().HasStructuredKernel(op_type);
     KernelCode kernel_code = (in_black_list << 7) + (is_operator_base << 6) +
                              (is_custom_op << 5) + (use_mkldnn << 4) +
-                             (has_fluid_kernel << 3) +
                              (has_structured_kernel << 2);
     if (!OpsCanSkipedFakeAllocInStaticBuild.count(op_type)) {
       if (in_black_list ||
...@@ -107,8 +101,7 @@ bool BlockCanBeStaticBuilt(const framework::BlockDesc& block) {
              << ", is_operator_base = " << (item.second >> 6 & 1)
              << ", is_custom_op = " << (item.second >> 5 & 1)
              << ", use_mkldnn = " << (item.second >> 4 & 1)
-             << ", has_fluid_kernel = " << (item.second >> 3 & 1)
-             << ", has_structed_kerenl = " << (item.second >> 2 & 1) << "]\n";
+             << ", has_structed_kerenl = " << (item.second >> 2 & 1) << "]\n";
    }
    VLOG(1) << ss.str();
  }
......
...@@ -1290,6 +1290,7 @@ set(STATIC_BUILD_TESTS
     test_fuse_bn_act_pass
     test_fused_feedforward_op
     test_fused_feedforward_pass
+    test_fused_layernorm_op
     test_imperative_optimizer
     test_lamb_op
     test_layer_norm_op
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册