Unverified commit be41d276, authored by zhupengyang, committed by GitHub

[NPU] support relu6, conv with relu6, pool with ceil_mode (#3296)

Parent c23f5cb1
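In short: relu6 (f(x) = min(max(x, 0), 6)) is wired into the NPU subgraph bridges both as a standalone activation and as a fused epilogue in the conv converter, and the pool converter now forwards the ceil_mode attribute. As a rough orientation only (this snippet is not part of the diff; act_op stands for whatever activation node the bridge builds), the relu6 mapping boils down to the same two attribute calls used in the fused conv path below:

// Sketch, not from this commit: relu6 expressed on the NPU activation op,
// mirroring the fused conv+relu6 branch in the first hunk below.
act_op->set_attr_mode(CvtActMode("relu6"));  // activation mode for clipped ReLU
act_op->set_attr_coef(6.f);                  // clip threshold of 6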
......@@ -220,6 +220,8 @@ int ConvConverter(void* ctx, OpLite* op, KernelBase* kernel) {
     act_op->set_attr_mode(CvtActMode(act_type));
     if (act_type == "leaky_relu") {
       act_op->set_attr_negative_slope(leaky_relu_alpha);
+    } else if (act_type == "relu6") {
+      act_op->set_attr_coef(6.f);
     }
   }
......
......@@ -18,6 +18,7 @@ USE_SUBGRAPH_BRIDGE(sigmoid, kNPU);
 USE_SUBGRAPH_BRIDGE(relu, kNPU);
 USE_SUBGRAPH_BRIDGE(tanh, kNPU);
 USE_SUBGRAPH_BRIDGE(relu_clipped, kNPU);
+USE_SUBGRAPH_BRIDGE(relu6, kNPU);
 USE_SUBGRAPH_BRIDGE(leaky_relu, kNPU);
 USE_SUBGRAPH_BRIDGE(softsign, kNPU);
 USE_SUBGRAPH_BRIDGE(hard_sigmoid, kNPU);
......
......@@ -99,10 +99,8 @@ int PoolConverter(void* ctx, OpLite* op, KernelBase* kernel) {
                      ksize);
   // ceil mode
-  int ceil_mode = 0;
-  if (op_info->HasAttr("ceil_mode")) {
-    ceil_mode = op_info->GetAttr<bool>("ceil_mode") ? 1 : 0;
-  }
+  bool ceil_mode =
+      op_info->HasAttr("ceil_mode") && op_info->GetAttr<bool>("ceil_mode");
   // Pooling node
   auto pool_node = graph->Add<ge::op::Pooling>(out_name);
......@@ -112,12 +110,14 @@ int PoolConverter(void* ctx, OpLite* op, KernelBase* kernel) {
   pool_op->set_attr_pad_mode(pad_mode);
   pool_op->set_attr_global_pooling(global_pooling);
   pool_op->set_attr_window(ge::AttrValue::LIST_INT(ksize.begin(), ksize.end()));
-  pool_op->set_attr_pad(ge::AttrValue::LIST_INT{
-      paddings[0], paddings[1], paddings[2], paddings[3]});
+  pool_op->set_attr_pad(
+      ge::AttrValue::LIST_INT(paddings.begin(), paddings.end()));
   pool_op->set_attr_stride(
       ge::AttrValue::LIST_INT(strides.begin(), strides.end()));
-  pool_op->set_attr_ceil_mode(ceil_mode);
-  // pool_op->set_attr_data_mode(data_mode);
+  if (ceil_mode) {
+    pool_op->set_attr_ceil_mode(1);
+    pool_op->set_attr_data_mode(0);
+  }
   return REBUILD_WHEN_SHAPE_CHANGED;
 }
......
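For readers unfamiliar with ceil_mode (explanatory aside, not part of the diff): it only changes how the pooled output size is rounded. A minimal standalone C++ sketch of the usual shape rule, evaluated on the same 6x6 input, 3x3 window, stride 2, zero padding exercised by TestPoolCeilMode further down:

// Standalone sketch, assuming the common pooling output-size formula;
// ceil_mode=true rounds the division up instead of down.
#include <cmath>
#include <cstdio>

int PoolOutDim(int in, int ksize, int pad, int stride, bool ceil_mode) {
  double raw = static_cast<double>(in + 2 * pad - ksize) / stride;
  return static_cast<int>(ceil_mode ? std::ceil(raw) : std::floor(raw)) + 1;
}

int main() {
  std::printf("floor: %d\n", PoolOutDim(6, 3, 0, 2, false));  // 2 -> 2x2 output
  std::printf("ceil:  %d\n", PoolOutDim(6, 3, 0, 2, true));   // 3 -> 3x3 output
  return 0;
}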
......@@ -425,19 +425,24 @@ TEST(Activation_swish, precision) {
 TEST(Activation_relu6, precision) {
   LOG(INFO) << "test relu6 op...";
-#ifdef LITE_WITH_ARM
-  Place place(TARGET(kARM));
+  Place place;
+  float abs_error = 2e-5;
+#if defined(LITE_WITH_NPU)
+  place = TARGET(kNPU);
+  abs_error = 1e-2;  // Using fp16 in NPU
+#elif defined(LITE_WITH_ARM)
+  place = TARGET(kARM);
+#else
+  return;
+#endif
   for (auto dims : std::vector<std::vector<int64_t>>{
            {1, 3, 2, 4}, {2, 3, 4}, {5, 4}, {8}}) {
-    for (auto slope : {0.01, 0.1}) {
-      std::unique_ptr<arena::TestCase> tester(new ActivationComputeTester(
-          place, "def", 0.01, 6., "all", 0., DDim(dims), "relu6", RELU6));
-      arena::Arena arena(std::move(tester), place, 2e-5);
-      arena.TestPrecision();
-    }
+    std::unique_ptr<arena::TestCase> tester(new ActivationComputeTester(
+        place, "def", 0.01, 6., "all", 0., DDim(dims), "relu6", RELU6));
+    arena::Arena arena(std::move(tester), place, abs_error);
+    arena.TestPrecision();
   }
-#endif
 }
 TEST(Activation_log, precision) {
......
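Aside on the relaxed tolerance (not part of the diff): the NPU path runs in fp16, so the relu6 test compares against the fp32 reference with abs_error = 1e-2 instead of 2e-5. The reference itself is just an elementwise clip; a hypothetical one-liner equivalent:

// Hypothetical reference, for illustration only; the real baseline comes
// from ActivationComputeTester.
#include <algorithm>
inline float relu6_ref(float x) { return std::min(std::max(x, 0.f), 6.f); }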
......@@ -276,9 +276,24 @@ void TestPoolHelper(Place place,
                     std::string pooling_type,
                     std::vector<int> strides,
                     std::vector<int> paddings,
-                    std::vector<int> ksize) {
-  std::unique_ptr<arena::TestCase> tester(new PoolComputeTest(
-      place, "def", DDim(dims), pooling_type, false, strides, paddings, ksize));
+                    std::vector<int> ksize,
+                    bool exclusive = true,
+                    bool ceil_mode = false,
+                    bool adaptive = false,
+                    std::string padding_algorithm = "") {
+  std::unique_ptr<arena::TestCase> tester(
+      new PoolComputeTest(place,
+                          "def",
+                          DDim(dims),
+                          pooling_type,
+                          false,
+                          strides,
+                          paddings,
+                          ksize,
+                          exclusive,
+                          ceil_mode,
+                          adaptive,
+                          padding_algorithm));
   arena::Arena arena(std::move(tester), place, abs_error);
   arena.TestPrecision();
 }
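Design note (aside, not part of the diff): because the four new parameters are defaulted, every existing TestPoolHelper call site keeps compiling unchanged; only the new ceil-mode test below passes the extra flags. Hypothetical calls, with argument values chosen purely for illustration:

// Old-style call still valid (exclusive=true, ceil_mode=false, adaptive=false,
// padding_algorithm="" by default):
TestPoolHelper(place, abs_error, {2, 3, 6, 6}, "avg", {2, 2}, {0, 0, 0, 0}, {3, 3});
// Ceil-mode coverage passes exclusive and ceil_mode explicitly:
TestPoolHelper(place, abs_error, {2, 3, 6, 6}, "max", {2, 2}, {0, 0, 0, 0}, {3, 3}, true, true);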
......@@ -345,6 +360,20 @@ void TestPoolKsize(Place place, float abs_error = 2e-5) {
   }
 }
+void TestPoolCeilMode(Place place, float abs_error = 2e-5) {
+  for (auto pooling_type : {"max", "avg"}) {
+    TestPoolHelper(place,
+                   abs_error,
+                   {2, 3, 6, 6},
+                   pooling_type,
+                   {2, 2},
+                   {0, 0, 0, 0},
+                   {3, 3},
+                   true,
+                   true);
+  }
+}
 TEST(Pool, precision) {
   LOG(INFO) << "test pool op";
   float abs_error = 2e-5;
......@@ -363,6 +392,7 @@ TEST(Pool, precision) {
   TestPoolStrides(place, abs_error);
   TestPoolPaddings(place, abs_error);
   TestPoolKsize(place, abs_error);
+  TestPoolCeilMode(place, abs_error);
 }
 }  // namespace lite
......