Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
BaiXuePrincess
Paddle
提交
702bce57
P
Paddle
项目概览
BaiXuePrincess
/
Paddle
与 Fork 源项目一致
Fork自
PaddlePaddle / Paddle
通知
1
Star
1
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
P
Paddle
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
未验证
提交
702bce57
编写于
2月 11, 2022
作者:
F
fwenguang
提交者:
GitHub
2月 11, 2022
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
[MLU] add pool2d and pool2d_grad mlu kernel (#39453)
上级
d25a7f9e
变更
3
显示空白变更内容
内联
并排
Showing
3 changed files
with
265 additions
and
14 deletions
+265
-14
paddle/fluid/operators/mlu/mlu_baseop.cc
paddle/fluid/operators/mlu/mlu_baseop.cc
+10
-9
paddle/fluid/operators/mlu/mlu_baseop.h
paddle/fluid/operators/mlu/mlu_baseop.h
+5
-5
paddle/fluid/operators/pool_op_mlu.cc
paddle/fluid/operators/pool_op_mlu.cc
+250
-0
未找到文件。
paddle/fluid/operators/mlu/mlu_baseop.cc
浏览文件 @
702bce57
...
@@ -224,11 +224,13 @@ MLUCnnlActivationDesc::~MLUCnnlActivationDesc() {
...
@@ -224,11 +224,13 @@ MLUCnnlActivationDesc::~MLUCnnlActivationDesc() {
MLUCnnlPoolingDesc
::
MLUCnnlPoolingDesc
(
MLUCnnlPoolingDesc
::
MLUCnnlPoolingDesc
(
const
cnnlPoolingMode_t
mode
,
const
cnnlNanPropagation_t
maxpooling_nan_opt
,
const
cnnlPoolingMode_t
mode
,
const
cnnlNanPropagation_t
maxpooling_nan_opt
,
int
window_rows
,
int
window_cols
,
int64_t
pad_up
,
int64_t
pad_down
,
int
window_rows
,
int
window_cols
,
int64_t
pad_up
,
int64_t
pad_down
,
int64_t
pad_left
,
int64_t
pad_right
,
int
row_stride
,
int
col_stride
)
{
int64_t
pad_left
,
int64_t
pad_right
,
int
row_stride
,
int
col_stride
,
int
row_dilation
,
int
col_dilation
,
bool
ceil_mode
)
{
PADDLE_ENFORCE_MLU_SUCCESS
(
cnnlCreatePoolingDescriptor
(
&
pooling_desc_
));
PADDLE_ENFORCE_MLU_SUCCESS
(
cnnlCreatePoolingDescriptor
(
&
pooling_desc_
));
PADDLE_ENFORCE_MLU_SUCCESS
(
cnnlSetPooling2dDescriptor
(
PADDLE_ENFORCE_MLU_SUCCESS
(
cnnlSetPooling2dDescriptor
_v2
(
pooling_desc_
,
mode
,
maxpooling_nan_opt
,
window_rows
,
window_cols
,
pad_up
,
pooling_desc_
,
mode
,
maxpooling_nan_opt
,
window_rows
,
window_cols
,
pad_up
,
pad_down
,
pad_left
,
pad_right
,
row_stride
,
col_stride
));
pad_down
,
pad_left
,
pad_right
,
row_stride
,
col_stride
,
row_dilation
,
col_dilation
,
ceil_mode
));
}
}
MLUCnnlPoolingDesc
::
MLUCnnlPoolingDesc
(
MLUCnnlPoolingDesc
::
MLUCnnlPoolingDesc
(
...
@@ -1125,17 +1127,16 @@ MLUCnnlTrigonDesc::~MLUCnnlTrigonDesc() {
...
@@ -1125,17 +1127,16 @@ MLUCnnlTrigonDesc::~MLUCnnlTrigonDesc() {
}
}
/* static */
void
MLUCnnl
::
PoolingForward
(
/* static */
void
MLUCnnl
::
PoolingForward
(
const
ExecutionContext
&
ctx
,
cnnlPoolingMode_t
pool_mode
,
const
ExecutionContext
&
ctx
,
cnnlPoolingMode_t
pool_mode
,
int64_t
output_h
,
const
std
::
vector
<
int64_t
>&
output_shape
,
int64_t
output_w
,
const
cnnlPoolingDescriptor_t
pooling_desc
,
const
cnnlPoolingDescriptor_t
pooling_desc
,
const
void
*
alpha
,
const
void
*
alpha
,
const
cnnlTensorDescriptor_t
input_desc
,
const
cnnlTensorDescriptor_t
input_desc
,
const
void
*
input
,
const
void
*
input
,
const
void
*
beta
,
const
void
*
extra_input_ptr
,
const
void
*
beta
,
const
void
*
extra_input_ptr
,
const
cnnlTensorDescriptor_t
output_desc
,
void
*
output
)
{
const
cnnlTensorDescriptor_t
output_desc
,
void
*
output
)
{
cnnlHandle_t
handle
=
GetHandleFromCTX
(
ctx
);
cnnlHandle_t
handle
=
GetHandleFromCTX
(
ctx
);
size_t
workspace_size
=
0
;
size_t
workspace_size
=
0
;
PADDLE_ENFORCE_MLU_SUCCESS
(
cnnlGetPoolingWorkspaceSize
(
PADDLE_ENFORCE_MLU_SUCCESS
(
cnnlGetPoolingWorkspaceSize
(
handle
,
pool_mode
,
output_
shape
[
2
],
output_shape
[
1
]
,
&
workspace_size
));
handle
,
pool_mode
,
output_
w
,
output_h
,
&
workspace_size
));
auto
&
dev_ctx
=
GetDevCtxFromCTX
(
ctx
);
auto
&
dev_ctx
=
GetDevCtxFromCTX
(
ctx
);
Tensor
workspace
=
ctx
.
AllocateTmpTensor
<
int8_t
,
MLUDeviceContext
>
(
Tensor
workspace
=
ctx
.
AllocateTmpTensor
<
int8_t
,
MLUDeviceContext
>
(
...
...
paddle/fluid/operators/mlu/mlu_baseop.h
浏览文件 @
702bce57
...
@@ -236,7 +236,8 @@ class MLUCnnlPoolingDesc {
...
@@ -236,7 +236,8 @@ class MLUCnnlPoolingDesc {
const
cnnlNanPropagation_t
maxpooling_nan_opt
,
const
cnnlNanPropagation_t
maxpooling_nan_opt
,
int
window_rows
,
int
window_cols
,
int64_t
pad_up
,
int
window_rows
,
int
window_cols
,
int64_t
pad_up
,
int64_t
pad_down
,
int64_t
pad_left
,
int64_t
pad_right
,
int64_t
pad_down
,
int64_t
pad_left
,
int64_t
pad_right
,
int
row_stride
,
int
col_stride
);
int
row_stride
,
int
col_stride
,
int
row_dilation
,
int
col_dilation
,
bool
ceil_mode
);
MLUCnnlPoolingDesc
(
const
cnnlPoolingMode_t
mode
,
MLUCnnlPoolingDesc
(
const
cnnlPoolingMode_t
mode
,
const
cnnlNanPropagation_t
maxpooling_nan_opt
,
const
cnnlNanPropagation_t
maxpooling_nan_opt
,
...
@@ -643,10 +644,9 @@ class MLUCnnl {
...
@@ -643,10 +644,9 @@ class MLUCnnl {
static
void
PoolingForward
(
static
void
PoolingForward
(
const
ExecutionContext
&
ctx
,
cnnlPoolingMode_t
pool_mode
,
const
ExecutionContext
&
ctx
,
cnnlPoolingMode_t
pool_mode
,
const
std
::
vector
<
int64_t
>&
output_shape
,
int64_t
output_h
,
int64_t
output_w
,
cnnlPoolingDescriptor_t
pooling_desc
,
cnnlPoolingDescriptor_t
pooling_desc
,
const
void
*
alpha
,
const
void
*
alpha
,
const
cnnlTensorDescriptor_t
input_desc
,
const
cnnlTensorDescriptor_t
input_desc
,
const
void
*
input
,
const
void
*
input
,
const
void
*
beta
,
const
void
*
extra_input_ptr
,
const
void
*
beta
,
const
void
*
extra_input_ptr
,
const
cnnlTensorDescriptor_t
output_desc
,
void
*
output
);
const
cnnlTensorDescriptor_t
output_desc
,
void
*
output
);
static
void
Pool3D
(
const
ExecutionContext
&
ctx
,
cnnlPoolingMode_t
pool_mode
,
static
void
Pool3D
(
const
ExecutionContext
&
ctx
,
cnnlPoolingMode_t
pool_mode
,
...
...
paddle/fluid/operators/pool_op_mlu.cc
0 → 100644
浏览文件 @
702bce57
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/pool_op.h"
#include "paddle/fluid/operators/mlu/mlu_baseop.h"
namespace
paddle
{
namespace
operators
{
namespace {

// Translates the framework-level `pooling_type` attribute ("max" or "avg"),
// together with the `exclusive` flag, into the matching CNNL pooling mode.
// Throws InvalidArgument for any other pooling_type string.
cnnlPoolingMode_t ToCnnlPoolingMode(const std::string &pooling_type,
                                    bool exclusive) {
  if (pooling_type == "max") {
    return CNNL_POOLING_MAX;
  }
  if (pooling_type == "avg") {
    // `exclusive` decides whether padded elements count toward the average.
    return exclusive ? CNNL_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING
                     : CNNL_POOLING_AVERAGE_COUNT_INCLUDE_PADDING;
  }
  PADDLE_THROW(platform::errors::InvalidArgument("Unknown pooling_type: %s",
                                                 pooling_type));
}
}  // namespace
// Forward pool2d kernel for MLU. Supports max/avg pooling on 4-D inputs in
// either NCHW or NHWC layout; adaptive pooling is rejected explicitly.
template <typename T>
class MLUPoolOpKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext &ctx) const override {
    auto &dev_ctx = ctx.template device_context<platform::MLUDeviceContext>();
    const Tensor *in_x = ctx.Input<Tensor>("X");
    Tensor *out = ctx.Output<Tensor>("Out");
    out->mutable_data<T>(ctx.GetPlace());

    std::string pooling_type = ctx.Attr<std::string>("pooling_type");
    std::vector<int> ksize = ctx.Attr<std::vector<int>>("ksize");
    std::vector<int> strides = ctx.Attr<std::vector<int>>("strides");
    std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
    std::string data_format = ctx.Attr<std::string>("data_format");

    bool global_pooling = ctx.Attr<bool>("global_pooling");
    bool ceil_mode = ctx.Attr<bool>("ceil_mode");
    bool exclusive = ctx.Attr<bool>("exclusive");
    bool adaptive = ctx.Attr<bool>("adaptive");
    std::string padding_algorithm = ctx.Attr<std::string>("padding_algorithm");

    PADDLE_ENFORCE_EQ(in_x->dims().size(), 4,
                      platform::errors::InvalidArgument(
                          "Only support 4-dims for mlu pool2d kernel."));
    PADDLE_ENFORCE_EQ(adaptive, false,
                      platform::errors::InvalidArgument(
                          "Not support adaptive for mlu pool2d kernel."));

    // Default to NCHW; switched below when data_format says NHWC.
    cnnlTensorLayout_t cnnl_layout = CNNL_LAYOUT_NCHW;
    auto out_dims = out->dims();
    int64_t out_h = out_dims[2];
    int64_t out_w = out_dims[3];
    auto in_x_dims = in_x->dims();
    framework::DDim data_dims =
        framework::slice_ddim(in_x_dims, 2, in_x_dims.size());

    const bool channel_last = data_format == "NHWC";
    if (channel_last) {
      cnnl_layout = CNNL_LAYOUT_NHWC;
      out_h = out_dims[1];
      out_w = out_dims[2];
      data_dims = framework::slice_ddim(in_x_dims, 1, in_x_dims.size() - 1);
    }

    // Resolve SAME/VALID padding and, for global pooling, make the kernel
    // window cover the whole spatial extent.
    UpdatePadding(&paddings, global_pooling, adaptive, padding_algorithm,
                  data_dims, strides, ksize);
    if (global_pooling) {
      UpdateKsize(&ksize, data_dims);
    }

    MLUCnnlTensorDesc in_x_desc(*in_x, cnnl_layout, ToCnnlDataType<T>());
    MLUCnnlTensorDesc out_desc(*out, cnnl_layout, ToCnnlDataType<T>());

    cnnlPoolingMode_t pool_mode = ToCnnlPoolingMode(pooling_type, exclusive);
    MLUCnnlPoolingDesc pool_desc(
        pool_mode, CNNL_NOT_PROPAGATE_NAN, ksize[0], ksize[1], paddings[0],
        paddings[1], paddings[2], paddings[3], strides[0], strides[1],
        1 /*row_dilation*/, 1 /*col_dilation*/, ceil_mode);

    // Some CNNL pooling configurations need an extra device-side parameter
    // buffer; query its size first.
    size_t extra_input_size = 0;
    cnnlHandle_t handle =
        ctx.template device_context<MLUDeviceContext>().cnnl_handle();
    // Check the CNNL status instead of silently discarding it, consistent
    // with every other CNNL call in this file.
    PADDLE_ENFORCE_MLU_SUCCESS(cnnlGetPoolingExtraInputSize(
        handle, pool_mode, out_w, out_h, &extra_input_size));

    if (extra_input_size > 0) {
      // Build the extra-input blob on the host, then copy it to the device.
      paddle::platform::CPUDeviceContext cpu_ctx;
      framework::Tensor extra_host_tensor =
          ctx.AllocateTmpTensor<int8_t, platform::CPUDeviceContext>(
              {static_cast<int64_t>(extra_input_size)}, cpu_ctx);
      PADDLE_ENFORCE_MLU_SUCCESS(cnnlInitPoolingExtraInput(
          handle, pool_desc.get(), in_x_desc.get(), out_desc.get(),
          GetBasePtr(&extra_host_tensor)));
      framework::Tensor extra_device_tensor =
          ctx.AllocateTmpTensor<int8_t, MLUDeviceContext>(
              {static_cast<int64_t>(extra_input_size)}, dev_ctx);
      // TODO(fwg): use Async copy, and add a callback to stream that free host
      // memory.
      framework::TensorCopySync(extra_host_tensor, ctx.GetPlace(),
                                &extra_device_tensor);
      MLUCnnl::PoolingForward(
          ctx, pool_mode, out_h, out_w, pool_desc.get(), nullptr /*alpha*/,
          in_x_desc.get(), GetBasePtr(in_x), nullptr /*beta*/,
          GetBasePtr(&extra_device_tensor) /*params_shape_ptr*/,
          out_desc.get(), GetBasePtr(out));
    } else {
      MLUCnnl::PoolingForward(
          ctx, pool_mode, out_h, out_w, pool_desc.get(), nullptr /*alpha*/,
          in_x_desc.get(), GetBasePtr(in_x), nullptr /*beta*/,
          nullptr /*params_shape_ptr*/, out_desc.get(), GetBasePtr(out));
    }
  }
};
// Backward pool2d kernel for MLU. CNNL's pooling backward works on NHWC
// tensors, so NCHW inputs are transposed to NHWC first and the computed
// gradient is transposed back at the end. For max pooling an index tensor
// (of type IDX_T) is produced by PoolingIndex and fed to PoolingBackward.
template <typename T, typename IDX_T>
class MLUPoolGradOpKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext &ctx) const override {
    auto &dev_ctx = ctx.template device_context<platform::MLUDeviceContext>();
    const Tensor *in_x = ctx.Input<Tensor>("X");
    const Tensor *out = ctx.Input<Tensor>("Out");
    const Tensor *out_grad = ctx.Input<Tensor>(framework::GradVarName("Out"));
    Tensor *in_x_grad = ctx.Output<Tensor>(framework::GradVarName("X"));
    in_x_grad->mutable_data<T>(ctx.GetPlace());

    std::string pooling_type = ctx.Attr<std::string>("pooling_type");
    std::vector<int> ksize = ctx.Attr<std::vector<int>>("ksize");
    std::vector<int> strides = ctx.Attr<std::vector<int>>("strides");
    std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
    bool ceil_mode = ctx.Attr<bool>("ceil_mode");
    bool exclusive = ctx.Attr<bool>("exclusive");
    bool adaptive = ctx.Attr<bool>("adaptive");
    std::string data_format = ctx.Attr<std::string>("data_format");
    bool global_pooling = ctx.Attr<bool>("global_pooling");
    std::string padding_algorithm = ctx.Attr<std::string>("padding_algorithm");

    const bool channel_last = data_format == "NHWC";

    auto in_x_dims = in_x->dims();
    framework::DDim data_dims =
        framework::slice_ddim(in_x_dims, 2, in_x_dims.size());
    if (channel_last) {
      data_dims = framework::slice_ddim(in_x_dims, 1, in_x_dims.size() - 1);
    }

    // Resolve SAME/VALID padding; for global pooling the window is the whole
    // spatial extent.
    UpdatePadding(&paddings, global_pooling, adaptive, padding_algorithm,
                  data_dims, strides, ksize);
    if (global_pooling) {
      UpdateKsize(&ksize, data_dims);
    }

    // CNNL pooling backward expects NHWC inputs. Channel-last tensors are
    // used as-is; NCHW tensors are transposed into temporaries.
    framework::Tensor trans_in_x;
    framework::Tensor trans_out;
    framework::Tensor trans_out_grad;
    framework::Tensor trans_in_x_grad;
    if (channel_last) {
      trans_in_x = *in_x;
      trans_out = *out;
      trans_out_grad = *out_grad;
      trans_in_x_grad = *in_x_grad;
    } else {
      std::vector<int> perm{0, 2, 3, 1};  // NCHW -> NHWC
      TransposeFromMLUTensor<T>(ctx, perm, in_x, &trans_in_x,
                                true /*need_reshape_or_alloc*/);
      TransposeFromMLUTensor<T>(ctx, perm, out, &trans_out,
                                true /*need_reshape_or_alloc*/);
      TransposeFromMLUTensor<T>(ctx, perm, out_grad, &trans_out_grad,
                                true /*need_reshape_or_alloc*/);
      // Gradient buffer in NHWC; transposed back into in_x_grad at the end.
      auto in_x_grad_dims = in_x_grad->dims();
      trans_in_x_grad = ctx.AllocateTmpTensor<T, MLUDeviceContext>(
          {in_x_grad_dims[0], in_x_grad_dims[2], in_x_grad_dims[3],
           in_x_grad_dims[1]},
          dev_ctx);
    }
    MLUCnnlTensorDesc trans_in_x_desc(trans_in_x, CNNL_LAYOUT_NHWC,
                                      ToCnnlDataType<T>());
    MLUCnnlTensorDesc trans_out_desc(trans_out, CNNL_LAYOUT_NHWC,
                                     ToCnnlDataType<T>());
    MLUCnnlTensorDesc trans_out_grad_desc(trans_out_grad, CNNL_LAYOUT_NHWC,
                                          ToCnnlDataType<T>());
    MLUCnnlTensorDesc trans_in_x_grad_desc(trans_in_x_grad, CNNL_LAYOUT_NHWC,
                                           ToCnnlDataType<T>());

    cnnlPoolingMode_t pool_mode = ToCnnlPoolingMode(pooling_type, exclusive);
    MLUCnnlPoolingDesc pool_desc(
        pool_mode, CNNL_NOT_PROPAGATE_NAN, ksize[0], ksize[1], paddings[0],
        paddings[1], paddings[2], paddings[3], strides[0], strides[1],
        1 /*row_dilation*/, 1 /*col_dilation*/, ceil_mode);

    if (pooling_type == "max") {
      // Max pooling needs the argmax indices of the forward pass.
      framework::Tensor index_tensor =
          ctx.AllocateTmpTensor<IDX_T, MLUDeviceContext>(trans_out_grad.dims(),
                                                         dev_ctx);
      MLUCnnlTensorDesc index_tensor_desc(index_tensor, CNNL_LAYOUT_NHWC,
                                          ToCnnlDataType<IDX_T>());
      MLUCnnl::PoolingIndex(ctx, pool_desc.get(), trans_in_x_desc.get(),
                            GetBasePtr(&trans_in_x), index_tensor_desc.get(),
                            GetBasePtr(&index_tensor));
      MLUCnnl::PoolingBackward(
          ctx, pool_desc.get(), nullptr /*alpha*/, index_tensor_desc.get(),
          GetBasePtr(&index_tensor), trans_out_grad_desc.get(),
          GetBasePtr(&trans_out_grad), trans_in_x_desc.get(),
          GetBasePtr(&trans_in_x), nullptr /*beta*/,
          trans_in_x_grad_desc.get(), GetBasePtr(&trans_in_x_grad));
    } else {
      // Average pooling needs neither the indices nor the forward input.
      MLUCnnl::PoolingBackward(ctx, pool_desc.get(), nullptr /*alpha*/,
                               nullptr, nullptr, trans_out_grad_desc.get(),
                               GetBasePtr(&trans_out_grad), nullptr, nullptr,
                               nullptr /*beta*/, trans_in_x_grad_desc.get(),
                               GetBasePtr(&trans_in_x_grad));
    }
    if (!channel_last) {
      std::vector<int> perm{0, 3, 1, 2};  // NHWC -> NCHW
      TransposeFromMLUTensor<T>(ctx, perm, &trans_in_x_grad, in_x_grad,
                                false /*need_reshape_or_alloc*/);
    }
  }
};
}
// namespace operators
}
// namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;

// Register the forward kernel for float and float16.
REGISTER_OP_MLU_KERNEL(pool2d, ops::MLUPoolOpKernel<float>,
                       ops::MLUPoolOpKernel<plat::float16>);
// Register the backward kernel; the second template argument is the dtype of
// the max-pooling index tensor (int for float, int16_t for float16).
REGISTER_OP_MLU_KERNEL(pool2d_grad, ops::MLUPoolGradOpKernel<float, int>,
                       ops::MLUPoolGradOpKernel<plat::float16, int16_t>);
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录