PaddlePaddle / Paddle
Commit 27281e1f (unverified)

Addition of marco for auto_tune_base.h (#50516)

Authored by limingshu on Feb 21, 2023; committed via GitHub on Feb 21, 2023.
Parent commit: 7fe44feb

Showing 4 changed files with 83 additions and 41 deletions:
paddle/phi/kernels/autotune/auto_tune_base.h               +42  -38
paddle/phi/kernels/autotune/cache.cc                        +1   -1
paddle/phi/kernels/autotune/cache_base.h                    +2   -2
python/paddle/fluid/tests/unittests/test_transpose_op.py   +38   -0
paddle/phi/kernels/autotune/auto_tune_base.h
@@ -67,13 +67,8 @@ class AutoTuneBase {
            const AlgorithmType& algo,
            const size_t key,
            Args&&... args) {
-    PADDLE_ENFORCE_GT(
-        kernels_.size(),
-        0,
-        phi::errors::InvalidArgument(
-            "kernel num must be greater than 0, now is %d", kernels_.size()));
-    is_init_ = true;
+    CheckKernelSize();
     auto& cache = AutoTuneCache::Instance().Get(algo);
     if (cache.Find(key)) {
       auto best_idx = cache.Get(key);
@@ -91,19 +86,22 @@ class AutoTuneBase {
     }
   }

- private:
+ protected:
   bool is_init_{false};
   std::vector<KernelType> kernels_;
   mutable std::mutex mutex_;

-  template <typename Context, typename... Args>
-  size_t PickBestKernel(const Context& ctx, Args&&... args) {
-    std::lock_guard<std::mutex> lock(mutex_);
+  void CheckKernelSize() {
+    PADDLE_ENFORCE_GT(
+        kernels_.size(),
+        0,
+        phi::errors::InvalidArgument(
+            "kernel num must be greater than 0, now is %d", kernels_.size()));
+  }
+
+  template <typename Context, typename... Args>
+  size_t PickBestKernel(const Context& ctx, Args&&... args) {
+    std::lock_guard<std::mutex> lock(mutex_);
     size_t best_idx = 0;
     float min_time = std::numeric_limits<float>::max();
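The hunk above pulls the kernel-count check out of Run() into a reusable CheckKernelSize(), widens member access from private to protected so derived tuners can reach the internals, and leaves the pick-the-fastest loop in PickBestKernel(). As a rough standalone illustration of that timing loop (ToyAutoTuner is a name invented here, and std::chrono stands in for Paddle's device-side kernel timing):

#include <chrono>
#include <functional>
#include <iostream>
#include <limits>
#include <mutex>
#include <vector>

class ToyAutoTuner {
 public:
  void AddCallBack(std::function<void()> f) { kernels_.push_back(std::move(f)); }

  // Time every registered candidate and return the index of the fastest,
  // mirroring the best_idx / min_time bookkeeping in the hunk above.
  size_t PickBestKernel() {
    std::lock_guard<std::mutex> lock(mutex_);  // one tuning pass at a time
    size_t best_idx = 0;
    float min_time = std::numeric_limits<float>::max();
    for (size_t i = 0; i < kernels_.size(); ++i) {
      auto start = std::chrono::steady_clock::now();
      kernels_[i]();
      std::chrono::duration<float, std::milli> elapsed =
          std::chrono::steady_clock::now() - start;
      if (elapsed.count() < min_time) {
        min_time = elapsed.count();
        best_idx = i;
      }
    }
    return best_idx;
  }

 private:
  std::vector<std::function<void()>> kernels_;
  mutable std::mutex mutex_;
};

int main() {
  ToyAutoTuner tuner;
  tuner.AddCallBack([] { volatile long s = 0; for (int i = 0; i < 100000; ++i) s += i; });
  tuner.AddCallBack([] { volatile long s = 0; for (int i = 0; i < 100; ++i) s += i; });
  std::cout << "best kernel index: " << tuner.PickBestKernel() << std::endl;  // likely 1
}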
@@ -143,36 +141,42 @@ class AutoTuneBase {
   }
 };

-template <typename T, typename ReturnType, typename... Args>
-static AutoTuneBase<T, KernelCallback<T, ReturnType, Args...>>
-MakeAutoTuner(ReturnType (*func)(Args...)) {
-  auto obj = MakeCallback<T>(func);
-  return AutoTuneBase<T, decltype(obj)>(obj);
-}
-
-template <typename T, typename ReturnType, typename... Args>
-class TransposeAutoTuner
-    : public AutoTuneBase<T, KernelCallback<T, ReturnType, Args...>> {
- public:
-  static AutoTuneBase<T, KernelCallback<T, ReturnType, Args...>>* Instance(
-      ReturnType (*func)(Args...)) {
-    static std::once_flag transpose_init_flag_;
-    static std::unique_ptr<
-        AutoTuneBase<T, KernelCallback<T, ReturnType, Args...>>>
-        instance_;
-    std::call_once(transpose_init_flag_, [&] {
-      auto obj = MakeCallback<T>(func);
-      instance_.reset(new AutoTuneBase<T, decltype(obj)>(obj));
-    });
-    return instance_.get();
-  }
-};
-
-template <typename T, typename ReturnType, typename... Args>
-static AutoTuneBase<T, KernelCallback<T, ReturnType, Args...>>*
-MakeTransposeTuner(ReturnType (*func)(Args...)) {
-  return TransposeAutoTuner<T, ReturnType, Args...>::Instance(func);
-}
+// To init the auto_tuner object.
+#define DEFINE_AUTOTUNER_COMMON_OBJ(name)                                \
+  template <typename T, typename ReturnType, typename... Args>           \
+  class name##AutoTuner                                                  \
+      : public AutoTuneBase<T, KernelCallback<T, ReturnType, Args...>> { \
+   public:                                                               \
+    static name##AutoTuner<T, ReturnType, Args...>* Instance(            \
+        ReturnType (*func)(Args...)) {                                   \
+      static std::once_flag name##_init_flag;                            \
+      static std::unique_ptr<name##AutoTuner<T, ReturnType, Args...>>    \
+          instance;                                                      \
+      std::call_once(name##_init_flag, [&] {                             \
+        auto obj = MakeCallback<T>(func);                                \
+        instance.reset(new name##AutoTuner<T, ReturnType, Args...>);     \
+        instance->AddCallBack(func);                                     \
+      });                                                                \
+      return instance.get();                                             \
+    }                                                                    \
+  };
+
+// To init the auto_tuner initial function.
+#define DEFINE_AUTOTUNER_FN(name)                                    \
+  template <typename T, typename ReturnType, typename... Args>       \
+  static name##AutoTuner<T, ReturnType, Args...>* Make##name##Tuner( \
+      ReturnType (*func)(Args...)) {                                 \
+    return name##AutoTuner<T, ReturnType, Args...>::Instance(func);  \
+  }
+
+#define DEFINE_AUTOTUNER(name) \
+  DEFINE_AUTOTUNER_COMMON_OBJ(name) DEFINE_AUTOTUNER_FN(name)
+
+DEFINE_AUTOTUNER(Transpose)
+
+#undef DEFINE_AUTOTUNER_COMMON_OBJ
+#undef DEFINE_AUTOTUNER_FN
+#undef DEFINE_AUTOTUNER

 }  // namespace autotune
 }  // namespace phi
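The net effect of this hunk: the hand-written TransposeAutoTuner singleton and its MakeTransposeTuner factory are replaced by the DEFINE_AUTOTUNER_COMMON_OBJ / DEFINE_AUTOTUNER_FN pair, so DEFINE_AUTOTUNER(Transpose) stamps out both, and a tuner for another algorithm becomes one macro invocation instead of a duplicated class. A minimal self-contained sketch of the same token-pasting singleton technique (ToyTunerBase and DEFINE_TOY_TUNER are illustrative stand-ins, not Paddle code):

#include <iostream>
#include <memory>
#include <mutex>

// Stand-in for AutoTuneBase: holds one kernel callback and invokes it.
template <typename ReturnType, typename... Args>
class ToyTunerBase {
 public:
  void AddCallBack(ReturnType (*func)(Args...)) { func_ = func; }
  ReturnType Run(Args... args) { return func_(args...); }

 private:
  ReturnType (*func_)(Args...) = nullptr;
};

// Same shape as DEFINE_AUTOTUNER above: paste `name` into a derived
// singleton class plus a Make<name>Tuner factory function.
#define DEFINE_TOY_TUNER(name)                                       \
  template <typename ReturnType, typename... Args>                   \
  class name##Tuner : public ToyTunerBase<ReturnType, Args...> {     \
   public:                                                           \
    static name##Tuner<ReturnType, Args...>* Instance(               \
        ReturnType (*func)(Args...)) {                               \
      static std::once_flag name##_init_flag;                        \
      static std::unique_ptr<name##Tuner<ReturnType, Args...>> inst; \
      std::call_once(name##_init_flag, [&] {                         \
        inst.reset(new name##Tuner<ReturnType, Args...>);            \
        inst->AddCallBack(func);                                     \
      });                                                            \
      return inst.get();                                             \
    }                                                                \
  };                                                                 \
  template <typename ReturnType, typename... Args>                   \
  static name##Tuner<ReturnType, Args...>* Make##name##Tuner(        \
      ReturnType (*func)(Args...)) {                                 \
    return name##Tuner<ReturnType, Args...>::Instance(func);         \
  }

DEFINE_TOY_TUNER(Transpose)  // -> TransposeTuner + MakeTransposeTuner

int Add(int a, int b) { return a + b; }

int main() {
  auto* tuner = MakeTransposeTuner<int, int, int>(Add);
  std::cout << tuner->Run(2, 3) << std::endl;  // prints 5
}

Token pasting gives every generated tuner its own once_flag and unique_ptr statics, so each name keeps an independent, thread-safe singleton.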
paddle/phi/kernels/autotune/cache.cc
@@ -25,7 +25,7 @@ size_t TransposeKey(const std::vector<int64_t>& x_dims,
                     const std::vector<int32_t>& perm,
                     phi::DataType dtype) {
   const auto rank = perm.size();
-  return GetKey(x_dims, perm, rank, static_cast<int64_t>(dtype));
+  return GenKey(x_dims, perm, rank, static_cast<int64_t>(dtype));
 }

 std::string AlgorithmTypeString(int64_t algo_type) {
paddle/phi/kernels/autotune/cache_base.h
@@ -54,7 +54,7 @@ namespace phi {
 namespace autotune {

 template <typename... Args>
-size_t GetKey(Args&&... args) {
+size_t GenKey(Args&&... args) {
   size_t seed = 0;
   HashCombine(&seed, std::forward<Args>(args)...);
   return seed;
@@ -79,7 +79,7 @@ struct ConvCacheKey {
         groups(arg_groups),
         data_layout(arg_data_layout) {}
   size_t hash_value() const {
-    return GetKey(x_dims,
+    return GenKey(x_dims,
                   w_dims,
                   strides,
                   paddings,
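The cache.cc and cache_base.h edits are a coordinated rename of GetKey to GenKey, the variadic helper that folds every argument into a single hash seed to build autotune cache keys. A standalone sketch of that idea, assuming a boost-style combiner (HashCombine's constant and the vector overload below are illustrative, not Paddle's exact code; C++17 is needed for the fold expression):

#include <cstddef>
#include <cstdint>
#include <functional>
#include <iostream>
#include <vector>

// Boost-style combiner: mixes one value into the running seed.
template <typename T>
void HashCombine(std::size_t* seed, const T& v) {
  *seed ^= std::hash<T>{}(v) + 0x9e3779b9 + (*seed << 6) + (*seed >> 2);
}

// Combine a vector element by element.
template <typename T>
void HashCombine(std::size_t* seed, const std::vector<T>& values) {
  for (const auto& v : values) HashCombine(seed, v);
}

// Same shape as GenKey above: fold every argument into one seed.
template <typename... Args>
std::size_t GenKey(Args&&... args) {
  std::size_t seed = 0;
  (HashCombine(&seed, std::forward<Args>(args)), ...);
  return seed;
}

int main() {
  std::vector<int64_t> x_dims = {2, 8, 10};
  std::vector<int32_t> perm = {0, 2, 1};
  // Mirrors the TransposeKey call site: dims, perm, rank, dtype-as-integer.
  std::cout << GenKey(x_dims, perm, perm.size(), static_cast<int64_t>(1))
            << std::endl;
}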
python/paddle/fluid/tests/unittests/test_transpose_op.py
@@ -157,6 +157,44 @@ class TestAutoTuneTransposeOp(OpTest):
         self.check_grad(['X'], 'Out')


+class TestAutoTuneTransposeBF16Op(OpTest):
+    def setUp(self):
+        self.init_op_type()
+        self.initTestCase()
+        self.dtype = np.uint16
+        self.python_api = paddle.transpose
+        x = np.random.random(self.shape).astype("float32")
+        self.inputs = {'X': convert_float_to_uint16(x)}
+        self.attrs = {
+            'axis': list(self.axis),
+            'use_mkldnn': self.use_mkldnn,
+        }
+        self.outputs = {
+            'XShape': convert_float_to_uint16(
+                np.random.random(self.shape).astype("float32")
+            ),
+            'Out': self.inputs['X'].transpose(self.axis),
+        }
+
+    def initTestCase(self):
+        fluid.core.set_autotune_range(0, 3)
+        fluid.core.update_autotune_status()
+        fluid.core.enable_autotune()
+        self.shape = (2, 8, 10)
+        self.axis = (0, 2, 1)
+
+    def init_op_type(self):
+        self.op_type = "transpose2"
+        self.use_mkldnn = False
+
+    def test_check_output(self):
+        self.check_output(no_check_set=['XShape'])
+        fluid.core.disable_autotune()
+
+    def test_check_grad(self):
+        self.check_grad(['X'], 'Out')
+
+
 class TestTransposeBF16Op(OpTest):
     def setUp(self):
         self.init_op_type()
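The new TestAutoTuneTransposeBF16Op exercises the transpose autotuner with bfloat16 inputs, which the test stores as np.uint16 because a bfloat16 value occupies the upper 16 bits of an IEEE-754 float32. A minimal C++ sketch of that representation (truncating conversion for brevity; production converters often round to nearest even):

#include <cstdint>
#include <cstring>
#include <iostream>

// bfloat16 keeps the sign, the full 8-bit exponent, and the top 7 mantissa
// bits of a float32, so conversion is a 16-bit shift.
std::uint16_t FloatToBfloat16(float f) {
  std::uint32_t bits;
  std::memcpy(&bits, &f, sizeof(bits));  // type-pun without UB
  return static_cast<std::uint16_t>(bits >> 16);
}

float Bfloat16ToFloat(std::uint16_t b) {
  std::uint32_t bits = static_cast<std::uint32_t>(b) << 16;  // low bits zeroed
  float f;
  std::memcpy(&f, &bits, sizeof(f));
  return f;
}

int main() {
  float x = 3.14159f;
  std::uint16_t b = FloatToBfloat16(x);
  std::cout << x << " -> " << Bfloat16ToFloat(b) << std::endl;  // ~3.140625
}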