BaiXuePrincess / Paddle · commit 2cd5ee9c (forked from PaddlePaddle / Paddle, in sync with the fork source)
Commit 2cd5ee9c (unverified)
Authored Sep 03, 2020 by 石晓伟; committed via GitHub on Sep 03, 2020
Parent: 2697f72b

Revert "Add mkldnn bfloat16 option to C-API (#26676)" (#26913)
Showing 11 changed files with 2 additions and 113 deletions (+2 −113):
go/paddle/config.go                                            +0 −7
paddle/fluid/inference/api/analysis_config.cc                  +0 −18
paddle/fluid/inference/api/analysis_predictor_tester.cc        +0 −21
paddle/fluid/inference/api/paddle_analysis_config.h            +0 −14
paddle/fluid/inference/api/paddle_pass_builder.cc              +0 −12
paddle/fluid/inference/api/paddle_pass_builder.h               +0 −11
paddle/fluid/inference/capi/paddle_c_api.h                     +0 −6
paddle/fluid/inference/capi/pd_config.cc                       +0 −12
paddle/fluid/inference/tests/api/analyzer_capi_gpu_tester.cc   +0 −3
paddle/fluid/inference/tests/api/analyzer_capi_tester.cc       +0 −3
paddle/fluid/pybind/inference_api.cc                           +2 −6
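The revert removes the bfloat16 switch end to end: the user-facing method on AnalysisConfig, its plumbing into the pass builders, the C API, the Go wrapper, and the Python bindings. Before this commit a caller could enable it roughly as below; this is a hedged sketch reconstructed from the removed declarations in the hunks that follow ("model_dir" is a placeholder path; SetModel, SwitchIrOptim, and EnableMKLDNN are pre-existing AnalysisConfig methods):

#include "paddle/fluid/inference/api/paddle_analysis_config.h"

int main() {
  paddle::AnalysisConfig config;
  config.SetModel("model_dir");   // placeholder model directory
  config.SwitchIrOptim(true);
  config.EnableMKLDNN();
  config.EnableMkldnnBfloat16();  // the switch this commit removes
  // True only when Paddle itself was built with PADDLE_WITH_MKLDNN.
  bool bf16_on = config.mkldnn_bfloat16_enabled();
  return bf16_on ? 0 : 1;
}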
go/paddle/config.go
@@ -154,17 +154,10 @@ func (config *AnalysisConfig) EnableMkldnnQuantizer() {
     C.PD_EnableMkldnnQuantizer(config.c)
 }
 
-func (config *AnalysisConfig) EnableMkldnnBfloat16() {
-    C.PD_EnableMkldnnBfloat16(config.c)
-}
-
 func (config *AnalysisConfig) MkldnnQuantizerEnabled() bool {
     return ConvertCBooleanToGo(C.PD_MkldnnQuantizerEnabled(config.c))
 }
 
-func (config *AnalysisConfig) MkldnnBfloat16Enabled() bool {
-    return ConvertCBooleanToGo(C.PD_MkldnnBfloat16Enabled(config.c))
-}
-
 // SetModelBuffer
 // ModelFromMemory
paddle/fluid/inference/api/analysis_config.cc
@@ -217,17 +217,6 @@ void AnalysisConfig::EnableMkldnnQuantizer() {
   Update();
 }
 
-void AnalysisConfig::EnableMkldnnBfloat16() {
-#ifdef PADDLE_WITH_MKLDNN
-  use_mkldnn_bfloat16_ = true;
-#else
-  LOG(ERROR) << "Please compile with MKLDNN first to use MkldnnBfloat16";
-  use_mkldnn_bfloat16_ = false;
-#endif
-  Update();
-}
-
 MkldnnQuantizerConfig *AnalysisConfig::mkldnn_quantizer_config() const {
   PADDLE_ENFORCE_NOT_NULL(mkldnn_quantizer_config_,
                           "MkldnnQuantizer was not enabled yet.");
@@ -341,12 +330,6 @@ void AnalysisConfig::Update() {
 #endif
   }
 
-  if (use_mkldnn_bfloat16_) {
-#ifdef PADDLE_WITH_MKLDNN
-    pass_builder()->EnableMkldnnBfloat16();
-#endif
-  }
-
 #ifdef PADDLE_WITH_MKLDNN
   // Do not optimize when mkldnn is on
   if (enable_memory_optim_ && !use_mkldnn_) {
@@ -415,7 +398,6 @@ std::string AnalysisConfig::SerializeInfoCache() {
   ss << ";";
 
   ss << use_mkldnn_quantizer_;
-  ss << use_mkldnn_bfloat16_;
   ss << model_from_memory_;
 
   ss << with_profile_;
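The removed EnableMkldnnBfloat16 follows the same compile-time guard pattern as the quantizer switch: honor the request only when the library was built with MKL-DNN, otherwise log and reset the flag. A minimal standalone sketch of that pattern (class, flag, and macro names here are hypothetical, not Paddle code):

#include <iostream>

// Standalone sketch of the enable-with-compile-time-fallback pattern used
// by the removed AnalysisConfig::EnableMkldnnBfloat16.
class Config {
 public:
  void EnableBfloat16() {
#ifdef BUILT_WITH_MKLDNN
    use_bfloat16_ = true;   // feature compiled in: honor the request
#else
    std::cerr << "Compile with MKLDNN first to use bfloat16\n";
    use_bfloat16_ = false;  // feature absent: refuse, but leave a trace in the log
#endif
  }
  bool bfloat16_enabled() const { return use_bfloat16_; }

 private:
  bool use_bfloat16_{false};
};

int main() {
  Config c;
  c.EnableBfloat16();
  // Prints 1 only when compiled with -DBUILT_WITH_MKLDNN.
  std::cout << c.bfloat16_enabled() << "\n";
}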
paddle/fluid/inference/api/analysis_predictor_tester.cc
@@ -485,25 +485,4 @@ TEST_F(MkldnnQuantizerTest, kl_scaling_factor_unsigned) {
 }
 #endif
 
-#ifdef PADDLE_WITH_CUDA
-TEST(AnalysisPredictor, bf16_gpu_pass_strategy) {
-  AnalysisConfig config;
-  config.SetModel(FLAGS_dirname);
-  config.SwitchIrOptim(true);
-  config.EnableUseGpu(100, 0);
-  config.EnableMkldnnBfloat16();
-#ifdef PADDLE_WITH_MKLDNN
-  ASSERT_EQ(config.mkldnn_bfloat16_enabled(), true);
-#else
-  ASSERT_EQ(config.mkldnn_bfloat16_enabled(), false);
-#endif
-}
-#endif
-
-TEST(AnalysisPredictor, bf16_pass_strategy) {
-  std::vector<std::string> passes;
-  PassStrategy passStrategy(passes);
-  passStrategy.EnableMkldnnBfloat16();
-}
-
 }  // namespace paddle
paddle/fluid/inference/api/paddle_analysis_config.h
@@ -401,19 +401,6 @@ struct PD_INFER_DECL AnalysisConfig {
   ///
   void EnableMkldnnQuantizer();
 
-  ///
-  /// \brief Turn on MKLDNN bfloat16.
-  ///
-  ///
-  void EnableMkldnnBfloat16();
-  ///
-  /// \brief A boolean state telling whether to use the MKLDNN Bfloat16.
-  ///
-  /// \return bool Whether to use the MKLDNN Bfloat16.
-  ///
-  bool mkldnn_bfloat16_enabled() const { return use_mkldnn_bfloat16_; }
-
   ///
   /// \brief A boolean state telling whether the thread local CUDA stream is
   /// enabled.
@@ -605,7 +592,6 @@ struct PD_INFER_DECL AnalysisConfig {
   int mkldnn_cache_capacity_{0};
   bool use_mkldnn_quantizer_{false};
   std::shared_ptr<MkldnnQuantizerConfig> mkldnn_quantizer_config_;
-  bool use_mkldnn_bfloat16_{false};
 
   // If the config is already used on a predictor, it becomes invalid.
   // Any config can only be used with one predictor.
paddle/fluid/inference/api/paddle_pass_builder.cc
@@ -143,10 +143,6 @@ void GpuPassStrategy::EnableMkldnnQuantizer() {
   LOG(ERROR) << "GPU not support MKL-DNN quantization";
 }
 
-void GpuPassStrategy::EnableMkldnnBfloat16() {
-  LOG(ERROR) << "GPU not support MKL-DNN bfloat16";
-}
-
 CpuPassStrategy::CpuPassStrategy() : PassStrategy({}) {
   // NOTE the large fusions should be located in the front, so that they will
   // not be damaged by smaller ones.
@@ -229,12 +225,4 @@ void CpuPassStrategy::EnableMkldnnQuantizer() {
 #endif
 }
 
-void CpuPassStrategy::EnableMkldnnBfloat16() {
-#ifdef PADDLE_WITH_MKLDNN
-  use_mkldnn_bfloat16_ = true;
-#else
-  use_mkldnn_bfloat16_ = false;
-#endif
-}
-
 }  // namespace paddle
paddle/fluid/inference/api/paddle_pass_builder.h
@@ -132,9 +132,6 @@ class PD_INFER_DECL PassStrategy : public PaddlePassBuilder {
   /// \brief Enable MKLDNN quantize optimization.
   virtual void EnableMkldnnQuantizer() {}
 
-  /// \brief Enable MKLDNN bfloat16.
-  virtual void EnableMkldnnBfloat16() {}
-
   /// \brief Check if we are using gpu.
   /// \return A bool variable implying whether we are in gpu mode.
   bool use_gpu() const { return use_gpu_; }
@@ -164,7 +161,6 @@ class PD_INFER_DECL CpuPassStrategy : public PassStrategy {
     use_gpu_ = other.use_gpu_;
     use_mkldnn_ = other.use_mkldnn_;
     use_mkldnn_quantizer_ = other.use_mkldnn_quantizer_;
-    use_mkldnn_bfloat16_ = other.use_mkldnn_bfloat16_;
   }
 
   /// \brief Default destructor.
   virtual ~CpuPassStrategy() = default;
@@ -178,13 +174,9 @@ class PD_INFER_DECL CpuPassStrategy : public PassStrategy {
   /// \brief Enable MKLDNN quantize optimization.
   void EnableMkldnnQuantizer() override;
 
-  /// \brief Enable MKLDNN bfloat16.
-  void EnableMkldnnBfloat16() override;
-
  protected:
   /// \cond Protected
   bool use_mkldnn_quantizer_{false};
-  bool use_mkldnn_bfloat16_{false};
   /// \endcond
 };
@@ -213,9 +205,6 @@ class PD_INFER_DECL GpuPassStrategy : public PassStrategy {
   /// \brief Not supported in GPU mode yet.
   void EnableMkldnnQuantizer() override;
 
-  /// \brief Not supported in GPU mode yet.
-  void EnableMkldnnBfloat16() override;
-
   /// \brief Default destructor.
   virtual ~GpuPassStrategy() = default;
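Across the two pass-builder files, the reverted feature followed PassStrategy's usual shape: a no-op virtual on the base class, a CPU override that records the flag, and a GPU override that only logs an error. A condensed standalone sketch of that dispatch pattern (class names shortened and hypothetical; not the Paddle code itself):

#include <iostream>

// Base class provides a do-nothing virtual; the CPU strategy records the
// request; the GPU strategy rejects it with an error message.
class Strategy {
 public:
  virtual ~Strategy() = default;
  virtual void EnableBfloat16() {}  // default: silently ignore
};

class CpuStrategy : public Strategy {
 public:
  void EnableBfloat16() override { use_bfloat16_ = true; }
  bool enabled() const { return use_bfloat16_; }

 private:
  bool use_bfloat16_{false};
};

class GpuStrategy : public Strategy {
 public:
  void EnableBfloat16() override {
    std::cerr << "GPU does not support MKL-DNN bfloat16\n";
  }
};

int main() {
  CpuStrategy cpu;
  GpuStrategy gpu;
  Strategy* strategies[] = {&cpu, &gpu};
  for (Strategy* s : strategies) s->EnableBfloat16();     // virtual dispatch
  std::cout << "cpu enabled: " << cpu.enabled() << "\n";  // prints 1
}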
paddle/fluid/inference/capi/paddle_c_api.h
@@ -235,12 +235,6 @@ PADDLE_CAPI_EXPORT extern void PD_EnableMkldnnQuantizer(
 PADDLE_CAPI_EXPORT extern bool PD_MkldnnQuantizerEnabled(
     const PD_AnalysisConfig* config);
 
-PADDLE_CAPI_EXPORT extern void PD_EnableMkldnnBfloat16(
-    PD_AnalysisConfig* config);
-
-PADDLE_CAPI_EXPORT extern bool PD_MkldnnBfloat16Enabled(
-    const PD_AnalysisConfig* config);
-
 PADDLE_CAPI_EXPORT extern void PD_SetModelBuffer(PD_AnalysisConfig* config,
                                                  const char* prog_buffer,
                                                  size_t prog_buffer_size,
paddle/fluid/inference/capi/pd_config.cc
@@ -207,18 +207,6 @@ bool PD_MkldnnQuantizerEnabled(const PD_AnalysisConfig* config) {
   return config->config.mkldnn_quantizer_enabled();
 }
 
-void PD_EnableMkldnnBfloat16(PD_AnalysisConfig* config) {
-  PADDLE_ENFORCE_NOT_NULL(config, paddle::platform::errors::NotFound(
-                                      "PD_AnalysisConfig should not be null"));
-  config->config.EnableMkldnnBfloat16();
-}
-
-bool PD_MkldnnBfloat16Enabled(const PD_AnalysisConfig* config) {
-  PADDLE_ENFORCE_NOT_NULL(config, paddle::platform::errors::NotFound(
-                                      "PD_AnalysisConfig should not be null"));
-  return config->config.mkldnn_bfloat16_enabled();
-}
-
 void PD_SetModelBuffer(PD_AnalysisConfig* config, const char* prog_buffer,
                        size_t prog_buffer_size, const char* params_buffer,
                        size_t params_buffer_size) {
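pd_config.cc shows the standard C-wrapper idiom: the opaque PD_AnalysisConfig struct owns a C++ AnalysisConfig (hence the config->config.… calls), and each exported C function null-checks the handle and forwards to a member. A minimal self-contained sketch of the same idiom (types and names are illustrative, not Paddle's):

#include <cassert>
#include <iostream>

// Illustrative C++ object standing in for AnalysisConfig.
class Engine {
 public:
  void EnableFeature() { feature_ = true; }
  bool feature_enabled() const { return feature_; }

 private:
  bool feature_{false};
};

// Opaque handle exposed to C callers: a struct that owns the C++ object,
// mirroring PD_AnalysisConfig wrapping AnalysisConfig.
struct EngineHandle {
  Engine engine;
};

extern "C" {
EngineHandle* Engine_New() { return new EngineHandle; }
void Engine_Delete(EngineHandle* h) { delete h; }

void Engine_EnableFeature(EngineHandle* h) {
  assert(h != nullptr);       // Paddle uses PADDLE_ENFORCE_NOT_NULL here
  h->engine.EnableFeature();  // forward to the wrapped C++ member
}

bool Engine_FeatureEnabled(const EngineHandle* h) {
  assert(h != nullptr);
  return h->engine.feature_enabled();
}
}  // extern "C"

int main() {
  EngineHandle* h = Engine_New();
  Engine_EnableFeature(h);
  std::cout << Engine_FeatureEnabled(h) << "\n";  // prints 1
  Engine_Delete(h);
}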
paddle/fluid/inference/tests/api/analyzer_capi_gpu_tester.cc
@@ -54,9 +54,6 @@ TEST(PD_AnalysisConfig, use_gpu) {
   PD_SwitchIrOptim(config, true);
   bool ir_optim = PD_IrOptim(config);
   CHECK(ir_optim) << "NO";
-  PD_EnableMkldnnBfloat16(config);
-  bool bfloat16_enable = PD_MkldnnBfloat16Enabled(config);
-  CHECK(!bfloat16_enable) << "NO";
   PD_EnableTensorRtEngine(config, 1 << 20, 1, 3, Precision::kFloat32, false,
                           false);
   bool trt_enable = PD_TensorrtEngineEnabled(config);
paddle/fluid/inference/tests/api/analyzer_capi_tester.cc
@@ -88,9 +88,6 @@ TEST(PD_AnalysisConfig, profile_mkldnn) {
   PD_EnableMkldnnQuantizer(config);
   bool quantizer_enable = PD_MkldnnQuantizerEnabled(config);
   CHECK(quantizer_enable) << "NO";
-  PD_EnableMkldnnBfloat16(config);
-  bool bfloat16_enable = PD_MkldnnBfloat16Enabled(config);
-  CHECK(bfloat16_enable) << "NO";
   PD_SetMkldnnCacheCapacity(config, 0);
   PD_SetModel(config, prog_file.c_str(), params_file.c_str());
   PD_DeleteAnalysisConfig(config);
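Taken together, the two removed test snippets document the C-API behavior being reverted: on a GPU-only build PD_MkldnnBfloat16Enabled stayed false, while after PD_EnableMkldnnBfloat16 on an MKL-DNN build it reported true. A hedged sketch of that flow (PD_NewAnalysisConfig is assumed from the surrounding capi test code; only the other three calls appear in this diff):

#include <stdio.h>
#include "paddle/fluid/inference/capi/paddle_c_api.h"

int main() {
  PD_AnalysisConfig* config = PD_NewAnalysisConfig();  // assumed constructor
  PD_EnableMkldnnBfloat16(config);
  // True on a PADDLE_WITH_MKLDNN build, false otherwise (see the
  // EnableMkldnnBfloat16 fallback in analysis_config.cc above).
  bool bf16 = PD_MkldnnBfloat16Enabled(config);
  printf("bfloat16 enabled: %d\n", bf16);
  PD_DeleteAnalysisConfig(config);
  return 0;
}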
paddle/fluid/pybind/inference_api.cc
@@ -448,7 +448,6 @@ void BindAnalysisConfig(py::module *m) {
            &AnalysisConfig::cpu_math_library_num_threads)
       .def("to_native_config", &AnalysisConfig::ToNativeConfig)
       .def("enable_quantizer", &AnalysisConfig::EnableMkldnnQuantizer)
-      .def("enable_mkldnn_bfloat16", &AnalysisConfig::EnableMkldnnBfloat16)
 #ifdef PADDLE_WITH_MKLDNN
       .def("quantizer_config", &AnalysisConfig::mkldnn_quantizer_config,
            py::return_value_policy::reference)
@@ -566,7 +565,6 @@ void BindPaddlePassBuilder(py::module *m) {
       .def("enable_cudnn", &PassStrategy::EnableCUDNN)
       .def("enable_mkldnn", &PassStrategy::EnableMKLDNN)
       .def("enable_mkldnn_quantizer", &PassStrategy::EnableMkldnnQuantizer)
-      .def("enable_mkldnn_bfloat16", &PassStrategy::EnableMkldnnBfloat16)
       .def("use_gpu", &PassStrategy::use_gpu);
 
   py::class_<CpuPassStrategy, PassStrategy>(*m, "CpuPassStrategy")
@@ -574,16 +572,14 @@ void BindPaddlePassBuilder(py::module *m) {
       .def(py::init<const CpuPassStrategy &>())
       .def("enable_cudnn", &CpuPassStrategy::EnableCUDNN)
       .def("enable_mkldnn", &CpuPassStrategy::EnableMKLDNN)
-      .def("enable_mkldnn_quantizer", &CpuPassStrategy::EnableMkldnnQuantizer)
-      .def("enable_mkldnn_bfloat16", &CpuPassStrategy::EnableMkldnnBfloat16);
+      .def("enable_mkldnn_quantizer", &CpuPassStrategy::EnableMkldnnQuantizer);
 
   py::class_<GpuPassStrategy, PassStrategy>(*m, "GpuPassStrategy")
       .def(py::init<>())
       .def(py::init<const GpuPassStrategy &>())
       .def("enable_cudnn", &GpuPassStrategy::EnableCUDNN)
       .def("enable_mkldnn", &GpuPassStrategy::EnableMKLDNN)
-      .def("enable_mkldnn_quantizer", &GpuPassStrategy::EnableMkldnnQuantizer)
-      .def("enable_mkldnn_bfloat16", &GpuPassStrategy::EnableMkldnnBfloat16);
+      .def("enable_mkldnn_quantizer", &GpuPassStrategy::EnableMkldnnQuantizer);
 }
 }  // namespace
 }  // namespace pybind
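The inference_api.cc hunks drop one .def per binding chain; the two added lines simply re-terminate the CpuPassStrategy and GpuPassStrategy chains with a semicolon on the now-last enable_mkldnn_quantizer entry. For readers unfamiliar with the idiom, a minimal pybind11 sketch of such a method-binding chain (Widget and the module name demo are hypothetical):

#include <pybind11/pybind11.h>

namespace py = pybind11;

// Hypothetical class standing in for PassStrategy.
class Widget {
 public:
  void enable_feature() { feature_ = true; }
  bool feature_on() const { return feature_; }

 private:
  bool feature_{false};
};

// Each .def returns the class_ object, so bindings chain until the final
// semicolon -- which is why removing the last .def in the diff above also
// has to move the semicolon onto the previous entry.
PYBIND11_MODULE(demo, m) {
  py::class_<Widget>(m, "Widget")
      .def(py::init<>())
      .def("enable_feature", &Widget::enable_feature)
      .def("feature_on", &Widget::feature_on);
}

From Python, demo.Widget().enable_feature() then behaves the way the reverted config.enable_mkldnn_bfloat16() binding did.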