Unverified commit 1b894e49
Authored on Nov 20, 2018 by Tao Luo; committed by GitHub on Nov 20, 2018.
Merge pull request #14437 from jczaja/prv-softmax-mkl
Introducing MKL to softmax for inference
Parents: a94a7355, 9b0eae30
Showing 4 changed files with 53 additions and 38 deletions:
CMakeLists.txt (+8, -7)
paddle/fluid/operators/math/softmax.h (+2, -1)
paddle/fluid/operators/math/softmax_impl.h (+39, -28)
paddle/fluid/operators/softmax_op.h (+4, -2)
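For context, both code paths touched by this change compute the row-wise softmax in its numerically stable, max-shifted form; the new CPU specialization changes only how it is evaluated (MKL-backed primitives instead of an Eigen expression tree), not what is computed:

\[ \mathrm{softmax}(x)_{c} = \frac{\exp\left(x_{c} - \max_{j} x_{j}\right)}{\sum_{k} \exp\left(x_{k} - \max_{j} x_{j}\right)} \]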
CMakeLists.txt

@@ -302,6 +302,14 @@ set(PADDLE_PYTHON_BUILD_DIR "${CMAKE_CURRENT_BINARY_DIR}/python/build")
 set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "-O3 -g -DNDEBUG")
 set(CMAKE_C_FLAGS_RELWITHDEBINFO "-O3 -g -DNDEBUG")
 
+if (ON_INFER)
+  message(STATUS "On inference mode, will take place some specific optimization.")
+  add_definitions(-DPADDLE_ON_INFERENCE)
+else()
+  #TODO(luotao), combine this warning with `make inference_lib_dist` command.
+  message(WARNING "On inference mode, will take place some specific optimization. Turn on the ON_INFER flag when building inference_lib only.")
+endif()
+
 add_subdirectory(paddle)
 if(WITH_PYTHON)
   add_subdirectory(python)

@@ -312,10 +320,3 @@ if(WITH_DOC)
   find_python_module(recommonmark REQUIRED)
   add_subdirectory(doc)
 endif()
-
-if (ON_INFER)
-  message(STATUS "On inference mode, will take place some specific optimization.")
-else()
-  #TODO(luotao), combine this warning with `make inference_lib_dist` command.
-  message(WARNING "On inference mode, will take place some specific optimization. Turn on the ON_INFER flag when building inference_lib only.")
-endif()
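A minimal sketch (not part of the PR) of why the relocated block now calls add_definitions(-DPADDLE_ON_INFERENCE): the definition is injected into every translation unit compiled afterwards, which is what lets softmax_op.h below select the optimized functor with a plain #ifdef.

// Hypothetical standalone example: compile with -DPADDLE_ON_INFERENCE to
// take the first branch; without it, the second branch is compiled in.
#include <cstdio>

int main() {
#ifdef PADDLE_ON_INFERENCE
  std::puts("inference build: optimized softmax path compiled in");
#else
  std::puts("default build: generic softmax path compiled in");
#endif
}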
paddle/fluid/operators/math/softmax.h

@@ -19,7 +19,8 @@ namespace paddle {
 namespace operators {
 namespace math {
 
-template <typename DeviceContext, typename T, bool is_test>
+template <typename DeviceContext, typename T, bool is_test,
+          typename Enable = void>
 class SoftmaxFunctor {
  public:
   void operator()(const DeviceContext& context, const framework::Tensor* X,
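The extra `typename Enable = void` parameter is the standard SFINAE hook that makes the CPU-only partial specialization in softmax_impl.h possible. A self-contained sketch of the pattern (CPU/GPU here are stand-in tags, not Paddle types):

#include <cstdio>
#include <type_traits>

struct CPU {};
struct GPU {};

// Primary template: the trailing Enable parameter defaults to void,
// so Functor<D> really means Functor<D, void>.
template <typename Device, typename Enable = void>
struct Functor {
  void operator()() { std::puts("generic path"); }
};

// Partial specialization: enable_if<...>::type is void exactly when
// Device is CPU; for any other Device the substitution fails and this
// candidate is silently discarded in favor of the primary template.
template <typename Device>
struct Functor<Device,
               typename std::enable_if<std::is_same<Device, CPU>::value>::type> {
  void operator()() { std::puts("CPU-specialized path"); }
};

int main() {
  Functor<CPU>()();  // prints "CPU-specialized path"
  Functor<GPU>()();  // prints "generic path"
}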
paddle/fluid/operators/math/softmax_impl.h

@@ -16,6 +16,7 @@ limitations under the License. */
 #include "paddle/fluid/framework/eigen.h"
 #include "paddle/fluid/framework/tensor.h"
+#include "paddle/fluid/operators/math/blas.h"
 
 namespace paddle {
 namespace operators {
 namespace math {

@@ -32,8 +33,8 @@ struct ValueClip {
   }
 };
 
-template <typename DeviceContext, typename T, bool is_test>
-void SoftmaxFunctor<DeviceContext, T, is_test>::operator()(
+template <typename DeviceContext, typename T, bool is_test, typename Enable>
+void SoftmaxFunctor<DeviceContext, T, is_test, Enable>::operator()(
     const DeviceContext& context, const framework::Tensor* X,
     framework::Tensor* Y) {
   auto logits = EigenMatrix<T>::From(*X);

@@ -65,36 +66,46 @@ void SoftmaxFunctor<DeviceContext, T, is_test>::operator()(
           .broadcast(one_by_class));
 }
 
-template <typename DeviceContext, typename T>
-class SoftmaxFunctor<DeviceContext, T, true> {
+template <class DeviceContext>
+using enable_if_CPU = typename std::enable_if<
+    std::is_same<DeviceContext, platform::CPUDeviceContext>::value>::type;
+
+template <typename DeviceContext>
+class SoftmaxFunctor<DeviceContext, float, true, enable_if_CPU<DeviceContext>> {
   void operator()(const DeviceContext& context, const framework::Tensor* X,
                   framework::Tensor* Y) {
-    auto logits = EigenMatrix<T>::From(*X);
-    auto softmax = EigenMatrix<T>::From(*Y);
+    auto in_dims = X->dims();
+    auto out_dims = Y->dims();
+    const float* in_data = X->data<float>();
+    float* out_data = Y->data<float>();
     const int kBatchDim = 0;
     const int kClassDim = 1;
-
-    const int batch_size = logits.dimension(kBatchDim);
-    const int num_classes = logits.dimension(kClassDim);
-
-    Eigen::DSizes<int, 1> along_class(kClassDim);
-    Eigen::DSizes<int, 2> batch_by_one(batch_size, 1);
-    Eigen::DSizes<int, 2> one_by_class(1, num_classes);
-
-    auto shifted_logits = (logits -
-                           logits.maximum(along_class)
-                               .eval()
-                               .reshape(batch_by_one)
-                               .broadcast(one_by_class));
-
-    softmax.device(*context.eigen_device()) = shifted_logits.exp();
-    softmax.device(*context.eigen_device()) = (softmax *
-                                               softmax.sum(along_class)
-                                                   .inverse()
-                                                   .eval()
-                                                   .reshape(batch_by_one)
-                                                   .broadcast(one_by_class));
+    // 2D data. Batch x C
+    const int batch_size = in_dims[kBatchDim];
+    const int num_classes = in_dims[kClassDim];
+    std::vector<float> entities(batch_size);
+    auto blas = math::GetBlas<DeviceContext, float>(context);
+    for (int n = 0; n < batch_size; ++n) {
+      entities[n] = in_data[n * num_classes];
+      for (int c = 1; c < num_classes; ++c) {
+        entities[n] = in_data[n * num_classes + c] > entities[n]
+                          ? in_data[n * num_classes + c]
+                          : entities[n];
+      }
+      for (int c = 0; c < num_classes; ++c) {
+        out_data[n * num_classes + c] =
+            in_data[n * num_classes + c] - entities[n];
+      }
+    }
+
+    blas.VEXP(num_classes * batch_size, out_data, out_data);
+    for (int n = 0; n < batch_size; ++n) {
+      entities[n] = out_data[n * num_classes];
+      for (int c = 1; c < num_classes; ++c) {
+        entities[n] += out_data[n * num_classes + c];
+      }
+      blas.SCAL(num_classes, 1.0f / entities[n], &out_data[n * num_classes]);
+    }
   }
 };
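The new functor replaces the Eigen expression tree with three flat passes over the 2-D (batch x class) buffer. VEXP and SCAL are Paddle's Blas wrapper methods, which MKL-enabled builds serve with MKL's vectorized exponential and scaling routines (the "MKL" of the PR title). Below is a dependency-free sketch of the same algorithm, with plain loops standing in for those two calls; SoftmaxSketch is a name introduced here for illustration:

#include <algorithm>
#include <cmath>
#include <vector>

// Sketch of the CPU softmax above: per row, shift by the max for numerical
// stability, exponentiate, then scale by the inverse of the row sum.
void SoftmaxSketch(const float* in, float* out, int batch_size,
                   int num_classes) {
  std::vector<float> acc(batch_size);
  // Pass 1: per-row max, subtracted from every element of the row.
  for (int n = 0; n < batch_size; ++n) {
    const float* row = in + n * num_classes;
    acc[n] = *std::max_element(row, row + num_classes);
    for (int c = 0; c < num_classes; ++c)
      out[n * num_classes + c] = row[c] - acc[n];
  }
  // Pass 2: elementwise exp over the whole buffer (blas.VEXP in the PR).
  for (int i = 0; i < batch_size * num_classes; ++i) out[i] = std::exp(out[i]);
  // Pass 3: per-row sum, then scale the row by its inverse (blas.SCAL).
  for (int n = 0; n < batch_size; ++n) {
    float sum = 0.f;
    for (int c = 0; c < num_classes; ++c) sum += out[n * num_classes + c];
    const float inv = 1.0f / sum;
    for (int c = 0; c < num_classes; ++c) out[n * num_classes + c] *= inv;
  }
}

int main() {
  const float logits[4] = {1.f, 2.f, 0.5f, 0.5f};  // 2 rows x 2 classes
  float probs[4];
  SoftmaxSketch(logits, probs, /*batch_size=*/2, /*num_classes=*/2);
}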
paddle/fluid/operators/softmax_op.h

@@ -35,8 +35,10 @@ class SoftmaxKernel : public framework::OpKernel<T> {
     Tensor X_2d = framework::ReshapeToMatrix(*X, rank - 1);
     Tensor Out_2d = framework::ReshapeToMatrix(*Out, rank - 1);
 
-#ifdef ON_INFER
-    math::SoftmaxFunctor<DeviceContext, T, true>()(
+#ifdef PADDLE_ON_INFERENCE
+    math::SoftmaxFunctor<
+        DeviceContext, T,
+        std::is_same<DeviceContext, platform::CPUDeviceContext>::value>()(
         context.template device_context<DeviceContext>(), &X_2d, &Out_2d);
 #else
     math::SoftmaxFunctor<DeviceContext, T, false>()(
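Note how the inference branch now computes the `is_test` template argument at compile time: `std::is_same<DeviceContext, platform::CPUDeviceContext>::value` is true only for the CPU context, so only CPU kernels instantiate the specialized functor while GPU kernels keep the generic one. A self-contained sketch of that dispatch (the device types below are stand-ins, not Paddle's):

#include <cstdio>
#include <type_traits>

struct CPUDeviceContext {};  // stand-in, not paddle::platform::CPUDeviceContext
struct GPUDeviceContext {};  // stand-in

template <bool is_test>
void RunSoftmax() {
  std::puts(is_test ? "is_test=true: specialized inference functor"
                    : "is_test=false: generic functor");
}

template <typename DeviceContext>
void Dispatch() {
  // Mirrors the kernel above: the bool argument is a compile-time constant.
  RunSoftmax<std::is_same<DeviceContext, CPUDeviceContext>::value>();
}

int main() {
  Dispatch<CPUDeviceContext>();  // true  -> specialized path
  Dispatch<GPUDeviceContext>();  // false -> generic path
}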