BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Commit eb2123e1
Authored Mar 27, 2019 by dengkaipeng

fix doc and jit. test=develop

Parent: 7920e3be
Showing 7 changed files with 14 additions and 10 deletions (+14 -10)
paddle/fluid/API.spec                        +1 -1
paddle/fluid/operators/jit/kernel_base.h     +2 -2
paddle/fluid/operators/jit/more/mix/mix.cc   +3 -2
paddle/fluid/operators/jit/more/mkl/mkl.h    +1 -0
paddle/fluid/operators/jit/refer/refer.h     +1 -0
paddle/fluid/operators/jit/test.cc           +2 -4
python/paddle/fluid/layers/nn.py             +4 -1
paddle/fluid/API.spec

@@ -86,7 +86,7 @@ paddle.fluid.layers.conv2d (ArgSpec(args=['input', 'num_filters', 'filter_size',
 paddle.fluid.layers.conv3d (ArgSpec(args=['input', 'num_filters', 'filter_size', 'stride', 'padding', 'dilation', 'groups', 'param_attr', 'bias_attr', 'use_cudnn', 'act', 'name'], varargs=None, keywords=None, defaults=(1, 0, 1, None, None, None, True, None, None)), ('document', '37042620f9bd3a2da6e5d3138b2f724b'))
 paddle.fluid.layers.sequence_pool (ArgSpec(args=['input', 'pool_type', 'is_test'], varargs=None, keywords=None, defaults=(False,)), ('document', 'a194fb80614023f543df3949fbd0d0b8'))
 paddle.fluid.layers.sequence_softmax (ArgSpec(args=['input', 'use_cudnn', 'name'], varargs=None, keywords=None, defaults=(False, None)), ('document', '19ef6f9cdd27feac8a1ae060f19c10b4'))
-paddle.fluid.layers.softmax (ArgSpec(args=['input', 'use_cudnn', 'name', 'axis'], varargs=None, keywords=None, defaults=(False, None, -1)), ('document', '502bad9e8bc7ef24817d0d4b20f61df3'))
+paddle.fluid.layers.softmax (ArgSpec(args=['input', 'use_cudnn', 'name', 'axis'], varargs=None, keywords=None, defaults=(False, None, -1)), ('document', '59b1c6bf2f0fa9dc649c85fef3a3b2ea'))
 paddle.fluid.layers.pool2d (ArgSpec(args=['input', 'pool_size', 'pool_type', 'pool_stride', 'pool_padding', 'global_pooling', 'use_cudnn', 'ceil_mode', 'name', 'exclusive'], varargs=None, keywords=None, defaults=(-1, 'max', 1, 0, False, True, False, None, True)), ('document', 'bbd84e855e660cd1084bb71a2fd0cdaa'))
 paddle.fluid.layers.pool3d (ArgSpec(args=['input', 'pool_size', 'pool_type', 'pool_stride', 'pool_padding', 'global_pooling', 'use_cudnn', 'ceil_mode', 'name', 'exclusive'], varargs=None, keywords=None, defaults=(-1, 'max', 1, 0, False, True, False, None, True)), ('document', '043de7333b79ee0ac55053c14ed81625'))
 paddle.fluid.layers.adaptive_pool2d (ArgSpec(args=['input', 'pool_size', 'pool_type', 'require_index', 'name'], varargs=None, keywords=None, defaults=('max', False, None)), ('document', '859b887174d06f361658f69cb7c06d95'))
paddle/fluid/operators/jit/kernel_base.h

@@ -38,6 +38,8 @@ typedef enum {
   kNCHW16CMulNC,
   kSeqPool,
   kSoftmax,
+  kStrideASum,
+  kStrideScal,
   kVAdd,
   kVAddBias,
   kVAddRelu,
@@ -53,8 +55,6 @@ typedef enum {
   kVSquare,
   kVSub,
   kVTanh,
-  kStrideASum,
-  kStrideScal,
 } KernelType;

 typedef enum {
paddle/fluid/operators/jit/more/mix/mix.cc

@@ -50,11 +50,12 @@ void VTanh(const T* x, T* y, int n) {
   compute_addbias(&b, y, y, n);
 }

+// remain is the product of dimension shapes after the axis dimension
 void Softmax(const T* x, T* y, int n, int bs, int remain) {
   auto compute_hmax = KernelFuncs<HMaxTuple<T>, CPUPlace>::Cache().At(n);
   auto compute_hsum = KernelFuncs<HSumTuple<T>, CPUPlace>::Cache().At(n);
   auto compute_vscal = KernelFuncs<VScalTuple<T>, CPUPlace>::Cache().At(n);
-  auto compute_stridesum =
+  auto compute_strideasum =
       KernelFuncs<StrideASumTuple<T>, CPUPlace>::Cache().At(n);
   auto compute_stridescal =
       KernelFuncs<StrideScalTuple<T>, CPUPlace>::Cache().At(n);
@@ -74,7 +75,7 @@ void Softmax(const T* x, T* y, int n, int bs, int remain) {
     compute_vscal(&scalar, y, y, n);
   } else {
     for (int j = 0; j < remain; ++j) {
-      compute_stridesum(&y[j], &scalar, n, remain);
+      compute_strideasum(&y[j], &scalar, n, remain);
       scalar = static_cast<T>(1) / scalar;
       compute_stridescal(&scalar, &y[j], &y[j], n, remain);
     }
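For context, the (n, bs, remain) arguments of this kernel can be read as a layout of the flattened input. The helper below is an illustrative sketch, not part of Paddle: the comment above only fixes the meaning of remain (product of dimension shapes after the axis dimension); treating bs as the product of dimensions before the axis and n as the product of dimensions from the axis onward is an assumption here, and the struct and function names are made up for this example.

// Illustrative only: how (bs, n, remain) could be derived from a shape and an
// axis before calling Softmax(x, y, n, bs, remain). Names are hypothetical.
#include <vector>

struct SoftmaxLayout {
  int bs;      // assumed: product of dims before `axis` (number of rows)
  int n;       // assumed: product of dims from `axis` onward (row length)
  int remain;  // product of dims after `axis` (stride between group elements)
};

inline SoftmaxLayout MakeSoftmaxLayout(const std::vector<int>& dims, int axis) {
  if (axis < 0) axis += static_cast<int>(dims.size());
  SoftmaxLayout layout{1, 1, 1};
  for (int i = 0; i < static_cast<int>(dims.size()); ++i) {
    if (i < axis) layout.bs *= dims[i];
    if (i >= axis) layout.n *= dims[i];
    if (i > axis) layout.remain *= dims[i];
  }
  return layout;
}

// Example: dims = {2, 3, 4}, axis = 1  ->  bs = 2, n = 12, remain = 4.
// Each row of 12 values then holds 4 interleaved softmax groups of length 3,
// whose elements sit `remain` apart, which is what the strided kernels walk.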
paddle/fluid/operators/jit/more/mkl/mkl.h

@@ -134,6 +134,7 @@ void StrideASum(const T* x, T* res, int n, int stride);
 template <typename T>
 void StrideScal(const T* a, const T* x, T* y, int n, int stride);

+// remain is the product of dimension shapes after the axis dimension
 template <typename T>
 void Softmax(const T* x, T* y, int n, int bs, int remain = 1) {
   std::vector<T> entities(bs);
paddle/fluid/operators/jit/refer/refer.h

@@ -432,6 +432,7 @@ void StrideScal(const T* a, const T* x, T* y, int n, int stride) {
 // y = e^(x - max(x))
 // y = y / sum(y)
+// remain is the product of dimension shapes after the axis dimension
 template <typename T>
 void Softmax(const T* x, T* y, int n, int bs = 1, int remain = 1) {
   for (int i = 0; i < bs; ++i) {
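The diff only shows the head of the reference kernel. As a reading aid, here is a minimal, self-contained sketch of the strided softmax semantics described by the two comments above, written with plain loops instead of the jit helper kernels (HMax, VExp, StrideASum, StrideScal); it is an assumption about the intended behaviour, not the Paddle implementation.

#include <algorithm>
#include <cmath>

// Sketch only: softmax over `bs` rows of length `n`, where each row holds
// `remain` interleaved groups (group j is y[j], y[j + remain], y[j + 2*remain], ...).
template <typename T>
void SoftmaxSketch(const T* x, T* y, int n, int bs = 1, int remain = 1) {
  for (int i = 0; i < bs; ++i) {
    // y = e^(x - max(x)); subtracting the row max is safe because softmax is
    // invariant to adding the same constant to every element of a group.
    T row_max = *std::max_element(x, x + n);
    for (int k = 0; k < n; ++k) y[k] = std::exp(x[k] - row_max);
    // y = y / sum(y), summed independently within each strided group.
    for (int j = 0; j < remain; ++j) {
      T sum = T(0);
      for (int k = j; k < n; k += remain) sum += y[k];
      for (int k = j; k < n; k += remain) y[k] /= sum;
    }
    x += n;
    y += n;
  }
}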
paddle/fluid/operators/jit/test.cc

@@ -798,10 +798,8 @@ template <typename KernelTuple, typename PlaceType>
 void TestKernelStrideScal() {
   using T = typename KernelTuple::data_type;
   VLOG(10) << "Test JITKernel: " << jit::to_string(KernelTuple::kernel_type);
-  // for (int d : TestSizes()) {
-  //   for (int m : {1, 2, 3}) {  // stride
-  for (int d : {4}) {
-    for (int m : {2}) {  // stride
+  for (int d : TestSizes()) {
+    for (int m : {1, 2, 3}) {  // stride
       if (m > d || d % m != 0) {
         continue;
       }
python/paddle/fluid/layers/nn.py

@@ -1826,7 +1826,7 @@ def softmax(input, use_cudnn=False, name=None, axis=-1):
     The dimension :attr:`axis` of the input tensor will be permuted to the last.
     Then the input tensor will be logically flattened to a 2-D matrix. The matrix's
-    second dimension(row length) is as same as the dimension :attr:`axis` of the input
+    second dimension(row length) is the same as the dimension :attr:`axis` of the input
     tensor, and the first dimension(column length) is the product of all other
     dimensions of the input tensor. For each row of the matrix, the softmax operator
     squashes the K-dimensional(K is the width of the matrix, which is also the size
@@ -1864,7 +1864,10 @@ def softmax(input, use_cudnn=False, name=None, axis=-1):
         .. code-block:: python

             fc = fluid.layers.fc(input=x, size=10)
+            # perform softmax in the second dimension
+            softmax = fluid.layers.softmax(input=fc, axis=1)
+            # perform softmax in the last dimension
             softmax = fluid.layers.softmax(input=fc, axis=-1)
     """
     helper = LayerHelper('softmax', **locals())