Commit e722f683 (unverified)
Authored on Sep 03, 2018 by dzhwinter; committed by GitHub on Sep 03, 2018

fix windows compile (#13147)

Parent: f0552006

Showing 4 changed files with 12 additions and 12 deletions (+12 -12)
paddle/fluid/operators/activation_op.h      +2 -2
paddle/fluid/operators/attention_lstm_op.cc +0 -1
paddle/fluid/operators/gru_unit_op.h        +8 -8
paddle/fluid/operators/label_smooth_op.h    +2 -1
paddle/fluid/operators/activation_op.h
@@ -865,8 +865,8 @@ struct SwishGradFunctor : public BaseActivationFunctor<T> {
   void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
     auto temp1 = static_cast<T>(1) /
                  (static_cast<T>(1) + (static_cast<T>(-beta) * x).exp());
-    auto temp2 = temp1 * (static_cast<T>(1) - (beta * out));
-    dx.device(d) = dout * ((beta * out) + temp2);
+    auto temp2 = temp1 * (static_cast<T>(1) - (static_cast<T>(beta) * out));
+    dx.device(d) = dout * ((static_cast<T>(beta) * out) + temp2);
   }
 };
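A minimal standalone sketch of the gradient this hunk computes, written against plain scalars rather than Paddle's Eigen expressions so it can be compiled in isolation; the function name SwishGrad and the use of std::exp are illustrative, not Paddle's API. It mirrors the patched pattern of casting the float attribute beta to the element type T before every multiplication, keeping all operands the same scalar type (a presumable cause of the MSVC build failure the commit fixes).

    #include <cmath>
    #include <cstdio>

    // Illustrative only: swish(x) = x * sigmoid(beta * x); its gradient is
    // beta * swish(x) + sigmoid(beta * x) * (1 - beta * swish(x)).
    template <typename T>
    T SwishGrad(T x, float beta, T dout) {
      T sig = static_cast<T>(1) /
              (static_cast<T>(1) + std::exp(static_cast<T>(-beta) * x));  // temp1
      T out = x * sig;                                                    // swish(x)
      T temp2 = sig * (static_cast<T>(1) - static_cast<T>(beta) * out);
      return dout * (static_cast<T>(beta) * out + temp2);
    }

    int main() {
      std::printf("%f\n", SwishGrad<double>(0.5, 1.0f, 1.0));
      return 0;
    }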
paddle/fluid/operators/attention_lstm_op.cc
@@ -13,7 +13,6 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 #include "paddle/fluid/operators/attention_lstm_op.h"
-#include <sys/time.h>
 #include <string>
 #include "paddle/fluid/operators/math/blas.h"
 #include "paddle/fluid/operators/math/cpu_vec.h"
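An aside on why the include is dropped rather than replaced: <sys/time.h> is a POSIX header that MSVC does not provide, and nothing in the surrounding includes suggests it is still needed here, so removing it appears to be the entire fix for this file. If wall-clock timing were actually wanted in portable code, a sketch of the usual standard-library alternative (std::chrono, nothing Paddle-specific) would look like:

    #include <chrono>
    #include <cstdio>

    int main() {
      auto start = std::chrono::steady_clock::now();
      // ... work being timed ...
      auto us = std::chrono::duration_cast<std::chrono::microseconds>(
                    std::chrono::steady_clock::now() - start)
                    .count();
      std::printf("elapsed: %lld us\n", static_cast<long long>(us));
      return 0;
    }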
paddle/fluid/operators/gru_unit_op.h
@@ -92,12 +92,12 @@ class GRUUnitKernel : public framework::OpKernel<T> {
                   gate_data, frame_size * 3);
     // calculate activited gate
-    Eigen::array<int, 2> extents({{batch_size, frame_size}});
-    Eigen::array<int, 2> u_offsets({{0, 0}});
+    Eigen::array<int, 2> extents = {batch_size, frame_size};
+    Eigen::array<int, 2> u_offsets = {0, 0};
     ActCompute(context.Attr<int>("gate_activation"), place,
                g.slice(u_offsets, extents), g.slice(u_offsets, extents));
     auto u = g.slice(u_offsets, extents);  // update gate
-    Eigen::array<int, 2> r_offsets({{0, frame_size}});
+    Eigen::array<int, 2> r_offsets = {0, frame_size};
     ActCompute(context.Attr<int>("gate_activation"), place,
                g.slice(r_offsets, extents), g.slice(r_offsets, extents));
     auto r = g.slice(r_offsets, extents);  // reset gate
@@ -107,7 +107,7 @@ class GRUUnitKernel : public framework::OpKernel<T> {
                weight_data + frame_size * frame_size * 2, frame_size, 1,
                gate_data + frame_size * 2, frame_size * 3);
-    Eigen::array<int, 2> c_offsets({{0, frame_size * 2}});
+    Eigen::array<int, 2> c_offsets = {0, frame_size * 2};
     ActCompute(context.Attr<int>("activation"), place,
                g.slice(c_offsets, extents), g.slice(c_offsets, extents));
     auto c = g.slice(c_offsets, extents);  // output candidate
@@ -171,12 +171,12 @@ class GRUUnitGradKernel : public framework::OpKernel<T> {
     int batch_size = input->dims()[0];
     int frame_size = hidden_prev->dims()[1];
-    Eigen::array<int, 2> extents({{batch_size, frame_size}});
-    Eigen::array<int, 2> u_offsets({{0, 0}});
+    Eigen::array<int, 2> extents = {batch_size, frame_size};
+    Eigen::array<int, 2> u_offsets = {0, 0};
     auto u = g.slice(u_offsets, extents);  // update gate
-    Eigen::array<int, 2> r_offsets({{0, frame_size}});
+    Eigen::array<int, 2> r_offsets = {0, frame_size};
     auto r = g.slice(r_offsets, extents);  // reset gate
-    Eigen::array<int, 2> c_offsets({{0, frame_size * 2}});
+    Eigen::array<int, 2> c_offsets = {0, frame_size * 2};
     auto c = g.slice(c_offsets, extents);  // output candidate
     // backward for unactivated update gate
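All of the gru_unit_op.h hunks make the same substitution: Eigen::array objects built with the ({{...}}) constructor call are switched to plain copy-list-initialization, presumably because MSVC rejected the former; the two forms should be equivalent, since Eigen::array is an alias of std::array under C++11. Below is a self-contained sketch of the slicing pattern, assuming Eigen's unsupported Tensor module is available; the tensor g and the sizes are made up for illustration and are not Paddle's actual data.

    #include <unsupported/Eigen/CXX11/Tensor>
    #include <iostream>

    int main() {
      const int batch_size = 2, frame_size = 3;
      Eigen::Tensor<float, 2> g(batch_size, frame_size * 3);  // stand-in for the gate tensor
      g.setRandom();

      // The initialization style the patch switches to.
      Eigen::array<int, 2> extents = {batch_size, frame_size};
      Eigen::array<int, 2> u_offsets = {0, 0};
      Eigen::array<int, 2> r_offsets = {0, frame_size};

      // Slice out the update-gate and reset-gate blocks, as the kernel does.
      Eigen::Tensor<float, 2> u = g.slice(u_offsets, extents);
      Eigen::Tensor<float, 2> r = g.slice(r_offsets, extents);
      std::cout << "u is " << u.dimension(0) << "x" << u.dimension(1) << std::endl;
      return 0;
    }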
paddle/fluid/operators/label_smooth_op.h
@@ -38,7 +38,8 @@ class LabelSmoothKernel : public framework::OpKernel<T> {
       auto dist = framework::EigenVector<T>::Flatten(*dist_t);
       out.device(dev) =
           static_cast<T>(1 - epsilon) * in +
-          epsilon * dist.broadcast(Eigen::DSizes<int, 1>(in_t->numel()));
+          static_cast<T>(epsilon) *
+              dist.broadcast(Eigen::DSizes<int, 1>(in_t->numel()));
     } else {
       out.device(dev) = static_cast<T>(1 - epsilon) * in +
                         static_cast<T>(epsilon / label_dim);
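The label_smooth_op.h change follows the same pattern as the activation_op.h one: epsilon is cast to the element type T before it multiplies the Eigen expression, so both operands share a scalar type. A small standalone sketch of the smoothing formula itself, using plain std::vector and a hypothetical LabelSmooth helper rather than Paddle's kernel:

    #include <cstdio>
    #include <vector>

    // out = (1 - epsilon) * in + epsilon * dist, with dist broadcast over in.
    template <typename T>
    std::vector<T> LabelSmooth(const std::vector<T>& in,
                               const std::vector<T>& dist, float epsilon) {
      std::vector<T> out(in.size());
      for (size_t i = 0; i < in.size(); ++i) {
        out[i] = static_cast<T>(1 - epsilon) * in[i] +
                 static_cast<T>(epsilon) * dist[i % dist.size()];
      }
      return out;
    }

    int main() {
      std::vector<double> onehot = {0.0, 1.0, 0.0, 0.0};
      std::vector<double> prior = {0.25, 0.25, 0.25, 0.25};
      for (double v : LabelSmooth(onehot, prior, 0.1f)) std::printf("%.3f ", v);
      std::printf("\n");  // 0.025 0.925 0.025 0.025
      return 0;
    }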
登录