Crayon鑫 / Paddle (forked from PaddlePaddle / Paddle)
Commit 41271f03
fix gpu build error
Authored by qijun on Sep 14, 2017
Parent: fd5aa2ad
Showing 2 changed files with 20 additions and 40 deletions (+20 -40):

paddle/operators/activation_op.cu                    +18  -38
python/paddle/trainer_config_helpers/networks.py      +2   -2
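Both hunks in activation_op.cu touch only how the activation functors are spelled in the kernel registrations: sigmoid and sigmoid_grad gain an explicit <float> template argument, tanh, sqrt, abs, log and square drop it (square additionally fixes the misspelled ops::squareFunctor), and the REGISTER_OP(...) operator registrations are dropped from the GPU file, presumably because they duplicate the registrations in the corresponding .cc. The underlying build error is the usual one: the third argument of ops::ActivationKernel must name a concrete type, so a functor declared as a class template needs <float>, while a plain struct must be passed without it. A minimal standalone sketch of that distinction, using hypothetical stand-in names rather than the real Paddle templates:

// Standalone illustration (not Paddle code) of the two compile errors fixed below.
// Kernel, SigmoidFunctor and TanhFunctor here are hypothetical stand-ins.
#include <cmath>

template <typename T>
struct SigmoidFunctor {  // a class template: must be written SigmoidFunctor<float>
  T operator()(T x) const { return T(1) / (T(1) + std::exp(-x)); }
};

struct TanhFunctor {     // a plain struct: must be written without <float>
  template <typename T>
  T operator()(T x) const { return std::tanh(x); }
};

template <typename T, typename Functor>
struct Kernel {          // stand-in for ops::ActivationKernel<Place, T, Functor>
  T Run(T x) const { return Functor()(x); }
};

int main() {
  Kernel<float, SigmoidFunctor<float>> k1;  // OK: a concrete type is passed
  Kernel<float, TanhFunctor> k2;            // OK: already a concrete type
  // Kernel<float, SigmoidFunctor> k3;      // error: SigmoidFunctor is a template, not a type
  // Kernel<float, TanhFunctor<float>> k4;  // error: TanhFunctor is not a template
  return static_cast<int>(k1.Run(0.0f) + k2.Run(0.0f));
}

Which form is correct for each functor depends on how it is declared in activation_op.h at this commit, which is why the fix goes in both directions.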
paddle/operators/activation_op.cu
@@ -19,10 +19,10 @@ namespace ops = paddle::operators;
 REGISTER_OP_GPU_KERNEL(sigmoid,
                        ops::ActivationKernel<paddle::platform::GPUPlace, float,
-                                             ops::SigmoidFunctor>);
+                                             ops::SigmoidFunctor<float>>);
 REGISTER_OP_GPU_KERNEL(
     sigmoid_grad, ops::ActivationGradKernel<paddle::platform::GPUPlace, float,
-                                            ops::SigmoidGradFunctor>);
+                                            ops::SigmoidGradFunctor<float>>);
 REGISTER_OP_GPU_KERNEL(
     exp,
@@ -37,35 +37,27 @@ REGISTER_OP_GPU_KERNEL(
     relu_grad, ops::ActivationGradKernel<paddle::platform::GPUPlace, float,
                                          ops::ReluGradFunctor<float>>);
-REGISTER_OP(tanh, ops::ActivationOp, ops::TanhOpMaker, tanh_grad,
-            ops::ActivationOpGrad);
-REGISTER_OP_GPU_KERNEL(tanh,
-                       ops::ActivationKernel<paddle::platform::GPUPlace, float,
-                                             ops::TanhFunctor<float>>);
+REGISTER_OP_GPU_KERNEL(tanh,
+                       ops::ActivationKernel<paddle::platform::GPUPlace, float,
+                                             ops::TanhFunctor>);
 REGISTER_OP_GPU_KERNEL(
     tanh_grad, ops::ActivationGradKernel<paddle::platform::GPUPlace, float,
                                          ops::TanhGradFunctor<float>>);
-REGISTER_OP(sqrt, ops::ActivationOp, ops::SqrtOpMaker, sqrt_grad,
-            ops::ActivationOpGrad);
-REGISTER_OP_GPU_KERNEL(sqrt,
-                       ops::ActivationKernel<paddle::platform::GPUPlace, float,
-                                             ops::SqrtFunctor<float>>);
+REGISTER_OP_GPU_KERNEL(sqrt,
+                       ops::ActivationKernel<paddle::platform::GPUPlace, float,
+                                             ops::SqrtFunctor>);
 REGISTER_OP_GPU_KERNEL(
     sqrt_grad, ops::ActivationGradKernel<paddle::platform::GPUPlace, float,
                                          ops::SqrtGradFunctor<float>>);
-REGISTER_OP(abs, ops::ActivationOp, ops::AbsOpMaker, abs_grad,
-            ops::ActivationOpGrad);
-REGISTER_OP_GPU_KERNEL(abs,
-                       ops::ActivationKernel<paddle::platform::GPUPlace, float,
-                                             ops::AbsFunctor<float>>);
-REGISTER_OP_GPU_KERNEL(
-    abs_grad, ops::ActivationGradKernel<paddle::platform::GPUPlace, float,
-                                        ops::AbsGradFunctor<float>>);
+REGISTER_OP_GPU_KERNEL(abs,
+                       ops::ActivationKernel<paddle::platform::GPUPlace, float,
+                                             ops::AbsFunctor>);
+REGISTER_OP_GPU_KERNEL(
+    abs_grad, ops::ActivationGradKernel<paddle::platform::GPUPlace, float,
+                                        ops::AbsGradFunctor>);
-REGISTER_OP(reciprocal, ops::ActivationOp, ops::ReciprocalOpMaker,
-            reciprocal_grad, ops::ActivationOpGrad);
 REGISTER_OP_GPU_KERNEL(
     reciprocal, ops::ActivationKernel<paddle::platform::GPUPlace, float,
                                       ops::ReciprocalFunctor<float>>);
@@ -74,47 +66,35 @@ REGISTER_OP_GPU_KERNEL(
     ops::ActivationGradKernel<paddle::platform::GPUPlace, float,
                               ops::ReciprocalGradFunctor<float>>);
-REGISTER_OP(log, ops::ActivationOp, ops::LogOpMaker, log_grad,
-            ops::ActivationOpGrad);
-REGISTER_OP_GPU_KERNEL(log,
-                       ops::ActivationKernel<paddle::platform::GPUPlace, float,
-                                             ops::LogFunctor<float>>);
+REGISTER_OP_GPU_KERNEL(log,
+                       ops::ActivationKernel<paddle::platform::GPUPlace, float,
+                                             ops::LogFunctor>);
 REGISTER_OP_GPU_KERNEL(
     log_grad, ops::ActivationGradKernel<paddle::platform::GPUPlace, float,
                                         ops::LogGradFunctor<float>>);
-REGISTER_OP(square, ops::ActivationOp, ops::SquareOpMaker, square_grad,
-            ops::ActivationOpGrad);
 REGISTER_OP_GPU_KERNEL(square,
                        ops::ActivationKernel<paddle::platform::GPUPlace, float,
-                                             ops::squareFunctor<float>>);
+                                             ops::SquareFunctor>);
 REGISTER_OP_GPU_KERNEL(
     square_grad, ops::ActivationGradKernel<paddle::platform::GPUPlace, float,
                                            ops::SquareGradFunctor<float>>);
-REGISTER_OP(brelu, ops::ActivationOp, ops::BReluOpMaker<float>, brelu_grad,
-            ops::ActivationOpGrad);
 REGISTER_OP_GPU_KERNEL(brelu,
                        ops::BReluKernel<paddle::platform::GPUPlace, float>);
 REGISTER_OP_GPU_KERNEL(
     brelu_grad, ops::BReluGradKernel<paddle::platform::GPUPlace, float>);
-REGISTER_OP(soft_relu, ops::ActivationOp, ops::SoftReluOpMaker<float>,
-            soft_relu_grad, ops::ActivationOpGrad);
 REGISTER_OP_GPU_KERNEL(soft_relu,
                        ops::SoftReluKernel<paddle::platform::GPUPlace, float>);
 REGISTER_OP_GPU_KERNEL(
     soft_relu_grad,
     ops::SoftReluGradKernel<paddle::platform::GPUPlace, float>);
-REGISTER_OP(pow, ops::ActivationOp, ops::PowOpMaker<float>, pow_grad,
-            ops::ActivationOpGrad);
 REGISTER_OP_GPU_KERNEL(pow, ops::PowKernel<paddle::platform::GPUPlace, float>);
 REGISTER_OP_GPU_KERNEL(
     pow_grad, ops::PowGradKernel<paddle::platform::GPUPlace, float>);
-REGISTER_OP(stanh, ops::ActivationOp, ops::STanhOpMaker<float>, stanh_grad,
-            ops::ActivationOpGrad);
 REGISTER_OP_GPU_KERNEL(stanh,
                        ops::STanhKernel<paddle::platform::GPUPlace, float>);
 REGISTER_OP_GPU_KERNEL(
     stanh_grad,
-    ops::STanhGradKernel<paddle::platform::GPUPlace, float>);
\ No newline at end of file
+    ops::STanhGradKernel<paddle::platform::GPUPlace, float>);
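The removed REGISTER_OP(...) lines are whole-operator registrations rather than GPU kernel registrations. In this codebase the operator itself is normally registered once, in the .cc file, and the .cu file only adds GPU kernels; keeping REGISTER_OP in both translation units would register each op name a second time, which a name-keyed registry will typically reject. A toy sketch of that failure mode (illustration only, not paddle::framework::OpRegistry):

// Toy name-keyed registry demonstrating why an op may be registered only once.
#include <iostream>
#include <map>
#include <stdexcept>
#include <string>

std::map<std::string, int>& Registry() {
  static std::map<std::string, int> r;
  return r;
}

// Stand-in for what a REGISTER_OP-style macro expands to: a static object
// whose constructor inserts the op name exactly once.
struct Registrar {
  explicit Registrar(const std::string& name) {
    if (!Registry().emplace(name, 0).second)
      throw std::runtime_error("op '" + name + "' registered twice");
  }
};

static Registrar tanh_from_cc("tanh");    // e.g. from activation_op.cc
// static Registrar tanh_from_cu("tanh"); // a second registration, as in the
//                                        // removed .cu lines, would throw here

int main() {
  std::cout << Registry().size() << " op(s) registered\n";
  return 0;
}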
python/paddle/trainer_config_helpers/networks.py
@@ -1406,7 +1406,7 @@ def inputs(layers, *args):
     if len(args) != 0:
         layers.extend(args)
 
-    Inputs(*[l.name for l in layers])
+    Inputs(*[l.name for l in layers])
 
 
 def outputs(layers, *args):
@@ -1456,7 +1456,7 @@ def outputs(layers, *args):
     assert len(layers) > 0
 
     if HasInputsSet():  # input already set
-        Outputs(*[l.name for l in layers])
+        Outputs(*[l.name for l in layers])
         return  # just return outputs.
 
     if len(layers) != 1: