diff --git a/paddle/operators/activation_op.cu b/paddle/operators/activation_op.cu
index 112b33d22535f2fb6bada6e3edca3a15222eefd8..feed1302b292a546f88fa35457c86aa2cfdaa307 100644
--- a/paddle/operators/activation_op.cu
+++ b/paddle/operators/activation_op.cu
@@ -19,10 +19,10 @@ namespace ops = paddle::operators;
 REGISTER_OP_GPU_KERNEL(sigmoid,
                        ops::ActivationKernel&lt;paddle::platform::GPUPlace,
-                                             ops::SigmoidFunctor&gt;);
+                                             ops::SigmoidFunctor&lt;float&gt;&gt;);
 REGISTER_OP_GPU_KERNEL(
     sigmoid_grad, ops::ActivationGradKernel&lt;paddle::platform::GPUPlace,
-                                            ops::SigmoidGradFunctor&gt;);
+                                            ops::SigmoidGradFunctor&lt;float&gt;&gt;);
 
 REGISTER_OP_GPU_KERNEL(
     exp,
     ops::ActivationKernel&lt;paddle::platform::GPUPlace, ops::ExpFunctor&lt;float&gt;&gt;);
@@ -37,35 +37,27 @@ REGISTER_OP_GPU_KERNEL(
     relu_grad, ops::ActivationGradKernel&lt;paddle::platform::GPUPlace,
                                          ops::ReluGradFunctor&lt;float&gt;&gt;);
 
-REGISTER_OP(tanh, ops::ActivationOp, ops::TanhOpMaker, tanh_grad,
-            ops::ActivationOpGrad);
-REGISTER_OP_GPU_KERNEL(tanh,
-                       ops::ActivationKernel&lt;paddle::platform::GPUPlace, float,
-                                             ops::TanhFunctor&gt;);
+REGISTER_OP_GPU_KERNEL(
+    tanh,
+    ops::ActivationKernel&lt;paddle::platform::GPUPlace, ops::TanhFunctor&lt;float&gt;&gt;);
 REGISTER_OP_GPU_KERNEL(
     tanh_grad, ops::ActivationGradKernel&lt;paddle::platform::GPUPlace,
                                          ops::TanhGradFunctor&lt;float&gt;&gt;);
 
-REGISTER_OP(sqrt, ops::ActivationOp, ops::SqrtOpMaker, sqrt_grad,
-            ops::ActivationOpGrad);
-REGISTER_OP_GPU_KERNEL(sqrt,
-                       ops::ActivationKernel&lt;paddle::platform::GPUPlace, float,
-                                             ops::SqrtFunctor&gt;);
+REGISTER_OP_GPU_KERNEL(
+    sqrt,
+    ops::ActivationKernel&lt;paddle::platform::GPUPlace, ops::SqrtFunctor&lt;float&gt;&gt;);
 REGISTER_OP_GPU_KERNEL(
     sqrt_grad, ops::ActivationGradKernel&lt;paddle::platform::GPUPlace,
                                          ops::SqrtGradFunctor&lt;float&gt;&gt;);
 
-REGISTER_OP(abs, ops::ActivationOp, ops::AbsOpMaker, abs_grad,
-            ops::ActivationOpGrad);
-REGISTER_OP_GPU_KERNEL(abs,
-                       ops::ActivationKernel&lt;paddle::platform::GPUPlace, float,
-                                             ops::AbsFunctor&gt;);
 REGISTER_OP_GPU_KERNEL(
-    abs_grad, ops::ActivationGradKernel&lt;paddle::platform::GPUPlace, float,
-                                        ops::AbsGradFunctor&gt;);
+    abs,
+    ops::ActivationKernel&lt;paddle::platform::GPUPlace, ops::AbsFunctor&lt;float&gt;&gt;);
+REGISTER_OP_GPU_KERNEL(abs_grad,
+                       ops::ActivationGradKernel&lt;paddle::platform::GPUPlace,
+                                                 ops::AbsGradFunctor&lt;float&gt;&gt;);
 
-REGISTER_OP(reciprocal, ops::ActivationOp, ops::ReciprocalOpMaker,
-            reciprocal_grad, ops::ActivationOpGrad);
 REGISTER_OP_GPU_KERNEL(reciprocal,
                        ops::ActivationKernel&lt;paddle::platform::GPUPlace,
                                              ops::ReciprocalFunctor&lt;float&gt;&gt;);
@@ -74,47 +66,35 @@ REGISTER_OP_GPU_KERNEL(
     ops::ActivationGradKernel&lt;paddle::platform::GPUPlace,
                               ops::ReciprocalGradFunctor&lt;float&gt;&gt;);
 
-REGISTER_OP(log, ops::ActivationOp, ops::LogOpMaker, log_grad,
-            ops::ActivationOpGrad);
-REGISTER_OP_GPU_KERNEL(log,
-                       ops::ActivationKernel&lt;paddle::platform::GPUPlace, float,
-                                             ops::LogFunctor&gt;);
+REGISTER_OP_GPU_KERNEL(
+    log,
+    ops::ActivationKernel&lt;paddle::platform::GPUPlace, ops::LogFunctor&lt;float&gt;&gt;);
 REGISTER_OP_GPU_KERNEL(
     log_grad, ops::ActivationGradKernel&lt;paddle::platform::GPUPlace,
                                         ops::LogGradFunctor&lt;float&gt;&gt;);
 
-REGISTER_OP(square, ops::ActivationOp, ops::SquareOpMaker, square_grad,
-            ops::ActivationOpGrad);
 REGISTER_OP_GPU_KERNEL(square,
                        ops::ActivationKernel&lt;paddle::platform::GPUPlace,
-                                             ops::SquareFunctor&gt;);
+                                             ops::SquareFunctor&lt;float&gt;&gt;);
 REGISTER_OP_GPU_KERNEL(
     square_grad, ops::ActivationGradKernel&lt;paddle::platform::GPUPlace,
                                            ops::SquareGradFunctor&lt;float&gt;&gt;);
 
-REGISTER_OP(brelu, ops::ActivationOp, ops::BReluOpMaker, brelu_grad,
-            ops::ActivationOpGrad);
 REGISTER_OP_GPU_KERNEL(brelu,
                        ops::BReluKernel&lt;paddle::platform::GPUPlace, float&gt;);
 REGISTER_OP_GPU_KERNEL(brelu_grad,
                        ops::BReluGradKernel&lt;paddle::platform::GPUPlace, float&gt;);
 
-REGISTER_OP(soft_relu, ops::ActivationOp, ops::SoftReluOpMaker,
-            soft_relu_grad, ops::ActivationOpGrad);
 REGISTER_OP_GPU_KERNEL(soft_relu,
                        ops::SoftReluKernel&lt;paddle::platform::GPUPlace, float&gt;);
 REGISTER_OP_GPU_KERNEL(
     soft_relu_grad, ops::SoftReluGradKernel&lt;paddle::platform::GPUPlace, float&gt;);
 
-REGISTER_OP(pow, ops::ActivationOp, ops::PowOpMaker, pow_grad,
-            ops::ActivationOpGrad);
 REGISTER_OP_GPU_KERNEL(pow, ops::PowKernel&lt;paddle::platform::GPUPlace, float&gt;);
 REGISTER_OP_GPU_KERNEL(pow_grad,
                        ops::PowGradKernel&lt;paddle::platform::GPUPlace, float&gt;);
 
-REGISTER_OP(stanh, ops::ActivationOp, ops::STanhOpMaker, stanh_grad,
-            ops::ActivationOpGrad);
 REGISTER_OP_GPU_KERNEL(stanh,
                        ops::STanhKernel&lt;paddle::platform::GPUPlace, float&gt;);
 REGISTER_OP_GPU_KERNEL(stanh_grad,
-                       ops::STanhGradKernel&lt;paddle::platform::GPUPlace, float&gt;);
\ No newline at end of file
+                       ops::STanhGradKernel&lt;paddle::platform::GPUPlace, float&gt;);
diff --git a/python/paddle/trainer_config_helpers/networks.py b/python/paddle/trainer_config_helpers/networks.py
index 34be203ee254584027c79cf93fe54f404b7235db..28a71cf788f2bce1e680ef37fb1bc997ead6e486 100644
--- a/python/paddle/trainer_config_helpers/networks.py
+++ b/python/paddle/trainer_config_helpers/networks.py
@@ -1406,7 +1406,7 @@ def inputs(layers, *args):
     if len(args) != 0:
         layers.extend(args)
 
-    Inputs(*[l.name for l in layers])
+    Inputs(* [l.name for l in layers])
 
 
 def outputs(layers, *args):
@@ -1456,7 +1456,7 @@ def outputs(layers, *args):
     assert len(layers) > 0
 
     if HasInputsSet():  # input already set
-        Outputs(*[l.name for l in layers])
+        Outputs(* [l.name for l in layers])
         return  # just return outputs.
 
     if len(layers) != 1:
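Note on the `activation_op.cu` changes: besides dropping the duplicated `REGISTER_OP(...)` calls (the op itself is registered in the `.cc` file; the `.cu` should only register GPU kernels), the patch moves the remaining registrations from the old kernel shape, which took the element type as a separate template argument (`ActivationKernel&lt;Place, float, Functor&gt;`), to one that recovers it from a templated functor (`ActivationKernel&lt;Place, Functor&lt;float&gt;&gt;`). The sketch below is a minimal standalone illustration of that pattern, not Paddle's actual headers; the `ELEMENT_TYPE` alias mirrors the one exposed by the activation functors in `paddle/operators/activation_op.h`, but every definition here is a simplified stand-in.

```cpp
#include <cmath>
#include <iostream>

// Simplified stand-in for Paddle's templated activation functors. The real
// functors fill an Eigen expression; this one maps a single scalar.
template <typename T>
struct SigmoidFunctor {
  // The kernel reads the element type back out of the functor, which is why
  // the registration no longer needs a separate `float` template argument.
  using ELEMENT_TYPE = T;
  T operator()(T x) const {
    return static_cast<T>(1) / (static_cast<T>(1) + std::exp(-x));
  }
};

struct GPUPlace {};  // stand-in for paddle::platform::GPUPlace

// Simplified stand-in for ops::ActivationKernel<Place, Functor>: T is
// recovered from Functor::ELEMENT_TYPE instead of being a third parameter.
template <typename Place, typename Functor>
struct ActivationKernel {
  using T = typename Functor::ELEMENT_TYPE;
  T Compute(T x) const { return Functor()(x); }
};

int main() {
  // Mirrors the new registration shape:
  //   ActivationKernel<paddle::platform::GPUPlace, ops::SigmoidFunctor<float>>
  ActivationKernel<GPUPlace, SigmoidFunctor<float>> sigmoid;
  std::cout << sigmoid.Compute(0.0f) << "\n";  // sigmoid(0) == 0.5
  return 0;
}
```

Binding the element type inside the functor keeps each registration to a single type argument pair and lets one functor template serve every precision, which is presumably why the half-converted registrations (e.g. `sigmoid`, `square`) only needed `&lt;float&gt;` appended while `tanh`, `sqrt`, `abs`, and `log` were rewritten in full.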