diff --git a/cmake/cuda.cmake b/cmake/cuda.cmake
index f373951ccb25b5cf0520e0162b9ff0a5c562bc26..2f4f5449f482d71a2a27957af4b5f17601ab634f 100644
--- a/cmake/cuda.cmake
+++ b/cmake/cuda.cmake
@@ -216,6 +216,8 @@ endif(WIN32)
 set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} -w")
 # Set :expt-relaxed-constexpr to suppress Eigen warnings
 set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} --expt-relaxed-constexpr")
+# Set :expt-extended-lambda to enable the HOSTDEVICE annotation on lambdas
+set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} --expt-extended-lambda")
 
 if(WIN32)
   set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} -Xcompiler \"/wd4244 /wd4267 /wd4819 \"")
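
Note on the flag added above: --expt-extended-lambda lets nvcc accept __host__ __device__ (the expansion of Paddle's HOSTDEVICE macro) on lambda expressions, which the new ReluCPUFunctor further down relies on. A minimal standalone sketch of such an extended lambda, assuming nvcc with this flag and using Thrust purely for illustration (not part of this patch):

    // relu_lambda_demo.cu -- illustrative only.
    // Build: nvcc --expt-extended-lambda -c relu_lambda_demo.cu
    #include <thrust/device_vector.h>
    #include <thrust/transform.h>

    void relu_inplace(thrust::device_vector<float>& v) {
      thrust::transform(v.begin(), v.end(), v.begin(),
                        // Without --expt-extended-lambda, nvcc rejects this annotation.
                        [] __host__ __device__ (float x) { return x > 0.f ? x : 0.f; });
    }
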
diff --git a/paddle/fluid/operators/activation_op.cc b/paddle/fluid/operators/activation_op.cc
index 3643fd926d33adbbab60a13d2de1d9fbb851941d..785d6daaecdd28e3a811e2d1002dc4cede980bdf 100644
--- a/paddle/fluid/operators/activation_op.cc
+++ b/paddle/fluid/operators/activation_op.cc
@@ -1061,7 +1061,7 @@ REGISTER_OPERATOR(
     ops::ActivationOpDoubleGrad2<ops::ReluGradFunctor<float>::FwdDeps()>,
     ops::ActivationDoubleGradOpInplaceInferer);
 
-REGISTER_ACTIVATION_CPU_KERNEL(relu, Relu, ReluFunctor, ReluGradFunctor);
+REGISTER_ACTIVATION_CPU_KERNEL(relu, Relu, ReluCPUFunctor, ReluGradFunctor);
 
 REGISTER_OP_CPU_KERNEL(
     relu_grad_grad,
diff --git a/paddle/fluid/operators/activation_op.cu b/paddle/fluid/operators/activation_op.cu
index 36777399174f5d2619fbcd40ebf91be1ed29feec..2033081af224a4e938a7b4f0f619729feea57506 100644
--- a/paddle/fluid/operators/activation_op.cu
+++ b/paddle/fluid/operators/activation_op.cu
@@ -60,7 +60,7 @@ REGISTER_OP_CUDA_KERNEL(
 /* ========================================================================== */
 
 /* ===========================    relu register  ============================ */
-REGISTER_ACTIVATION_CUDA_KERNEL(relu, Relu, ReluFunctor, ReluGradFunctor);
+REGISTER_ACTIVATION_CUDA_KERNEL(relu, Relu, ReluCUDAFunctor, ReluGradFunctor);
 
 REGISTER_OP_CUDA_KERNEL(
     relu_grad_grad,
diff --git a/paddle/fluid/operators/activation_op.h b/paddle/fluid/operators/activation_op.h
index 483f5cc2e5cc267b1e0ca3856b32f37acded8c43..289cc70392a3fd7e22e212edab4d7ef59b6ad0f9 100644
--- a/paddle/fluid/operators/activation_op.h
+++ b/paddle/fluid/operators/activation_op.h
@@ -318,7 +318,17 @@ struct ExpGradFunctor : public BaseActivationFunctor<T> {
 
 // relu(x) = max(x, 0)
 template <typename T>
-struct ReluFunctor : public BaseActivationFunctor<T> {
+struct ReluCPUFunctor : public BaseActivationFunctor<T> {
+  template <typename Device, typename X, typename Out>
+  void operator()(Device d, X x, Out out) const {
+    out.device(d) = x.unaryExpr([] HOSTDEVICE(T v) {
+      return v > static_cast<T>(0) ? v : static_cast<T>(0);
+    });
+  }
+};
+
+template <typename T>
+struct ReluCUDAFunctor : public BaseActivationFunctor<T> {
   template <typename Device, typename X, typename Out>
   void operator()(Device d, X x, Out out) const {
     out.device(d) = x.cwiseMax(static_cast<T>(0));
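
The single ReluFunctor is split in two here: ReluCPUFunctor computes max(v, 0) through unaryExpr with a HOSTDEVICE lambda, which is why the nvcc flag above is needed when this header is compiled for CUDA, while ReluCUDAFunctor keeps the original cwiseMax expression. A minimal usage sketch of the CPU path on a flattened tensor; the setup is illustrative and assumes plain Eigen rather than Paddle's EigenVector helpers:

    // Illustrative sketch mirroring the expression ReluCPUFunctor builds.
    #include <unsupported/Eigen/CXX11/Tensor>

    void relu_cpu_sketch(float* data, int n) {
      Eigen::TensorMap<Eigen::Tensor<float, 1>> x(data, n);
      Eigen::DefaultDevice d;
      // Element-wise max(v, 0), evaluated on the given Eigen device.
      x.device(d) = x.unaryExpr([](float v) { return v > 0.f ? v : 0.f; });
    }
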
diff --git a/paddle/fluid/operators/fused/fused_bn_activation_op.cu b/paddle/fluid/operators/fused/fused_bn_activation_op.cu
index 32eaf1180977a070bd14b3eb79eaaa2357bdb2b0..9339ae8e470de897749c1795a98baeb41ddddaba 100644
--- a/paddle/fluid/operators/fused/fused_bn_activation_op.cu
+++ b/paddle/fluid/operators/fused/fused_bn_activation_op.cu
@@ -93,7 +93,7 @@ class FusedBatchNormActKernel<platform::CUDADeviceContext, T>
       auto y_v = framework::EigenVector<T>::Flatten(*y);
       auto &dev = *dev_ctx.eigen_device();
       if (act_type == "relu") {
-        ReluFunctor<T>()(dev, x_v, y_v);
+        ReluCUDAFunctor<T>()(dev, x_v, y_v);
       } else {
         PADDLE_THROW(
             platform::errors::Unimplemented("Unsupported activation type"));
diff --git a/paddle/fluid/operators/gru_unit_op.h b/paddle/fluid/operators/gru_unit_op.h
index 4865a02c5292ffb9d079d0711f0bf7d6e927c441..2d1a89f9ae471017d30f3278955498de71c053aa 100644
--- a/paddle/fluid/operators/gru_unit_op.h
+++ b/paddle/fluid/operators/gru_unit_op.h
@@ -18,6 +18,7 @@ limitations under the License. */
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/operators/activation_op.h"
 #include "paddle/fluid/operators/math/blas.h"
+#include "paddle/fluid/platform/place.h"
 
 namespace paddle {
 namespace operators {
@@ -37,19 +38,24 @@ template <typename DeviceContext, typename T>
 class GRUUnitKernel : public framework::OpKernel<T> {
  public:
   template <typename Device, typename X, typename Y>
-  void ActCompute(const int act_type, const Device& d, X x, Y y) const {
-    if (act_type == identity)
+  void ActCompute(const int act_type, const Device& d, X x, Y y,
+                  platform::Place place) const {
+    if (act_type == identity) {
       y.device(d) = x;
-    else if (act_type == sigmoid)
+    } else if (act_type == sigmoid) {
       SigmoidFunctor<T>()(d, x, y);
-    else if (act_type == tanh)
+    } else if (act_type == tanh) {
       TanhFunctor<T>()(d, x, y);
-    else if (act_type == relu)
-      ReluFunctor<T>()(d, x, y);
-    else
+    } else if (act_type == relu) {
+      if (place == platform::CPUPlace())
+        ReluCPUFunctor<T>()(d, x, y);
+      else
+        ReluCUDAFunctor<T>()(d, x, y);
+    } else {
       PADDLE_THROW(platform::errors::Unimplemented(
           "Unsupported activation type, only supports identity, sigmoid, tanh "
           "and relu."));
+    }
   }
 
   void Compute(const framework::ExecutionContext& context) const override {
@@ -97,11 +103,13 @@ class GRUUnitKernel : public framework::OpKernel<T> {
     Eigen::array<int, 2> extents{{batch_size, frame_size}};
     Eigen::array<int, 2> u_offsets{{0, 0}};
     ActCompute(context.Attr<int>("gate_activation"), place,
-               g.slice(u_offsets, extents), g.slice(u_offsets, extents));
+               g.slice(u_offsets, extents), g.slice(u_offsets, extents),
+               context.GetPlace());
     auto u = g.slice(u_offsets, extents);  // update gate
     Eigen::array<int, 2> r_offsets{{0, frame_size}};
     ActCompute(context.Attr<int>("gate_activation"), place,
-               g.slice(r_offsets, extents), g.slice(r_offsets, extents));
+               g.slice(r_offsets, extents), g.slice(r_offsets, extents),
+               context.GetPlace());
     auto r = g.slice(r_offsets, extents);  // reset gate
     r_h_p.device(place) = r * h_p;         // reset previous hidden state
     blas.GEMM(false, false, batch_size, frame_size, frame_size, 1,
@@ -111,7 +119,8 @@ class GRUUnitKernel : public framework::OpKernel<T> {
 
     Eigen::array<int, 2> c_offsets{{0, frame_size * 2}};
     ActCompute(context.Attr<int>("activation"), place,
-               g.slice(c_offsets, extents), g.slice(c_offsets, extents));
+               g.slice(c_offsets, extents), g.slice(c_offsets, extents),
+               context.GetPlace());
     auto c = g.slice(c_offsets, extents);  // output candidate
 
     // calculate final output
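
GRUUnitKernel (and LSTMPKernel in the next file) is built for both CPU and CUDA device contexts from this shared header, so ActCompute now also receives the execution place and routes relu to the matching functor at runtime; the other activations are unchanged. The same dispatch extracted as a standalone sketch, with the helper name ReluByPlace being hypothetical:

    // Illustrative sketch of the place-based dispatch added above.
    #include "paddle/fluid/operators/activation_op.h"
    #include "paddle/fluid/platform/place.h"

    namespace paddle {
    namespace operators {

    template <typename T, typename Device, typename X, typename Y>
    void ReluByPlace(const Device& d, X x, Y y, const platform::Place& place) {
      if (place == platform::CPUPlace()) {
        ReluCPUFunctor<T>()(d, x, y);   // unaryExpr with a HOSTDEVICE lambda
      } else {
        ReluCUDAFunctor<T>()(d, x, y);  // cwiseMax expression
      }
    }

    }  // namespace operators
    }  // namespace paddle
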
diff --git a/paddle/fluid/operators/lstmp_op.h b/paddle/fluid/operators/lstmp_op.h
index a2d1d5295be82f1e4c328c84005c9c676ccd06aa..5a6ac42f457852308bbe83bc824c21575d4640c8 100644
--- a/paddle/fluid/operators/lstmp_op.h
+++ b/paddle/fluid/operators/lstmp_op.h
@@ -22,6 +22,7 @@ limitations under the License. */
 #include "paddle/fluid/operators/math/detail/activation_functions.h"
 #include "paddle/fluid/operators/math/lstm_compute.h"
 #include "paddle/fluid/operators/math/sequence2batch.h"
+#include "paddle/fluid/platform/place.h"
 #include "paddle/fluid/platform/transform.h"
 
 namespace paddle {
@@ -81,18 +82,22 @@ class LSTMPKernel : public framework::OpKernel<T> {
  public:
   template <typename Device, typename X, typename Y>
   void ActCompute(const math::detail::ActivationType act_type, const Device& d,
-                  X x, Y y) const {
-    if (act_type == math::detail::ActivationType::kIdentity)
+                  X x, Y y, platform::Place place) const {
+    if (act_type == math::detail::ActivationType::kIdentity) {
       y.device(d) = x;
-    else if (act_type == math::detail::ActivationType::kSigmoid)
+    } else if (act_type == math::detail::ActivationType::kSigmoid) {
       SigmoidFunctor<T>()(d, x, y);
-    else if (act_type == math::detail::ActivationType::kTanh)
+    } else if (act_type == math::detail::ActivationType::kTanh) {
       TanhFunctor<T>()(d, x, y);
-    else if (act_type == math::detail::ActivationType::kReLU)
-      ReluFunctor<T>()(d, x, y);
-    else
+    } else if (act_type == math::detail::ActivationType::kReLU) {
+      if (place == platform::CPUPlace())
+        ReluCPUFunctor<T>()(d, x, y);
+      else
+        ReluCUDAFunctor<T>()(d, x, y);
+    } else {
       PADDLE_THROW(
           platform::errors::InvalidArgument("unsupported activation type"));
+    }
   }
 
   void Compute(const framework::ExecutionContext& ctx) const override {
@@ -225,7 +230,7 @@ class LSTMPKernel : public framework::OpKernel<T> {
                   &proj_t, static_cast<T>(0.0));
       if (proj_act != math::detail::ActivationType::kIdentity) {
         auto proj_t_dev = EigenMatrix<T>::From(proj_t);
-        ActCompute(cell_act, place, proj_t_dev, proj_t_dev);
+        ActCompute(cell_act, place, proj_t_dev, proj_t_dev, ctx.GetPlace());
       }
       if (proj_clip && proj_clip > 0.0) {
         T* x_data = proj_t.data<T>();
diff --git a/paddle/fluid/operators/rnn_op.h b/paddle/fluid/operators/rnn_op.h
index b993f5ac17479544e127f669c94ce0606ab47399..2b223e24cf8e63082fff221301cbe5000962a84f 100644
--- a/paddle/fluid/operators/rnn_op.h
+++ b/paddle/fluid/operators/rnn_op.h
@@ -979,7 +979,7 @@ class RNNCPUKernel : public framework::OpKernel<T> {
     } else if (is_rnn_relu(ctx)) {
       gate_num = 1;
       RnnFunc<
-          SimpleRNNCell<T, ReluFunctor, math::detail::ActivationType::kReLU>,
+          SimpleRNNCell<T, ReluCPUFunctor, math::detail::ActivationType::kReLU>,
           Layer, SingleLayer, BidirLayer, T>(
           ctx, input, weight_list, pre_state[0], nullptr, sequence_length,
           state[0], nullptr, output, dropout_mask, num_layers, gate_num,