diff --git a/cmake/cpplint.cmake b/cmake/cpplint.cmake
index e50530411cc74392091c8026fa012ec7631f7f6b..5184f0815faac005b3dff1015395235f4e19d65b 100644
--- a/cmake/cpplint.cmake
+++ b/cmake/cpplint.cmake
@@ -42,29 +42,21 @@ macro(add_style_check_target TARGET_NAME)
     if(WITH_STYLE_CHECK)
         set(SOURCES_LIST ${ARGN})
         list(REMOVE_DUPLICATES SOURCES_LIST)
-        list(SORT SOURCES_LIST)
-
         foreach(filename ${SOURCES_LIST})
-            set(LINT ON)
             foreach(pattern ${IGNORE_PATTERN})
                 if(filename MATCHES ${pattern})
-                    message(STATUS "DROP LINT ${filename}")
-                    set(LINT OFF)
+                    list(REMOVE_ITEM SOURCES_LIST ${filename})
                 endif()
             endforeach()
-            if(LINT MATCHES ON)
-                # cpplint code style
-                get_filename_component(base_filename ${filename} NAME)
-                set(CUR_GEN ${CMAKE_CURRENT_BINARY_DIR}/${base_filename}.cpplint)
-                add_custom_command(OUTPUT ${CUR_GEN} PRE_BUILD
-                    COMMAND "${PYTHON_EXECUTABLE}" "${PROJ_ROOT}/paddle/scripts/cpplint.py"
-                            "--filter=${STYLE_FILTER}"
-                            "--write-success=${CUR_GEN}" ${filename}
-                    DEPENDS ${filename} ${PROJ_ROOT}/paddle/scripts/cpplint.py
-                    WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR})
-                add_custom_target(${base_filename}.cpplint DEPENDS ${CUR_GEN})
-                add_dependencies(${TARGET_NAME} ${base_filename}.cpplint)
-            endif()
         endforeach()
+
+        if(SOURCES_LIST)
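+            # lint all remaining sources in a single cpplint run after the target builds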
+            add_custom_command(TARGET ${TARGET_NAME} POST_BUILD
+                COMMAND "${PYTHON_EXECUTABLE}" "${PROJ_ROOT}/paddle/scripts/cpplint.py"
+                        "--filter=${STYLE_FILTER}"
+                        ${SOURCES_LIST}
+                COMMENT "cpplint: Checking source code style"
+                WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR})
+        endif()
     endif()
 endmacro()
diff --git a/cmake/flags.cmake b/cmake/flags.cmake
index d00a9bb3a30cfb16623e073414088059481c3e1a..e26d8d9df386e65137aa83cc60a43bfeabf7a4a6 100644
--- a/cmake/flags.cmake
+++ b/cmake/flags.cmake
@@ -115,7 +115,7 @@ set(COMMON_FLAGS
     -Wno-error=literal-suffix
     -Wno-error=sign-compare
     -Wno-error=unused-local-typedefs
-    -Wno-error=parentheses-equality # Warnings in Pybind11
+    -Wno-error=parentheses-equality # Warnings in pybind11
 )
 
 set(GPU_COMMON_FLAGS
@@ -195,6 +195,7 @@ endif()
 # Modern gpu architectures: Pascal
 if (CUDA_VERSION VERSION_GREATER "8.0" OR CUDA_VERSION VERSION_EQUAL "8.0")
       list(APPEND __arch_flags " -gencode arch=compute_60,code=sm_60")
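+      # --expt-relaxed-constexpr allows device code to call constexpr host functions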
+      list(APPEND CUDA_NVCC_FLAGS --expt-relaxed-constexpr)
 endif()
 
 # Custom gpu architecture
diff --git a/doc/design/mkldnn/README.MD b/doc/design/mkldnn/README.MD
new file mode 100644
index 0000000000000000000000000000000000000000..e956994431fbb43438c56dcd96ad8313cf516090
--- /dev/null
+++ b/doc/design/mkldnn/README.MD
@@ -0,0 +1,110 @@
+# Intel® MKL-DNN on PaddlePaddle: Design Doc
+
+We plan to integrate the Intel Math Kernel Library for Deep Neural Networks (**MKL-DNN**\[[1](#references)\]) into PaddlePaddle, to fully leverage the strengths of Intel platforms and effectively improve PaddlePaddle's performance on Intel architecture.
+
+Our basic short-term goals are:
+
+- Complete MKL-DNN implementations of the commonly used layers.
+- Complete MKL-DNN implementations of the common deep neural networks VGG, GoogLeNet and ResNet.
+
+
+## Contents
+
+- [Overview](#overview)
+- [Actions](#actions)
+	- [CMake](#cmake)
+	- [Layers](#layers)
+	- [Activations](#activations)
+	- [Unit Tests](#unit-tests)
+	- [Protobuf Messages](#protobuf-messages)
+	- [Python API](#python-api)
+	- [Demos](#demos)
+	- [Benchmarking](#benchmarking)
+	- [Others](#others)
+- [Design Concerns](#design-concerns)
+
+## Overview
+
+We will integrate MKL-DNN into PaddlePaddle as a third-party library. The overall architecture is shown in the figure below.
+
+
+![Overview](image/overview.png)
+Figure 1. PaddlePaddle on IA.
+
+
+## Actions
+We roughly divide the integration plan into the following aspects.
+
+### CMake
+We will add a `WITH_MKLDNN` option to `CMakeLists.txt`; setting it to `ON` enables building the MKL-DNN functionality and automatically turns on OpenMP to improve MKL-DNN's performance.
+
+We will also introduce a `WITH_MKLML` option for choosing whether to use the MKLML package shipped with MKL-DNN. The package can be used independently of MKL-DNN, but we recommend turning on MKLML together with MKL-DNN to get the best performance.
+
+Accordingly, we will create `mkldnn.cmake` and `mklml.cmake` files under the `cmake/external` directory; they download the corresponding packages while PaddlePaddle is being compiled and place them into PaddlePaddle's third party directory.
+
+**Note**: when `WITH_MKLML=ON`, this package is preferred as PaddlePaddle's CBLAS and LAPACK library, so the logic in `cmake/cblas.cmake` will be adjusted slightly.
+
+### Layers
+All MKL-DNN related C++ layers will be placed in `paddle/gserver/layers`,
+following PaddlePaddle's directory structure, and every file name will start with *Mkldnn*.
+
+All MKL-DNN layers will inherit from a parent class called `MkldnnLayer`, which in turn inherits from PaddlePaddle's base class `Layer`.
+
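+As an illustration only, a minimal sketch of this hierarchy is given below; every name except `MkldnnLayer` and `Layer` is hypothetical, and the real classes will carry much more state:
+
+```cpp
+#include <string>
+
+// Stand-in for PaddlePaddle's real layer configuration; sketch only.
+struct LayerConfig {
+  std::string name;
+};
+
+// Stand-in for PaddlePaddle's base Layer class.
+class Layer {
+ public:
+  explicit Layer(const LayerConfig& config) : config_(config) {}
+  virtual ~Layer() {}
+
+ protected:
+  LayerConfig config_;
+};
+
+// Common parent of all MKL-DNN layers, as described above.
+class MkldnnLayer : public Layer {
+ public:
+  explicit MkldnnLayer(const LayerConfig& config) : Layer(config) {}
+};
+
+// Example concrete layer; its files would follow the naming
+// convention, e.g. MkldnnFcLayer.h and MkldnnFcLayer.cpp.
+class MkldnnFcLayer : public MkldnnLayer {
+ public:
+  using MkldnnLayer::MkldnnLayer;
+};
+```
+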
+### Activations
+Since activation functions in PaddlePaddle are independent of the layer concept, we will add a `MkldnnActivation.h` file under the `paddle/gserver/activations` directory to define the interfaces needed for MKL-DNN, while the implementations remain in the `ActivationFunction.cpp` file.
+
+### Unit Tests
+We will add `test_Mkldnn.cpp` and `MkldnnTester.*` under the `paddle/gserver/test` directory for testing MKL-DNN.
+
+For activation tests, we plan to add new test types directly to PaddlePaddle's existing test files.
+
+### Protobuf Messages
+Depending on the needs of specific layers, we may add the necessary options to `proto/ModelConfig.proto`.
+
+### Python API
+For now we only consider the **v1 API**.
+
+We plan to add a `use_mkldnn` option in `python/paddle/trainer/config_parser.py` so that users can conveniently choose the MKL-DNN layers.
+
+A possible implementation, for example:
+
+```python
+use_mkldnn = bool(int(g_command_config_args.get("use_mkldnn", 0)))
+if use_mkldnn:
+    self.layer_type = "mkldnn_" + self.layer_type
+```
+
+All MKL-DNN layer types will start with *mkldnn_* to distinguish them.
+
+We may also add the necessary MKL-DNN interfaces to `activations.py` and `layers.py` under the `python/paddle/trainer_config_helper` directory.
+
+### Demos
+
+We will add an `mkldnn` folder under the `v1_api_demo` directory, containing demo scripts for testing MKL-DNN.
+
+### Benchmarking
+We will consider adding some logic to `benchmark/paddle/image/run.sh` to benchmark with MKL-DNN enabled.
+
+### Others
+1. When MKL-DNN is in use, CPU buffers will be aligned to 64 bytes.
+2. Dig deeper into PaddlePaddle for other optimization opportunities, for example using OpenMP to improve the update performance of SGD.
+
+## Design Concerns
+
+To better conform to PaddlePaddle's coding style\[[2](#references)\] while sacrificing as little of MKL-DNN's performance as possible\[[3](#references)\], we summarize the points that need special attention:
+
+1. Use **deviceId_**. To add as few variables or functions to the parent class `Layer` as possible, we decide to reuse the existing `deviceId_` variable to distinguish layer attributes, defining `-2` as the device ID reserved for `MkldnnLayer` (see the sketch after this list).
+2. Override the parent class `Layer`'s **init** function to set `deviceId_` to `-2`, indicating that the layer runs in the MKL-DNN environment.
+3. Create `MkldnnMatrix` to manage the memory functions, interfaces and format information that MKL-DNN uses.
+4. Create `MkldnnBase` to define classes and functions beyond those related to layers and memory, including the `MkldnnStream` and `CpuEngine` that MKL-DNN uses, and possibly `FPGAEngine` and others in the future.
+5. Add two `MkldnnMatrixPtr` members to **Argument**, named `mkldnnValue` and `mkldnnGrad`, to hold the memory buffers that `MkldnnLayer` uses, and add a function cvt (to be renamed to something more suitable) that handles the conversion of memory between the "CPU device" and the "MKL-DNN device".
+6. Add logic to the parent class `Layer`'s `getOutput` function that checks `deviceId` and, when the device differs between MKL-DNN and CPU, performs an upfront conversion, i.e. calls `Argument`'s cvt function to unify the output onto the required device.
+7. Add a `use_mkldnn` flag to the existing `FLAGS` to choose whether to use the MKL-DNN related functionality.
+
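+As an illustration of points 1, 2 and 6, here is a minimal sketch; apart from `deviceId_` and `init`, the member and function names are assumptions rather than the real API:
+
+```cpp
+#include <cstdio>
+
+constexpr int kMkldnnDeviceId = -2;  // device ID reserved for MkldnnLayer
+
+class Layer {
+ public:
+  virtual ~Layer() {}
+  virtual void init() { deviceId_ = 0; }  // e.g. the plain CPU device
+  int deviceId() const { return deviceId_; }
+
+ protected:
+  int deviceId_ = 0;
+};
+
+class MkldnnLayer : public Layer {
+ public:
+  // Point 2: override init and mark the layer as MKL-DNN.
+  void init() override { deviceId_ = kMkldnnDeviceId; }
+};
+
+int main() {
+  MkldnnLayer layer;
+  layer.init();
+  // Point 6: a caller such as getOutput can branch on the device ID and
+  // convert memory between the CPU and MKL-DNN devices when they differ.
+  if (layer.deviceId() == kMkldnnDeviceId) {
+    std::printf("layer runs on the MKL-DNN device\n");
+  }
+  return 0;
+}
+```
+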
+## References
+
+1. [Intel Math Kernel Library for Deep Neural Networks (Intel MKL-DNN)](https://github.com/01org/mkl-dnn "Intel MKL-DNN")
+2. [The original plan](https://github.com/PaddlePaddle/Paddle/pull/3096) would introduce **nextLayer** information. However, in PaddlePaddle, neither the layers before the refactoring nor the ops after it are meant to know anything about the next layer/op.
+3. MKL-DNN's high-performance memory formats differ from PaddlePaddle's native `NCHW` (the CUDNN parts of PaddlePaddle also use `NCHW`, so this problem does not arise there), so a conversion method must be introduced, and the format should only be converted when necessary in order to get the best performance out of MKL-DNN.
+
diff --git a/doc/design/mkldnn/image/overview.png b/doc/design/mkldnn/image/overview.png
new file mode 100644
index 0000000000000000000000000000000000000000..84b455c28230703599a2529f014cfbb222138fef
Binary files /dev/null and b/doc/design/mkldnn/image/overview.png differ
diff --git a/paddle/framework/operator.h b/paddle/framework/operator.h
index 564db43dfee42d446461872816fb9d1468872b76..5a9b7dd914498626b00f46ba2e31b604bbe7b7c6 100644
--- a/paddle/framework/operator.h
+++ b/paddle/framework/operator.h
@@ -174,7 +174,11 @@ class OperatorContext {
   template <typename T>
   T* Output(const size_t index) const {
     auto var = OutputVar(index);
-    PADDLE_ENFORCE(var != nullptr, "Output(%d) should not be nullptr", index);
+    PADDLE_ENFORCE(
+        var != nullptr,
+        "Output(%d) should not be nullptr, which means variable [%s] does not "
+        "exist in scope",
+        index, op_.outputs_[index]);
     return var->GetMutable<T>();
   }
 
diff --git a/paddle/gserver/tests/CMakeLists.txt b/paddle/gserver/tests/CMakeLists.txt
index 4546d12a903084e7a746b967c39d67a0ade4c0cd..5511ab6b8bb05108e76cc0913264d864d2fecf5b 100644
--- a/paddle/gserver/tests/CMakeLists.txt
+++ b/paddle/gserver/tests/CMakeLists.txt
@@ -1,10 +1,5 @@
 # gserver pacakge unittests
 
-file(GLOB_RECURSE GSERVER_HEADER RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "*.h")
-file(GLOB_RECURSE GSERVER_SOURCES RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "*.cpp")
-add_style_check_target(paddle_gserver ${GSERVER_SOURCES})
-add_style_check_target(paddle_gserver ${GSERVER_HEADER})
-
 ################### test_ProtoDataProvider ############
 add_unittest_without_exec(test_ProtoDataProvider
     test_ProtoDataProvider.cpp)
diff --git a/paddle/operators/add_op.cc b/paddle/operators/add_op.cc
index 7fbdd84a391c7d0048fca473f7318561df50daa2..d4c05ed483ca56a31dd8ee4d81b54551ae6da0d1 100644
--- a/paddle/operators/add_op.cc
+++ b/paddle/operators/add_op.cc
@@ -20,8 +20,8 @@ namespace operators {
 class AddOp : public OperatorWithKernel {
  protected:
   void InferShape(const InferShapeContext &ctx) const override {
-    PADDLE_ENFORCE(ctx.InputSize() == 2, "Input size of AddOp must be two");
-    PADDLE_ENFORCE(ctx.OutputSize() == 1, "Output size of AddOp must be one");
+    PADDLE_ENFORCE_EQ(ctx.InputSize(), 2);
+    PADDLE_ENFORCE_EQ(ctx.OutputSize(), 1);
     PADDLE_ENFORCE(ctx.InputVar(0) != nullptr && ctx.InputVar(1) != nullptr,
                    "Inputs of AddOp must all be set");
     PADDLE_ENFORCE(ctx.OutputVar(0) != nullptr,
diff --git a/paddle/operators/mul_op.cc b/paddle/operators/mul_op.cc
index 181660cbacf139864b94e306e66c563b375c7f59..90761f3257b615a4af8b998de17037cc6de9f247 100644
--- a/paddle/operators/mul_op.cc
+++ b/paddle/operators/mul_op.cc
@@ -24,12 +24,16 @@ class MulOp : public OperatorWithKernel {
     PADDLE_ENFORCE(ctx.InputSize() == 2, "The mul op must take two inputs");
     auto dim0 = ctx.Input<Tensor>(0)->dims();
     auto dim1 = ctx.Input<Tensor>(1)->dims();
-    PADDLE_ENFORCE(dim0.size() == 2 && dim1.size() == 2,
-                   "The input of mul op must be matrix");
-    PADDLE_ENFORCE(
-        dim0[1] == dim1[0],
+    PADDLE_ENFORCE_EQ(dim0.size(), 2,
+                      "input X(%s) should be a tensor with 2 dims, a matrix",
+                      ctx.op_.Input("X"));
+    PADDLE_ENFORCE_EQ(dim1.size(), 2,
+                      "input Y(%s) should be a tensor with 2 dims, a matrix",
+                      ctx.op_.Input("Y"));
+    PADDLE_ENFORCE_EQ(
+        dim0[1], dim1[0],
         "First matrix's width must be equal with second matrix's height.");
-    PADDLE_ENFORCE(ctx.OutputSize() == 1, "The mul op must take one output");
+    PADDLE_ENFORCE_EQ(ctx.OutputSize(), 1, "The mul op takes only one output");
     ctx.Output<Tensor>(0)->Resize({dim0[0], dim1[1]});
   }
 };
diff --git a/paddle/operators/recurrent_op.cc b/paddle/operators/recurrent_op.cc
index 389d4323950269b81912a7213ff64872aafb410f..5e9c15ca0e6a7c56611a0fadda6c3c0839f309e6 100644
--- a/paddle/operators/recurrent_op.cc
+++ b/paddle/operators/recurrent_op.cc
@@ -36,6 +36,7 @@ void RecurrentAlgorithm::InferShape(const Scope& scope) const {
   InitMemories(step_scopes[0], true /*infer_shape_mode*/);
   Variable* net = scope.FindVar(arg_->step_net);
   PADDLE_ENFORCE(net != nullptr, "failed to get step net");
+
   for (size_t i = 0; i < seq_len_; i++) {
     if (i > 0) {
       rnn::LinkMemories(step_scopes, arg_->memories, i, -1,
@@ -56,6 +57,7 @@ void RecurrentAlgorithm::Run(const Scope& scope,
   Variable* net = scope.FindVar(arg_->step_net);
 
   for (size_t step_id = 0; step_id < seq_len_; step_id++) {
+    // create output alias variables
     if (step_id > 0) {
       rnn::LinkMemories(step_scopes, arg_->memories, step_id, -1,
                         false /*infer_shape_mode*/);
@@ -67,22 +69,31 @@ void RecurrentAlgorithm::Run(const Scope& scope,
 }
 
 void RecurrentAlgorithm::CreateScopes(const Scope& scope) const {
-  // TODO(xxx) Only two scopes are needed for inference, this case will be
+  // TODO(superjom) Only two scopes are needed for inference, this case will be
   // supported later.
-  auto step_scopes =
-      scope.FindVar(arg_->step_scopes)->GetMutable<std::vector<Scope*>>();
+  auto step_scopes_var = scope.FindVar(arg_->step_scopes);
+  PADDLE_ENFORCE(step_scopes_var != nullptr, "no step scopes var called %s",
+                 arg_->step_scopes);
+  auto step_scopes = step_scopes_var->GetMutable<std::vector<Scope*>>();
+
+  // Now all variables in scope must be created outside of op.
+  auto net_var = scope.FindVar(arg_->step_net);
+  PADDLE_ENFORCE(net_var != nullptr, "no stepnet called %s in scope",
+                 arg_->step_net);
+  auto net_op = net_var->GetMutable<NetOp>();
+  PADDLE_ENFORCE(!net_op->outputs_.empty(), "net_op has no outputs");
 
   if (seq_len_ > step_scopes->size()) {
     for (size_t i = step_scopes->size(); i < seq_len_; ++i) {
       auto& step_scope = scope.NewScope();
 
-      // Now all variables in scope must be created outside of op.
-      auto net_op = scope.FindVar(arg_->step_net)->GetMutable<NetOp>();
+      // create step net's temp inputs
       for (auto& input : net_op->inputs_) {
         // the weight are located in parent scope
-        if (!step_scope.FindVar(input)) step_scope.NewVar(input);
+        if (!step_scope.FindVar(input))
+          step_scope.NewVar(input)->GetMutable<Tensor>();
       }
-      for (auto& output : net_op->outputs_) {
+      // create stepnet's outputs
+      for (const auto& output : net_op->outputs_) {
         step_scope.NewVar(output);
       }
       step_scopes->emplace_back(&step_scope);
@@ -100,6 +111,7 @@ void RecurrentAlgorithm::InitMemories(Scope* step_scope,
     Tensor* boot_mem = step_scope->FindVar(attr.boot_var)->GetMutable<Tensor>();
     if (infer_shape_mode) {
       pre_mem->Resize(boot_mem->dims());
+      PADDLE_ENFORCE_EQ(pre_mem->dims().size(), 2);
     } else {
       pre_mem->ShareDataWith(*boot_mem);
     }
diff --git a/paddle/operators/rnn/recurrent_op_utils.cc b/paddle/operators/rnn/recurrent_op_utils.cc
index 43c97ba29f637828d717ac82516769deff52c7da..32c6c2dd4efa85359b4e95471e8ba09e56afec57 100644
--- a/paddle/operators/rnn/recurrent_op_utils.cc
+++ b/paddle/operators/rnn/recurrent_op_utils.cc
@@ -53,11 +53,13 @@ void ConcatOutputs(const std::vector<Scope*>& step_scopes,
     PADDLE_ENFORCE(output_var != nullptr, "output link [%s] is not in scope.",
                    outlinks[i].external);
     Tensor* output = output_var->GetMutable<Tensor>();
+
     if (infer_shape_mode) {
-      fmw::DDim step_dims = step_scopes[0]
-                                ->FindVar(outlinks[i].internal)
-                                ->GetMutable<Tensor>()
-                                ->dims();
+      auto step_scope_var = step_scopes[0]->FindVar(outlinks[i].internal);
+      PADDLE_ENFORCE(step_scope_var != nullptr, "%s not in scope",
+                     outlinks[i].internal);
+      fmw::DDim step_dims =
+          step_scope_var->template GetMutable<Tensor>()->dims();
       std::vector<int> dims_vec = vectorize(step_dims);
       dims_vec.insert(dims_vec.begin(), seq_len);
       output->Resize(fmw::make_ddim(dims_vec));
@@ -79,14 +81,15 @@ void LinkMemories(const std::vector<Scope*>& scopes,
                   const std::vector<rnn::MemoryAttr>& memories,
                   const size_t step_id, const int offset,
                   bool infer_shape_mode) {
-  PADDLE_ENFORCE(step_id < scopes.size(),
-                 "step [%d] is out of range of step scopes' size [%d]", step_id,
-                 scopes.size());
-  PADDLE_ENFORCE(static_cast<int>(step_id) + offset >= 0,
-                 "offset [%d] must be large than -[%d]", offset, step_id);
-  PADDLE_ENFORCE(step_id + offset < scopes.size(),
-                 "offset [%d] is out of range, it must be less than (%d - %d)",
-                 offset, scopes.size(), step_id);
+  PADDLE_ENFORCE_LT(step_id, scopes.size(),
+                    "step [%d] is out of range of step scopes' size [%d]",
+                    step_id, scopes.size());
+  PADDLE_ENFORCE_GE(static_cast<int>(step_id) + offset, 0,
+                    "offset [%d] must be larger than -[%d]", offset, step_id);
+  PADDLE_ENFORCE_LT(
+      step_id + offset, scopes.size(),
+      "offset [%d] is out of range, it must be less than (%d - %d)", offset,
+      scopes.size(), step_id);
   auto scope = scopes[step_id];
   auto linked_scope = scopes[step_id + offset];
   for (auto& attr : memories) {
diff --git a/paddle/operators/sigmoid_op.cc b/paddle/operators/sigmoid_op.cc
index 9d201eb93a2c0e34dd8e6869e97b43c4e278596e..1eb795faa858796f7a34aa495b43d043fdb5dd43 100644
--- a/paddle/operators/sigmoid_op.cc
+++ b/paddle/operators/sigmoid_op.cc
@@ -37,10 +37,8 @@ class SigmoidOpMaker : public OpProtoAndCheckerMaker {
 
 class SigmoidOpGrad : public OperatorWithKernel {
  protected:
-  void InferShape(const InferShapeContext &ctx) const override {}
-  std::string DebugString() const override {
-    LOG(INFO) << "SigmoidGrad";
-    return "";
+  void InferShape(const InferShapeContext &ctx) const override {
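+    // the gradient output takes the same shape as the forward input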
+    ctx.Output<Tensor>(0)->Resize(ctx.Input<Tensor>(0)->dims());
   }
 };
 
@@ -51,3 +49,5 @@ REGISTER_OP(sigmoid, ops::SigmoidOp, ops::SigmoidOpMaker);
 REGISTER_GRADIENT_OP(sigmoid, sigmoid_grad, ops::SigmoidOpGrad);
 
 REGISTER_OP_CPU_KERNEL(sigmoid, ops::SigmoidKernel<ops::CPUPlace, float>);
+REGISTER_OP_CPU_KERNEL(sigmoid_grad,
+                       ops::SigmoidGradKernel<ops::CPUPlace, float>);
diff --git a/paddle/operators/sigmoid_op.cu b/paddle/operators/sigmoid_op.cu
index 2123b17e4b5e90c22c2d6e9177f2a8956f8a4ac9..e80ba081f2ff805664cf92f3cb47e9ad51889058 100644
--- a/paddle/operators/sigmoid_op.cu
+++ b/paddle/operators/sigmoid_op.cu
@@ -16,3 +16,5 @@
 #include "paddle/operators/sigmoid_op.h"
 
 REGISTER_OP_GPU_KERNEL(sigmoid, ops::SigmoidKernel<ops::GPUPlace, float>);
+REGISTER_OP_GPU_KERNEL(sigmoid_grad,
+                       ops::SigmoidGradKernel<ops::GPUPlace, float>);
diff --git a/paddle/operators/sigmoid_op.h b/paddle/operators/sigmoid_op.h
index eb473920a5f866825b52ecb946653ccead7000ea..d513261e74423ce93a50eaaaec1c7d5fadb8f4a8 100644
--- a/paddle/operators/sigmoid_op.h
+++ b/paddle/operators/sigmoid_op.h
@@ -27,6 +27,7 @@ class SigmoidKernel : public OpKernel {
     auto output = context.Output<Tensor>(0);
     output->mutable_data<T>(context.GetPlace());
 
+    // The clipping is used in Paddle's original implementation
     auto X = EigenVector<T>::Flatten(*input);
     auto Y = EigenVector<T>::Flatten(*output);
     auto place = context.GetEigenDevice<Place>();
@@ -34,5 +35,23 @@ class SigmoidKernel : public OpKernel {
     Y.device(place) = 1.0 / (1.0 + (-1.0 * X).exp());
   }
 };
+
+template <typename Place, typename T>
+class SigmoidGradKernel : public OpKernel {
+ public:
+  void Compute(const ExecutionContext& context) const override {
+    auto Y_t = context.Input<Tensor>("Y");
+    auto dY_t = context.Input<Tensor>(framework::GradVarName("Y"));
+    auto dX_t = context.Output<Tensor>(framework::GradVarName("X"));
+
+    dX_t->mutable_data<T>(context.GetPlace());
+
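+    // With y = sigmoid(x) = 1 / (1 + exp(-x)), dy/dx = y * (1 - y),
+    // so by the chain rule dX = dY * Y * (1 - Y), element-wise.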
+    auto dX = EigenVector<T>::Flatten(*dX_t);
+    auto Y = EigenVector<T>::Flatten(*Y_t);
+    auto dY = EigenVector<T>::Flatten(*dY_t);
+    dX.device(context.GetEigenDevice<Place>()) = dY * Y * (1. - Y);
+  }
+};
+
 }  // namespace operators
 }  // namespace paddle
diff --git a/python/paddle/v2/framework/tests/test_recurrent_op.py b/python/paddle/v2/framework/tests/test_recurrent_op.py
index 0457e3f16a709140180ce433c1d56d146f0b6974..5c77c477b347f4713e4af2a8cb462b243d7a779c 100644
--- a/python/paddle/v2/framework/tests/test_recurrent_op.py
+++ b/python/paddle/v2/framework/tests/test_recurrent_op.py
@@ -1,3 +1,4 @@
+import logging
 import paddle.v2.framework.core as core
 import unittest
 import numpy as np
@@ -7,10 +8,9 @@ ops = creation.op_creations
 
 
 def create_tensor(scope, name, shape):
-    tensor = scope.create_var(name).get_tensor()
+    tensor = scope.new_var(name).get_tensor()
     tensor.set_dims(shape)
-    tensor.alloc_float()
-    tensor.set(np.random.random(shape))
+    tensor.set(np.random.random(shape), core.CPUPlace())
     return tensor
 
 
@@ -31,40 +31,36 @@ class TestRNN(unittest.TestCase):
         - h
     '''
 
+    input_dim = 30
+    batch_size = 50
+    weight_dim = 15
+    sent_len = 11
+
     def init(self):
-        input_dim = 30
-        batch_size = 50
-        weight_dim = 15
-
-        self.scope = core.Scope(None)
-
-        # create vars
-        create_tensor(self.scope, "x", [batch_size, input_dim])
-        create_tensor(self.scope, "W", [input_dim, weight_dim])
-        create_tensor(self.scope, "U", [weight_dim, weight_dim])
-        create_tensor(self.scope, "h_boot", [batch_size, weight_dim])
-
-        x_alias = "x@alias"
-        y_alias = "y@alias"
-        memory = "h@alias"
-        prememory = "h@pre"
-        output = "rnn_out"
-        output_alias = "rnn_out@alias"
-
-        # create step net
-        stepnet_var = self.scope.create_var("stepnet")
-        stepnet = stepnet_var.get_net()
-        # stepnet = core.Net.create()
-        x_fc_op = ops.fc(X=x_alias, W="W", Y="Wx")
-        h_fc_op = ops.fc(X=prememory, W="U", Y="Uh")
-        sum_op = ops.add_two(X="Wx", Y="Uh", Out="sum")
-        sig_op = ops.sigmoid(X="sum", Y=memory)
-        stepnet.add_op(x_fc_op)
-        stepnet.add_op(h_fc_op)
-        stepnet.add_op(sum_op)
-        stepnet.add_op(sig_op)
-        stepnet.complete_add_op(True)
 
+        self.scope = core.Scope()
+
+        self.create_global_variables()
+        self.create_step_net()
+        rnn_op = self.create_rnn_op()
+        ctx = core.DeviceContext.create(core.CPUPlace())
+        print 'infer_shape'
+        rnn_op.infer_shape(self.scope)
+
+        rnn_op.run(self.scope, ctx)
+
+    def create_global_variables(self):
+        # create inlink
+        create_tensor(self.scope, "x",
+                      [self.sent_len, self.batch_size, self.input_dim])
+        create_tensor(self.scope, "W", [self.input_dim, self.input_dim])
+        create_tensor(self.scope, "U", [self.input_dim, self.input_dim])
+        create_tensor(self.scope, "h_boot", [self.batch_size, self.input_dim])
+        self.scope.new_var("step_scopes")
+        self.scope.new_var("h@alias")
+        self.scope.new_var("h")
+
+    def create_rnn_op(self):
         # create RNNOp
         rnnop = ops.recurrent_op(
             # inputs
@@ -72,17 +68,27 @@ class TestRNN(unittest.TestCase):
             boot_memories=["h_boot"],
             step_net="stepnet",
             # outputs
-            outlinks=[output],
+            outlinks=["h"],
             step_scopes="step_scopes",
             # attributes
             inlink_alias=["x@alias"],
-            outlink_alias=[output_alias],
-            pre_memories=[prememory],
-            memories=[memory])
+            outlink_alias=["h@alias"],
+            pre_memories=["h@pre"],
+            memories=["h@alias"])
+        return rnnop
+
+    def create_step_net(self):
+        var = self.scope.new_var("stepnet")
+        stepnet = var.get_net()
 
-        ctx = core.DeviceContext.cpu_context()
-        rnnop.infer_shape(self.scope)
-        rnnop.run(self.scope, ctx)
+        x_fc_op = ops.fc(X="x@alias", W="W", Y="Wx")
+        h_fc_op = ops.fc(X="h@pre", W="U", Y="Uh")
+        sum_op = ops.add_two(X="Wx", Y="Uh", Out="sum")
+        sig_op = ops.sigmoid(X="sum", Y="h@alias")
+
+        for op in [x_fc_op, h_fc_op, sum_op, sig_op]:
+            stepnet.add_op(op)
+        stepnet.complete_add_op(True)
 
     def test_recurrent(self):
         self.init()
diff --git a/python/paddle/v2/framework/tests/test_sigmoid_op.py b/python/paddle/v2/framework/tests/test_sigmoid_op.py
index 2610bcf16303d492dce3ce63c93b54b0c88f6bba..2a57a41ed8b718fd420062ba68e853a4861b7359 100644
--- a/python/paddle/v2/framework/tests/test_sigmoid_op.py
+++ b/python/paddle/v2/framework/tests/test_sigmoid_op.py
@@ -12,5 +12,8 @@ class TestSigmoidOp(unittest.TestCase):
         self.outputs = {'Y': 1 / (1 + np.exp(-self.inputs['X']))}
 
 
+# class TestSigmoidGradOp(unittest.TestCase):
+#     TODO(qingqing): add unit test
+
 if __name__ == '__main__':
     unittest.main()