diff --git a/CMakeLists.txt b/CMakeLists.txt
index c7d743e193e7d32dbc0b56f3bcb05b6c61f85f1d..b174831109372cb014741d63032fa6a470e74042 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -36,8 +36,8 @@ include(simd)
################################ Configurations #######################################
option(WITH_GPU "Compile PaddlePaddle with NVIDIA GPU" ${CUDA_FOUND})
option(WITH_AVX "Compile PaddlePaddle with AVX intrinsics" ${AVX_FOUND})
-option(WITH_MKLDNN "Compile PaddlePaddle with mkl-dnn support." OFF)
-option(WITH_MKLML "Compile PaddlePaddle with mklml package." OFF)
+option(WITH_MKLDNN "Compile PaddlePaddle with mkl-dnn support." ${AVX_FOUND})
+option(WITH_MKLML "Compile PaddlePaddle with mklml package." ${AVX_FOUND})
option(WITH_DSO "Compile PaddlePaddle with dynamic linked CUDA" ON)
option(WITH_TESTING "Compile PaddlePaddle with unit testing" ON)
option(WITH_SWIG_PY "Compile PaddlePaddle with inference api" ON)
diff --git a/cmake/configure.cmake b/cmake/configure.cmake
index 69220e03fe8e337205f31cb1f45e3e19ae4f5d1e..2ac098954647d37e26ac2499e0675dae39910edc 100644
--- a/cmake/configure.cmake
+++ b/cmake/configure.cmake
@@ -74,8 +74,6 @@ if(WITH_MKLDNN)
set(OPENMP_FLAGS "-fopenmp")
set(CMAKE_C_CREATE_SHARED_LIBRARY_FORBIDDEN_FLAGS ${OPENMP_FLAGS})
set(CMAKE_CXX_CREATE_SHARED_LIBRARY_FORBIDDEN_FLAGS ${OPENMP_FLAGS})
- set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -L${MKLDNN_IOMP_DIR} -liomp5 -Wl,--as-needed")
- set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -L${MKLDNN_IOMP_DIR} -liomp5 -Wl,--as-needed")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${OPENMP_FLAGS}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OPENMP_FLAGS}")
else()
diff --git a/cmake/cpplint.cmake b/cmake/cpplint.cmake
index e50530411cc74392091c8026fa012ec7631f7f6b..5184f0815faac005b3dff1015395235f4e19d65b 100644
--- a/cmake/cpplint.cmake
+++ b/cmake/cpplint.cmake
@@ -42,29 +42,21 @@ macro(add_style_check_target TARGET_NAME)
if(WITH_STYLE_CHECK)
set(SOURCES_LIST ${ARGN})
list(REMOVE_DUPLICATES SOURCES_LIST)
- list(SORT SOURCES_LIST)
-
foreach(filename ${SOURCES_LIST})
- set(LINT ON)
foreach(pattern ${IGNORE_PATTERN})
if(filename MATCHES ${pattern})
- message(STATUS "DROP LINT ${filename}")
- set(LINT OFF)
+ list(REMOVE_ITEM SOURCES_LIST ${filename})
endif()
endforeach()
- if(LINT MATCHES ON)
- # cpplint code style
- get_filename_component(base_filename ${filename} NAME)
- set(CUR_GEN ${CMAKE_CURRENT_BINARY_DIR}/${base_filename}.cpplint)
- add_custom_command(OUTPUT ${CUR_GEN} PRE_BUILD
- COMMAND "${PYTHON_EXECUTABLE}" "${PROJ_ROOT}/paddle/scripts/cpplint.py"
- "--filter=${STYLE_FILTER}"
- "--write-success=${CUR_GEN}" ${filename}
- DEPENDS ${filename} ${PROJ_ROOT}/paddle/scripts/cpplint.py
- WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR})
- add_custom_target(${base_filename}.cpplint DEPENDS ${CUR_GEN})
- add_dependencies(${TARGET_NAME} ${base_filename}.cpplint)
- endif()
endforeach()
+
+ if(SOURCES_LIST)
+ add_custom_command(TARGET ${TARGET_NAME} POST_BUILD
+ COMMAND "${PYTHON_EXECUTABLE}" "${PROJ_ROOT}/paddle/scripts/cpplint.py"
+ "--filter=${STYLE_FILTER}"
+ ${SOURCES_LIST}
+ COMMENT "cpplint: Checking source code style"
+ WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR})
+ endif()
endif()
endmacro()
diff --git a/cmake/external/any.cmake b/cmake/external/any.cmake
index 45e3764e8482a4cfc8ee72fe4d79f04a3c9b74fa..5d2f7219b2007493916a39e839d647a9d0046c9f 100644
--- a/cmake/external/any.cmake
+++ b/cmake/external/any.cmake
@@ -7,7 +7,7 @@ INCLUDE_DIRECTORIES(${ANY_SOURCE_DIR}/src/extern_lib_any)
ExternalProject_Add(
extern_lib_any
${EXTERNAL_PROJECT_LOG_ARGS}
- GIT_REPOSITORY "https://github.com/thelink2012/any.git"
+ GIT_REPOSITORY "https://github.com/PaddlePaddle/any.git"
GIT_TAG "8fef1e93710a0edf8d7658999e284a1142c4c020"
PREFIX ${ANY_SOURCE_DIR}
UPDATE_COMMAND ""
diff --git a/cmake/external/gflags.cmake b/cmake/external/gflags.cmake
index a0d0a892c4b3cc3743ac725f3cd90444f18abf34..16e5bef4cdb8d6513de51838e3c3c8398dbad60d 100644
--- a/cmake/external/gflags.cmake
+++ b/cmake/external/gflags.cmake
@@ -28,7 +28,14 @@ INCLUDE_DIRECTORIES(${GFLAGS_INCLUDE_DIR})
ExternalProject_Add(
extern_gflags
${EXTERNAL_PROJECT_LOG_ARGS}
- GIT_REPOSITORY "https://github.com/gflags/gflags.git"
+ # TODO(yiwang): The annoying warnings mentioned in
+ # https://github.com/PaddlePaddle/Paddle/issues/3277 are caused by
+ # gflags. I fired a PR https://github.com/gflags/gflags/pull/230
+ # to fix it. Before it gets accepted by the gflags team, we use
+ # my personal fork, which contains the above fix, temporarily. Let's
+ # change this back to the official GitHub repo once my PR is
+ # merged.
+ GIT_REPOSITORY "https://github.com/wangkuiyi/gflags.git"
PREFIX ${GFLAGS_SOURCES_DIR}
UPDATE_COMMAND ""
CMAKE_ARGS -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
diff --git a/cmake/external/openblas.cmake b/cmake/external/openblas.cmake
index 60a1041936437775e0994157b8ffcb7c52b7ab87..db09232c0e69016bf18c1d981e4620e9e804ff7c 100644
--- a/cmake/external/openblas.cmake
+++ b/cmake/external/openblas.cmake
@@ -69,8 +69,13 @@ ENDIF(NOT ${CBLAS_FOUND})
MESSAGE(STATUS "BLAS library: ${CBLAS_LIBRARIES}")
INCLUDE_DIRECTORIES(${CBLAS_INC_DIR})
-ADD_LIBRARY(cblas STATIC IMPORTED)
-SET_PROPERTY(TARGET cblas PROPERTY IMPORTED_LOCATION ${CBLAS_LIBRARIES})
+# FIXME(gangliao): generate cblas target to track all high performance
+# linear algebra libraries for cc_library(xxx SRCS xxx.c DEPS cblas)
+SET(dummyfile ${CMAKE_CURRENT_BINARY_DIR}/cblas_dummy.c)
+FILE(WRITE ${dummyfile} "const char * dummy = \"${dummyfile}\";")
+ADD_LIBRARY(cblas STATIC ${dummyfile})
+TARGET_LINK_LIBRARIES(cblas ${CBLAS_LIBRARIES})
+
IF(NOT ${CBLAS_FOUND})
ADD_DEPENDENCIES(cblas extern_openblas)
LIST(APPEND external_project_dependencies cblas)
diff --git a/cmake/flags.cmake b/cmake/flags.cmake
index d00a9bb3a30cfb16623e073414088059481c3e1a..e26d8d9df386e65137aa83cc60a43bfeabf7a4a6 100644
--- a/cmake/flags.cmake
+++ b/cmake/flags.cmake
@@ -115,7 +115,7 @@ set(COMMON_FLAGS
-Wno-error=literal-suffix
-Wno-error=sign-compare
-Wno-error=unused-local-typedefs
- -Wno-error=parentheses-equality # Warnings in Pybind11
+ -Wno-error=parentheses-equality # Warnings in pybind11
)
set(GPU_COMMON_FLAGS
@@ -195,6 +195,7 @@ endif()
# Modern gpu architectures: Pascal
if (CUDA_VERSION VERSION_GREATER "8.0" OR CUDA_VERSION VERSION_EQUAL "8.0")
list(APPEND __arch_flags " -gencode arch=compute_60,code=sm_60")
+ list(APPEND CUDA_NVCC_FLAGS --expt-relaxed-constexpr)
endif()
# Custom gpu architecture
diff --git a/cmake/generic.cmake b/cmake/generic.cmake
index 41b9b5928958ae31799c396a8d77fd7cff557905..957c20bcf603f2f264b4658f63ac0eec438f12b1 100644
--- a/cmake/generic.cmake
+++ b/cmake/generic.cmake
@@ -403,3 +403,16 @@ function(py_proto_compile TARGET_NAME)
protobuf_generate_python(py_srcs ${py_proto_compile_SRCS})
add_custom_target(${TARGET_NAME} ALL DEPENDS ${py_srcs})
endfunction()
+
+function(py_test TARGET_NAME)
+ if(WITH_TESTING)
+ set(options STATIC static SHARED shared)
+ set(oneValueArgs "")
+ set(multiValueArgs SRCS DEPS)
+ cmake_parse_arguments(py_test "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
+ add_test(NAME ${TARGET_NAME}
+ COMMAND env PYTHONPATH=${PADDLE_PYTHON_PACKAGE_DIR}
+ python2 ${py_test_SRCS}
+ WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR})
+ endif()
+endfunction()
diff --git a/doc/design/mkldnn/README.MD b/doc/design/mkldnn/README.MD
new file mode 100644
index 0000000000000000000000000000000000000000..e956994431fbb43438c56dcd96ad8313cf516090
--- /dev/null
+++ b/doc/design/mkldnn/README.MD
@@ -0,0 +1,110 @@
+# Intel® MKL-DNN on PaddlePaddle: Design Doc
+
+We plan to integrate the Intel Math Kernel Library for Deep Neural Networks (**MKL-DNN**\[[1](#references)\]) into PaddlePaddle, so as to take full advantage of Intel platforms and noticeably improve PaddlePaddle's performance on Intel architectures.
+
+Our basic short-term goals are:
+
+- Provide MKL-DNN implementations of the commonly used layers.
+- Provide MKL-DNN implementations of the common deep neural networks VGG, GoogLeNet, and ResNet.
+
+
+## Contents
+
+- [Overview](#overview)
+- [Actions](#actions)
+ - [CMake](#cmake)
+ - [Layers](#layers)
+ - [Activations](#activations)
+ - [Unit Tests](#unit-tests)
+ - [Protobuf Messages](#protobuf-messages)
+ - [Python API](#python-api)
+ - [Demos](#demos)
+ - [Benchmarking](#benchmarking)
+ - [Others](#others)
+- [Design Concerns](#design-concerns)
+
+## Overview
+
+We will integrate MKL-DNN into PaddlePaddle as a third-party library. The overall architecture is shown in Figure 1.
+
+![](image/overview.png)
+
+Figure 1. PaddlePaddle on IA.
+
+## Actions
+We divide the integration work into the following aspects.
+
+### CMake
+We will add a `WITH_MKLDNN` option to `CMakeLists.txt`; when it is set to `ON`, the MKL-DNN functionality is compiled in and OpenMP is enabled automatically to improve MKL-DNN's performance.
+
+We will also introduce a `WITH_MKLML` option to control whether the MKLML package shipped with MKL-DNN is used. The package can be used independently of MKL-DNN, but we recommend turning on MKLML whenever MKL-DNN is enabled in order to get the best performance.
+
+Accordingly, we will add `mkldnn.cmake` and `mklml.cmake` under the `cmake/external` directory; they download the corresponding packages while PaddlePaddle is being compiled and place them in PaddlePaddle's third-party directory.
+
+**Note**: when `WITH_MKLML=ON`, this package is preferred as PaddlePaddle's CBLAS and LAPACK library, so the logic in `cmake/cblas.cmake` will be adjusted slightly.
+
+### Layers
+All MKL-DNN related C++ layers follow PaddlePaddle's directory layout and are placed in
+`paddle/gserver/layers`, with every file name prefixed with *Mkldnn*.
+
+All MKL-DNN layers inherit from a common parent class named `MkldnnLayer`, which in turn inherits from PaddlePaddle's base class `Layer`.
+
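+A minimal C++ sketch of the intended hierarchy is shown below. It is purely illustrative: `MkldnnLayer` does not exist yet, `MkldnnFcLayer` is just an example name, and the constructor signature is assumed to mirror the existing `Layer` base class.
+
+```cpp
+// Illustrative sketch only: every MKL-DNN layer derives from MkldnnLayer,
+// which in turn derives from the existing PaddlePaddle base class Layer.
+class MkldnnLayer : public Layer {
+public:
+  explicit MkldnnLayer(const LayerConfig& config) : Layer(config) {}
+  // MKL-DNN specific members (primitives, memory descriptors, ...) live here.
+};
+
+// Example of a concrete MKL-DNN layer built on top of the common parent.
+class MkldnnFcLayer : public MkldnnLayer {
+public:
+  explicit MkldnnFcLayer(const LayerConfig& config) : MkldnnLayer(config) {}
+};
+```
+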
+### Activations
+Since activation functions in PaddlePaddle are decoupled from the layer concept, we will add a `MkldnnActivation.h` file under `paddle/gserver/activations` to define the interfaces needed by MKL-DNN, while the implementations remain in `ActivationFunction.cpp`.
+
+### Unit Tests
+We will add `test_Mkldnn.cpp` and `MkldnnTester.*` under `paddle/gserver/tests` for the MKL-DNN tests.
+
+For activations, we plan to add new test types directly to PaddlePaddle's existing test files.
+
+### Protobuf Messages
+Depending on the needs of specific layers, we may add the necessary options to `proto/ModelConfig.proto`.
+
+### Python API
+For now we only consider the **v1 API**.
+
+We plan to add a `use_mkldnn` option to `python/paddle/trainer/config_parser.py` so that users can conveniently choose the MKL-DNN layers.
+
+A possible implementation looks like this:
+
+```python
+use_mkldnn = bool(int(g_command_config_args.get("use_mkldnn", 0)))
+if use_mkldnn:
+ self.layer_type = mkldnn_*
+```
+
+All MKL-DNN layer types are prefixed with *mkldnn_* to tell them apart.
+
+We may also add the necessary MKL-DNN interfaces to `activations.py` and `layers.py` under the `python/paddle/trainer_config_helpers` directory.
+
+### Demos
+
+We will add an `mkldnn` folder under the `v1_api_demo` directory containing demo scripts for MKL-DNN testing.
+
+### Benchmarking
+We will consider adding some logic to `benchmark/paddle/image/run.sh` to benchmark with MKL-DNN enabled.
+
+### Others
+1. When MKL-DNN is enabled, CPU buffers will be aligned to 64 bytes.
+2. Dig deeper into PaddlePaddle for further optimization opportunities, for example using OpenMP to speed up the SGD update.
+
+## Design Concerns
+
+We want to match PaddlePaddle's coding style\[[2](#references)\] as closely as possible while sacrificing as little of MKL-DNN's performance as possible\[[3](#references)\].
+
+With that in mind, we have summarized the points that need special attention:
+
+1. Use **deviceId_**. To add as few variables or functions to the parent class `Layer` as possible, we reuse the existing `deviceId_` member to mark a layer's property, reserving `-2` as the device ID specific to `MkldnnLayer`.
+2. Override the parent `Layer`'s **init** function and set `deviceId_` to `-2`, indicating that the layer runs in the MKL-DNN environment (see the sketch after this list).
+3. Create `MkldnnMatrix` to manage the memory functions, interfaces, and format information that MKL-DNN uses.
+4. Create `MkldnnBase` to define the classes and functions that are not related to layers or memory, including the `MkldnnStream` and `CpuEngine` needed by MKL-DNN, and possibly an `FPGAEngine` in the future.
+5. Add two `MkldnnMatrixPtr` members, named `mkldnnValue` and `mkldnnGrad`, to **Argument** to hold the memory buffers used by `MkldnnLayer`, and add a cvt function (to be renamed to something more suitable) that converts memory between the "CPU device" and the "MKL-DNN device".
+6. Add logic to the parent `Layer`'s `getOutput` function that checks `deviceId` and, when the device is not the same for MKL-DNN and CPU, performs an up-front conversion, i.e. calls `Argument`'s cvt function to bring the output onto the required device.
+7. Add a `use_mkldnn` flag to the existing `FLAGS` to choose whether the MKL-DNN related functionality is used.
+
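+As a rough sketch of concerns 1 and 2 above, assuming the existing `Layer::init(const LayerMap&, const ParameterMap&)` signature; the method body is hypothetical:
+
+```cpp
+// Sketch only: mark the layer as an MKL-DNN layer by reusing the existing
+// deviceId_ member with the reserved value -2 (design concern 1).
+bool MkldnnLayer::init(const LayerMap& layerMap,
+                       const ParameterMap& parameterMap) {
+  if (!Layer::init(layerMap, parameterMap)) {
+    return false;
+  }
+  deviceId_ = -2;  // this layer runs in the MKL-DNN environment
+  return true;
+}
+```
+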
+## References
+
+1. [Intel Math Kernel Library for Deep Neural Networks (Intel MKL-DNN)](https://github.com/01org/mkl-dnn "Intel MKL-DNN")
+2. [The earlier proposal](https://github.com/PaddlePaddle/Paddle/pull/3096) would have introduced **nextLayer** information. However, in PaddlePaddle, neither the layers before the refactor nor the ops after the refactor are supposed to know anything about the next layer/op.
+3. MKL-DNN's high-performance memory formats differ from PaddlePaddle's native `NCHW` (the cuDNN parts of PaddlePaddle also use `NCHW`, so they do not have this problem), so a conversion mechanism is needed, and the format should be converted only when necessary in order to get the best performance out of MKL-DNN.
+
diff --git a/doc/design/mkldnn/image/overview.png b/doc/design/mkldnn/image/overview.png
new file mode 100644
index 0000000000000000000000000000000000000000..84b455c28230703599a2529f014cfbb222138fef
Binary files /dev/null and b/doc/design/mkldnn/image/overview.png differ
diff --git a/paddle/api/test/CMakeLists.txt b/paddle/api/test/CMakeLists.txt
index f3b1c2c4d438b5d3e776ef27ce8f8b78f710f2ab..761aeb5b174105edece8880a9f5012c13a63fd11 100644
--- a/paddle/api/test/CMakeLists.txt
+++ b/paddle/api/test/CMakeLists.txt
@@ -1,2 +1,6 @@
-add_python_test(test_swig_api
- testArguments.py testGradientMachine.py testMatrix.py testVector.py testTrain.py testTrainer.py)
+py_test(testTrain SRCS testTrain.py)
+py_test(testMatrix SRCS testMatrix.py)
+py_test(testVector SRCS testVector.py)
+py_test(testTrainer SRCS testTrainer.py)
+py_test(testArguments SRCS testArguments.py)
+py_test(testGradientMachine SRCS testGradientMachine.py)
diff --git a/paddle/framework/operator.cc b/paddle/framework/operator.cc
index d3fbb1e96199b87887cd313d3cf31a230c9b8a34..10ed1f9e32201d887b10ee7ccab8f44573ed1290 100644
--- a/paddle/framework/operator.cc
+++ b/paddle/framework/operator.cc
@@ -22,14 +22,14 @@ namespace framework {
template <>
Eigen::DefaultDevice& ExecutionContext::GetEigenDevice<
platform::CPUPlace, Eigen::DefaultDevice>() const {
- return *device_context_.get_eigen_device<Eigen::DefaultDevice>();
+ return *device_context_->get_eigen_device<Eigen::DefaultDevice>();
}
#ifndef PADDLE_ONLY_CPU
template <>
Eigen::GpuDevice&
ExecutionContext::GetEigenDevice<platform::GPUPlace, Eigen::GpuDevice>() const {
- return *device_context_.get_eigen_device<Eigen::GpuDevice>();
+ return *device_context_->get_eigen_device<Eigen::GpuDevice>();
}
#endif
diff --git a/paddle/framework/operator.h b/paddle/framework/operator.h
index 02707b8a9996fca80c96d76240e5e89cbb6f451e..9672492d1c2c6a82c37e0a840a4ca9c111de06d8 100644
--- a/paddle/framework/operator.h
+++ b/paddle/framework/operator.h
@@ -174,7 +174,11 @@ class OperatorContext {
template <typename T>
T* Output(const size_t index) const {
auto var = OutputVar(index);
- PADDLE_ENFORCE(var != nullptr, "Output(%d) should not be nullptr", index);
+ PADDLE_ENFORCE(
+ var != nullptr,
+ "Output(%d) not be nullptr, which means variable [%s] does not "
+ "exist in scope",
+ index, op_.outputs_[index]);
return var->GetMutable<T>();
}
@@ -252,7 +256,7 @@ struct EigenDeviceConverter {
class ExecutionContext : public OperatorContext {
public:
ExecutionContext(const OperatorBase* op, const Scope& scope,
- const platform::DeviceContext& device_context)
+ const platform::DeviceContext* device_context)
: OperatorContext(op, scope), device_context_(device_context) {}
template <typename PlaceType,
          typename DeviceType =
              typename EigenDeviceConverter<PlaceType>::EigenDeviceType>
DeviceType& GetEigenDevice() const;

- platform::Place GetPlace() const { return device_context_.GetPlace(); }
+ platform::Place GetPlace() const { return device_context_->GetPlace(); }
- const platform::DeviceContext& device_context_;
+ const platform::DeviceContext* device_context_;
};
class OpKernel {
@@ -311,7 +315,7 @@ class OperatorWithKernel : public OperatorBase {
void Run(const Scope& scope,
const platform::DeviceContext& dev_ctx) const final {
auto& opKernel = AllOpKernels().at(type_).at(OpKernelKey(dev_ctx));
- opKernel->Compute(ExecutionContext(this, scope, dev_ctx));
+ opKernel->Compute(ExecutionContext(this, scope, &dev_ctx));
}
static std::unordered_map<std::string, OpKernelMap>&
diff --git a/paddle/function/nnpack/NNPACKConvOp.cpp b/paddle/function/nnpack/NNPACKConvOp.cpp
index f0ec77a5d00333993427fb8d0bc938c884e50c95..00d048eb216baf37c875c870a31cfd55a97f2974 100644
--- a/paddle/function/nnpack/NNPACKConvOp.cpp
+++ b/paddle/function/nnpack/NNPACKConvOp.cpp
@@ -49,9 +49,7 @@ class NNPACKConvFunction : public ConvFunctionBase {
public:
void init(const FuncConfig& config) override {
ConvFunctionBase::init(config);
- CHECK_EQ(groups_, (size_t)1);
algorithm_ = get_nnp_convolution_algorithm(config.get("algo"));
- // algorithm_ = nnp_convolution_algorithm_auto;
transform_strategy_ = nnp_convolution_transform_strategy_compute;
nnp_status status = nnp_initialize();
CHECK_EQ(status, nnp_status_success);
@@ -67,8 +65,7 @@ public:
}
}
- virtual void check(const BufferArgs& inputs,
- const BufferArgs& outputs) override {
+ void check(const BufferArgs& inputs, const BufferArgs& outputs) override {
const TensorShape& input = inputs[0].shape();
const TensorShape& filter = inputs[1].shape();
const TensorShape& output = outputs[0].shape();
@@ -91,8 +88,8 @@ public:
size_t filterHeight = getFilterHeight(filter);
size_t filterWidth = getFilterWidth(filter);
size_t outputChannels = output[1];
- // size_t outputHeight = output[2];
- // size_t outputWidth = output[3];
+ size_t outputHeight = output[2];
+ size_t outputWidth = output[3];
nnp_size inputSize = {.width = inputWidth, .height = inputHeight};
nnp_padding padding = {.top = (size_t)paddingH(),
@@ -171,49 +168,58 @@ public:
}
}
+ size_t inputOffset = inputChannels / groups_ * inputHeight * inputWidth;
+ size_t outputOffset = outputChannels / groups_ * outputHeight * outputWidth;
+ size_t filterOffset = filter.getElements() / groups_;
+
if (batchSize == 1) {
- nnp_status status =
- nnp_convolution_inference(algorithm_,
- transform_strategy_,
- inputChannels,
- outputChannels,
- inputSize,
- padding,
- kernelSize,
- outputSubsampling,
- inputData,
- filterData,
- nullptr, /* bias */
- outputData,
- bufferPtr,
- sizePtr,
- nnp_activation_identity,
- nullptr,
- threadpool_, /* threadpool */
- nullptr);
- CHECK_EQ(status, nnp_status_success);
+ for (size_t g = 0; g < groups_; g++) {
+ nnp_status status =
+ nnp_convolution_inference(algorithm_,
+ transform_strategy_,
+ inputChannels / groups_,
+ outputChannels / groups_,
+ inputSize,
+ padding,
+ kernelSize,
+ outputSubsampling,
+ inputData + inputOffset * g,
+ filterData + filterOffset * g,
+ nullptr, /* bias */
+ outputData + outputOffset * g,
+ bufferPtr,
+ sizePtr,
+ nnp_activation_identity,
+ nullptr,
+ threadpool_, /* threadpool */
+ nullptr);
+ CHECK_EQ(status, nnp_status_success);
+ }
} else {
- // only supports stride = 1
- CHECK_EQ(strideH(), 1);
- CHECK_EQ(strideW(), 1);
- nnp_status status = nnp_convolution_output(algorithm_,
- batchSize,
- inputChannels,
- outputChannels,
- inputSize,
- padding,
- kernelSize,
- inputData,
- filterData,
- nullptr, /* bias */
- outputData,
- bufferPtr,
- sizePtr,
- nnp_activation_identity,
- nullptr,
- threadpool_, /* threadpool */
- nullptr);
- CHECK_EQ(status, nnp_status_success);
+ for (size_t g = 0; g < groups_; g++) {
+ // only supports stride = 1
+ CHECK_EQ(strideH(), 1);
+ CHECK_EQ(strideW(), 1);
+ nnp_status status =
+ nnp_convolution_output(algorithm_,
+ batchSize,
+ inputChannels / groups_,
+ outputChannels / groups_,
+ inputSize,
+ padding,
+ kernelSize,
+ inputData + inputOffset * g,
+ filterData + filterOffset * g,
+ nullptr, /* bias */
+ outputData + outputOffset * g,
+ bufferPtr,
+ sizePtr,
+ nnp_activation_identity,
+ nullptr,
+ threadpool_, /* threadpool */
+ nullptr);
+ CHECK_EQ(status, nnp_status_success);
+ }
}
}
diff --git a/paddle/gserver/layers/ExpandConvLayer.cpp b/paddle/gserver/layers/ExpandConvLayer.cpp
index 783e02e47cb91e28eb88b079f1e94439d34fa775..0ece2799318ea5ecc91f97f71289d4d07246dcaa 100644
--- a/paddle/gserver/layers/ExpandConvLayer.cpp
+++ b/paddle/gserver/layers/ExpandConvLayer.cpp
@@ -57,8 +57,7 @@ bool ExpandConvLayer::init(const LayerMap &layerMap,
convGradFilterType = "GemmConvGradFilter";
}
- if (FLAGS_use_nnpack) {
- CHECK_EQ(isDeconv_, false);
+ if (FLAGS_use_nnpack && !isDeconv_) {
createFunction(forward_,
"NNPACKConv",
FuncConfig()
diff --git a/paddle/gserver/tests/CMakeLists.txt b/paddle/gserver/tests/CMakeLists.txt
index 4546d12a903084e7a746b967c39d67a0ade4c0cd..5511ab6b8bb05108e76cc0913264d864d2fecf5b 100644
--- a/paddle/gserver/tests/CMakeLists.txt
+++ b/paddle/gserver/tests/CMakeLists.txt
@@ -1,10 +1,5 @@
# gserver pacakge unittests
-file(GLOB_RECURSE GSERVER_HEADER RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "*.h")
-file(GLOB_RECURSE GSERVER_SOURCES RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "*.cpp")
-add_style_check_target(paddle_gserver ${GSERVER_SOURCES})
-add_style_check_target(paddle_gserver ${GSERVER_HEADER})
-
################### test_ProtoDataProvider ############
add_unittest_without_exec(test_ProtoDataProvider
test_ProtoDataProvider.cpp)
diff --git a/paddle/operators/add_op.cc b/paddle/operators/add_op.cc
index 7fbdd84a391c7d0048fca473f7318561df50daa2..d4c05ed483ca56a31dd8ee4d81b54551ae6da0d1 100644
--- a/paddle/operators/add_op.cc
+++ b/paddle/operators/add_op.cc
@@ -20,8 +20,8 @@ namespace operators {
class AddOp : public OperatorWithKernel {
protected:
void InferShape(const InferShapeContext &ctx) const override {
- PADDLE_ENFORCE(ctx.InputSize() == 2, "Input size of AddOp must be two");
- PADDLE_ENFORCE(ctx.OutputSize() == 1, "Output size of AddOp must be one");
+ PADDLE_ENFORCE_EQ(ctx.InputSize(), 2);
+ PADDLE_ENFORCE_EQ(ctx.OutputSize(), 1);
PADDLE_ENFORCE(ctx.InputVar(0) != nullptr && ctx.InputVar(1) != nullptr,
"Inputs of AddOp must all be set");
PADDLE_ENFORCE(ctx.OutputVar(0) != nullptr,
diff --git a/paddle/operators/mul_op.cc b/paddle/operators/mul_op.cc
index f41e95e9db494109925fb600ec6bbd47edf6cc74..ccab9a994cc7aa9e389bd259e4c7365a06e93aa1 100644
--- a/paddle/operators/mul_op.cc
+++ b/paddle/operators/mul_op.cc
@@ -23,12 +23,16 @@ class MulOp : public OperatorWithKernel {
PADDLE_ENFORCE(ctx.InputSize() == 2, "The mul op must take two inputs");
auto dim0 = ctx.Input<Tensor>(0)->dims();
auto dim1 = ctx.Input<Tensor>(1)->dims();
- PADDLE_ENFORCE(dim0.size() == 2 && dim1.size() == 2,
- "The input of mul op must be matrix");
- PADDLE_ENFORCE(
- dim0[1] == dim1[0],
+ PADDLE_ENFORCE_EQ(dim0.size(), 2,
+ "input X(%s) should be a tensor with 2 dims, a matrix",
+ ctx.op_.Input("X"));
+ PADDLE_ENFORCE_EQ(dim1.size(), 2,
+ "input Y(%s) should be a tensor with 2 dims, a matrix",
+ ctx.op_.Input("Y"));
+ PADDLE_ENFORCE_EQ(
+ dim0[1], dim1[0],
"First matrix's width must be equal with second matrix's height.");
- PADDLE_ENFORCE(ctx.OutputSize() == 1, "The mul op must take one output");
+ PADDLE_ENFORCE_EQ(ctx.OutputSize(), 1, "The mul op takes only one output");
ctx.Output<Tensor>(0)->Resize({dim0[0], dim1[1]});
}
};
diff --git a/paddle/operators/recurrent_op.cc b/paddle/operators/recurrent_op.cc
index 389d4323950269b81912a7213ff64872aafb410f..5e9c15ca0e6a7c56611a0fadda6c3c0839f309e6 100644
--- a/paddle/operators/recurrent_op.cc
+++ b/paddle/operators/recurrent_op.cc
@@ -36,6 +36,7 @@ void RecurrentAlgorithm::InferShape(const Scope& scope) const {
InitMemories(step_scopes[0], true /*infer_shape_mode*/);
Variable* net = scope.FindVar(arg_->step_net);
PADDLE_ENFORCE(net != nullptr, "failed to get step net");
+
for (size_t i = 0; i < seq_len_; i++) {
if (i > 0) {
rnn::LinkMemories(step_scopes, arg_->memories, i, -1,
@@ -56,6 +57,7 @@ void RecurrentAlgorithm::Run(const Scope& scope,
Variable* net = scope.FindVar(arg_->step_net);
for (size_t step_id = 0; step_id < seq_len_; step_id++) {
+ // create output alias variables
if (step_id > 0) {
rnn::LinkMemories(step_scopes, arg_->memories, step_id, -1,
false /*infer_shape_mode*/);
@@ -67,22 +69,31 @@ void RecurrentAlgorithm::Run(const Scope& scope,
}
void RecurrentAlgorithm::CreateScopes(const Scope& scope) const {
- // TODO(xxx) Only two scopes are needed for inference, this case will be
+ // TODO(superjom) Only two scopes are needed for inference, this case will be
// supported later.
- auto step_scopes =
- scope.FindVar(arg_->step_scopes)->GetMutable<std::vector<Scope*>>();
+ auto step_scopes_var = scope.FindVar(arg_->step_scopes);
+ PADDLE_ENFORCE(step_scopes_var != nullptr, "");
+ auto step_scopes = step_scopes_var->GetMutable<std::vector<Scope*>>();
+
+ // Now all variables in scope must be created outside of op.
+ auto net_var = scope.FindVar(arg_->step_net);
+ PADDLE_ENFORCE(net_var != nullptr, "no stepnet called %s in scope",
+ arg_->step_net);
+ auto net_op = net_var->GetMutable<NetOp>();
+ PADDLE_ENFORCE(!net_op->outputs_.empty(), "net_op has no outputs");
if (seq_len_ > step_scopes->size()) {
for (size_t i = step_scopes->size(); i < seq_len_; ++i) {
auto& step_scope = scope.NewScope();
- // Now all variables in scope must be created outside of op.
- auto net_op = scope.FindVar(arg_->step_net)->GetMutable<NetOp>();
+ // create step net's temp inputs
for (auto& input : net_op->inputs_) {
// the weight are located in parent scope
- if (!step_scope.FindVar(input)) step_scope.NewVar(input);
+ if (!step_scope.FindVar(input))
+ step_scope.NewVar(input)->GetMutable<Tensor>();
}
- for (auto& output : net_op->outputs_) {
+ // create stepnet's outputs
+ for (const auto& output : net_op->outputs_) {
step_scope.NewVar(output);
}
step_scopes->emplace_back(&step_scope);
@@ -100,6 +111,7 @@ void RecurrentAlgorithm::InitMemories(Scope* step_scope,
Tensor* boot_mem = step_scope->FindVar(attr.boot_var)->GetMutable<Tensor>();
if (infer_shape_mode) {
pre_mem->Resize(boot_mem->dims());
+ PADDLE_ENFORCE_EQ(pre_mem->dims().size(), 2);
} else {
pre_mem->ShareDataWith(*boot_mem);
}
diff --git a/paddle/operators/rnn/recurrent_op_utils.cc b/paddle/operators/rnn/recurrent_op_utils.cc
index 43c97ba29f637828d717ac82516769deff52c7da..32c6c2dd4efa85359b4e95471e8ba09e56afec57 100644
--- a/paddle/operators/rnn/recurrent_op_utils.cc
+++ b/paddle/operators/rnn/recurrent_op_utils.cc
@@ -53,11 +53,13 @@ void ConcatOutputs(const std::vector<Scope*>& step_scopes,
PADDLE_ENFORCE(output_var != nullptr, "output link [%s] is not in scope.",
outlinks[i].external);
Tensor* output = output_var->GetMutable<Tensor>();
+
if (infer_shape_mode) {
- fmw::DDim step_dims = step_scopes[0]
- ->FindVar(outlinks[i].internal)
- ->GetMutable<Tensor>()
- ->dims();
+ auto step_scope_var = step_scopes[0]->FindVar(outlinks[i].internal);
+ PADDLE_ENFORCE(step_scope_var != nullptr, "%s not in scope",
+ outlinks[i].internal);
+ fmw::DDim step_dims =
+ step_scope_var->template GetMutable<Tensor>()->dims();
std::vector<int> dims_vec = vectorize(step_dims);
dims_vec.insert(dims_vec.begin(), seq_len);
output->Resize(fmw::make_ddim(dims_vec));
@@ -79,14 +81,15 @@ void LinkMemories(const std::vector<Scope*>& scopes,
const std::vector& memories,
const size_t step_id, const int offset,
bool infer_shape_mode) {
- PADDLE_ENFORCE(step_id < scopes.size(),
- "step [%d] is out of range of step scopes' size [%d]", step_id,
- scopes.size());
- PADDLE_ENFORCE(static_cast<int>(step_id) + offset >= 0,
- "offset [%d] must be large than -[%d]", offset, step_id);
- PADDLE_ENFORCE(step_id + offset < scopes.size(),
- "offset [%d] is out of range, it must be less than (%d - %d)",
- offset, scopes.size(), step_id);
+ PADDLE_ENFORCE_LT(step_id, scopes.size(),
+ "step [%d] is out of range of step scopes' size [%d]",
+ step_id, scopes.size());
+ PADDLE_ENFORCE_GE(static_cast<int>(step_id) + offset, 0,
+ "offset [%d] must be larger than -[%d]", offset, step_id);
+ PADDLE_ENFORCE_LT(
+ step_id + offset, scopes.size(),
+ "offset [%d] is out of range, it must be less than (%d - %d)", offset,
+ scopes.size(), step_id);
auto scope = scopes[step_id];
auto linked_scope = scopes[step_id + offset];
for (auto& attr : memories) {
diff --git a/paddle/operators/sigmoid_op.cc b/paddle/operators/sigmoid_op.cc
index 9d201eb93a2c0e34dd8e6869e97b43c4e278596e..1eb795faa858796f7a34aa495b43d043fdb5dd43 100644
--- a/paddle/operators/sigmoid_op.cc
+++ b/paddle/operators/sigmoid_op.cc
@@ -37,10 +37,8 @@ class SigmoidOpMaker : public OpProtoAndCheckerMaker {
class SigmoidOpGrad : public OperatorWithKernel {
protected:
- void InferShape(const InferShapeContext &ctx) const override {}
- std::string DebugString() const override {
- LOG(INFO) << "SigmoidGrad";
- return "";
+ void InferShape(const InferShapeContext &ctx) const override {
+ ctx.Output<Tensor>(0)->Resize(ctx.Input<Tensor>(0)->dims());
}
};
@@ -51,3 +49,5 @@ REGISTER_OP(sigmoid, ops::SigmoidOp, ops::SigmoidOpMaker);
REGISTER_GRADIENT_OP(sigmoid, sigmoid_grad, ops::SigmoidOpGrad);
REGISTER_OP_CPU_KERNEL(sigmoid, ops::SigmoidKernel<ops::CPUPlace, float>);
+REGISTER_OP_CPU_KERNEL(sigmoid_grad,
+ ops::SigmoidGradKernel<ops::CPUPlace, float>);
diff --git a/paddle/operators/sigmoid_op.cu b/paddle/operators/sigmoid_op.cu
index 2123b17e4b5e90c22c2d6e9177f2a8956f8a4ac9..e80ba081f2ff805664cf92f3cb47e9ad51889058 100644
--- a/paddle/operators/sigmoid_op.cu
+++ b/paddle/operators/sigmoid_op.cu
@@ -16,3 +16,5 @@
#include "paddle/operators/sigmoid_op.h"
REGISTER_OP_GPU_KERNEL(sigmoid, ops::SigmoidKernel<ops::GPUPlace, float>);
+REGISTER_OP_GPU_KERNEL(sigmoid_grad,
+ ops::SigmoidGradKernel<ops::GPUPlace, float>);
diff --git a/paddle/operators/sigmoid_op.h b/paddle/operators/sigmoid_op.h
index eb473920a5f866825b52ecb946653ccead7000ea..d513261e74423ce93a50eaaaec1c7d5fadb8f4a8 100644
--- a/paddle/operators/sigmoid_op.h
+++ b/paddle/operators/sigmoid_op.h
@@ -27,6 +27,7 @@ class SigmoidKernel : public OpKernel {
auto output = context.Output<Tensor>(0);
output->mutable_data<T>(context.GetPlace());
+ // The clipping is used in Paddle's raw implementation
auto X = EigenVector<T>::Flatten(*input);
auto Y = EigenVector<T>::Flatten(*output);
auto place = context.GetEigenDevice<Place>();
@@ -34,5 +35,23 @@ class SigmoidKernel : public OpKernel {
Y.device(place) = 1.0 / (1.0 + (-1.0 * X).exp());
}
};
+
+template <typename Place, typename T>
+class SigmoidGradKernel : public OpKernel {
+ public:
+ void Compute(const ExecutionContext& context) const override {
+ auto Y_t = context.Input<Tensor>("Y");
+ auto dY_t = context.Input<Tensor>(framework::GradVarName("Y"));
+ auto dX_t = context.Output<Tensor>(framework::GradVarName("X"));
+
+ dX_t->mutable_data<T>(context.GetPlace());
+
+ auto dX = EigenVector<T>::Flatten(*dX_t);
+ auto Y = EigenVector<T>::Flatten(*Y_t);
+ auto dY = EigenVector<T>::Flatten(*dY_t);
+ dX.device(context.GetEigenDevice<Place>()) = dY * Y * (1. - Y);
+ }
+};
+
} // namespace operators
} // namespace paddle
diff --git a/python/paddle/v2/framework/tests/CMakeLists.txt b/python/paddle/v2/framework/tests/CMakeLists.txt
index ee6726c31f278331df362616dbc1b6e6d9a8dbee..7eec37678815587b451008eef587b23bcb9beeaf 100644
--- a/python/paddle/v2/framework/tests/CMakeLists.txt
+++ b/python/paddle/v2/framework/tests/CMakeLists.txt
@@ -1,18 +1,23 @@
-add_python_test(test_framework
- test_protobuf.py
- test_scope.py
- test_default_scope_funcs.py
- test_op_creation_methods.py
- test_net.py
- test_tensor.py
- test_fc_op.py
- test_add_two_op.py
- test_sgd_op.py
- test_mul_op.py
- test_mean_op.py
- test_sigmoid_op.py
- test_softmax_op.py
- test_rowwise_add_op.py
- test_random_op.py
- test_network.py
- gradient_checker.py)
+py_test(test_net SRCS test_net.py)
+
+py_test(test_fc_op SRCS test_fc_op.py)
+py_test(test_scope SRCS test_scope.py)
+
+py_test(test_tensor SRCS test_tensor.py)
+py_test(test_mul_op SRCS test_mul_op.py)
+
+py_test(test_network SRCS test_network.py)
+py_test(test_mean_op SRCS test_mean_op.py)
+
+py_test(test_protobuf SRCS test_protobuf.py)
+
+py_test(test_add_two_op SRCS test_add_two_op.py)
+py_test(test_sigmoid_op SRCS test_sigmoid_op.py)
+py_test(test_softmax_op SRCS test_softmax_op.py)
+
+py_test(gradient_checker SRCS gradient_checker.py)
+
+py_test(test_rowwise_add_op SRCS test_rowwise_add_op.py)
+
+py_test(test_default_scope_funcs SRCS test_default_scope_funcs.py)
+py_test(test_op_creation_methods SRCS test_op_creation_methods.py)
diff --git a/python/paddle/v2/framework/tests/op_test_util.py b/python/paddle/v2/framework/tests/op_test_util.py
index 9ee66c2c5103811519c3a2c28653536f97009161..e6bc7d8a9b5ddd4582a5ef8a47cb63a7e5911892 100644
--- a/python/paddle/v2/framework/tests/op_test_util.py
+++ b/python/paddle/v2/framework/tests/op_test_util.py
@@ -33,23 +33,28 @@ class OpTestMeta(type):
for place in places:
for in_name in func.all_input_args:
- if hasattr(self, in_name):
+ if hasattr(self, "inputs") and in_name in self.inputs:
kwargs[in_name] = in_name
var = scope.new_var(in_name).get_tensor()
- arr = getattr(self, in_name)
+ arr = self.inputs[in_name]
var.set_dims(arr.shape)
var.set(arr, place)
else:
kwargs[in_name] = "@EMPTY@"
for out_name in func.all_output_args:
- if hasattr(self, out_name):
- kwargs[out_name] = out_name
- scope.new_var(out_name).get_tensor()
+ if not hasattr(self, "outputs"):
+ raise ValueError(
+ "The test op must set self.outputs dict.")
+ if out_name not in self.outputs:
+ raise ValueError("The %s is not in self.outputs dict." %
+ (out_name))
+ kwargs[out_name] = out_name
+ scope.new_var(out_name).get_tensor()
for attr_name in func.all_attr_args:
- if hasattr(self, attr_name):
- kwargs[attr_name] = getattr(self, attr_name)
+ if hasattr(self, "attrs") and attr_name in self.attrs:
+ kwargs[attr_name] = self.attrs[attr_name]
op = func(**kwargs)
@@ -60,7 +65,7 @@ class OpTestMeta(type):
for out_name in func.all_output_args:
actual = numpy.array(scope.find_var(out_name).get_tensor())
- expect = getattr(self, out_name)
+ expect = self.outputs[out_name]
numpy.isclose(actual, expect)
obj.test_all = test_all
diff --git a/python/paddle/v2/framework/tests/test_add_two_op.py b/python/paddle/v2/framework/tests/test_add_two_op.py
index 6e6643201bf361fce1bad7de10b2562f0525e00a..8ef48f4727b0af46a696c6f463045d98e7a08800 100644
--- a/python/paddle/v2/framework/tests/test_add_two_op.py
+++ b/python/paddle/v2/framework/tests/test_add_two_op.py
@@ -12,9 +12,11 @@ class TestAddOp(unittest.TestCase):
def setUp(self):
self.type = "add_two"
- self.X = numpy.random.random((102, 105)).astype("float32")
- self.Y = numpy.random.random((102, 105)).astype("float32")
- self.Out = self.X + self.Y
+ self.inputs = {
+ 'X': numpy.random.random((102, 105)).astype("float32"),
+ 'Y': numpy.random.random((102, 105)).astype("float32")
+ }
+ self.outputs = {'Out': self.inputs['X'] + self.inputs['Y']}
class TestAddGradOp(unittest.TestCase):
diff --git a/python/paddle/v2/framework/tests/test_cross_entropy_op.py b/python/paddle/v2/framework/tests/test_cross_entropy_op.py
index 6d022f6bc0be60dbf2f796780a969bff0e8bfded..b26e25d58b59bd1cb16e9ba2a1cccd27799b15f2 100644
--- a/python/paddle/v2/framework/tests/test_cross_entropy_op.py
+++ b/python/paddle/v2/framework/tests/test_cross_entropy_op.py
@@ -7,15 +7,17 @@ class TestSGD(unittest.TestCase):
__metaclass__ = OpTestMeta
def setUp(self):
+ # TODO: this unit test does not pass yet
self.type = "onehot_cross_entropy"
batch_size = 100
class_num = 10
- self.X = numpy.random.random((batch_size, class_num)).astype("float32")
- self.label = 5 * numpy.ones(batch_size).astype("int32")
+ X = numpy.random.random((batch_size, class_num)).astype("float32")
+ label = 5 * numpy.ones(batch_size).astype("int32")
+ self.inputs = {'X': X, 'label': label}
Y = []
for i in range(0, batch_size):
- Y.append(-numpy.log(self.X[i][self.label[i]]))
- self.Y = numpy.array(Y).astype("float32")
+ Y.append(-numpy.log(X[i][label[i]]))
+ self.outputs = {'Y': numpy.array(Y).astype("float32")}
# TODO(superjom) add gradient check
diff --git a/python/paddle/v2/framework/tests/test_mean_op.py b/python/paddle/v2/framework/tests/test_mean_op.py
index 78fff1eeff998109a51ea662f963a102eff49d3a..b5d52b90567bcd0c9f376147145d8638049f7bab 100644
--- a/python/paddle/v2/framework/tests/test_mean_op.py
+++ b/python/paddle/v2/framework/tests/test_mean_op.py
@@ -8,8 +8,8 @@ class TestMeanOp(unittest.TestCase):
def setUp(self):
self.type = "mean"
- self.X = np.random.random((32, 784)).astype("float32")
- self.Out = np.mean(self.X)
+ self.inputs = {'X': np.random.random((32, 784)).astype("float32")}
+ self.outputs = {'Out': np.mean(self.inputs['X'])}
if __name__ == '__main__':
diff --git a/python/paddle/v2/framework/tests/test_mul_op.py b/python/paddle/v2/framework/tests/test_mul_op.py
index e1ac66d3a4d23d617f7c5a4d97d070b2660954c8..ec0ac99156a546dd3fb7b27778032bece38ab5a9 100644
--- a/python/paddle/v2/framework/tests/test_mul_op.py
+++ b/python/paddle/v2/framework/tests/test_mul_op.py
@@ -8,9 +8,11 @@ class TestMulOp(unittest.TestCase):
def setUp(self):
self.type = "mul"
- self.X = np.random.random((32, 84)).astype("float32")
- self.Y = np.random.random((84, 100)).astype("float32")
- self.Out = np.dot(self.X, self.Y)
+ self.inputs = {
+ 'X': np.random.random((32, 84)).astype("float32"),
+ 'Y': np.random.random((84, 100)).astype("float32")
+ }
+ self.outputs = {'Out': np.dot(self.inputs['X'], self.inputs['Y'])}
if __name__ == '__main__':
diff --git a/python/paddle/v2/framework/tests/test_recurrent_op.py b/python/paddle/v2/framework/tests/test_recurrent_op.py
index 0457e3f16a709140180ce433c1d56d146f0b6974..5c77c477b347f4713e4af2a8cb462b243d7a779c 100644
--- a/python/paddle/v2/framework/tests/test_recurrent_op.py
+++ b/python/paddle/v2/framework/tests/test_recurrent_op.py
@@ -1,3 +1,4 @@
+import logging
import paddle.v2.framework.core as core
import unittest
import numpy as np
@@ -7,10 +8,9 @@ ops = creation.op_creations
def create_tensor(scope, name, shape):
- tensor = scope.create_var(name).get_tensor()
+ tensor = scope.new_var(name).get_tensor()
tensor.set_dims(shape)
- tensor.alloc_float()
- tensor.set(np.random.random(shape))
+ tensor.set(np.random.random(shape), core.CPUPlace())
return tensor
@@ -31,40 +31,36 @@ class TestRNN(unittest.TestCase):
- h
'''
+ input_dim = 30
+ batch_size = 50
+ weight_dim = 15
+ sent_len = 11
+
def init(self):
- input_dim = 30
- batch_size = 50
- weight_dim = 15
-
- self.scope = core.Scope(None)
-
- # create vars
- create_tensor(self.scope, "x", [batch_size, input_dim])
- create_tensor(self.scope, "W", [input_dim, weight_dim])
- create_tensor(self.scope, "U", [weight_dim, weight_dim])
- create_tensor(self.scope, "h_boot", [batch_size, weight_dim])
-
- x_alias = "x@alias"
- y_alias = "y@alias"
- memory = "h@alias"
- prememory = "h@pre"
- output = "rnn_out"
- output_alias = "rnn_out@alias"
-
- # create step net
- stepnet_var = self.scope.create_var("stepnet")
- stepnet = stepnet_var.get_net()
- # stepnet = core.Net.create()
- x_fc_op = ops.fc(X=x_alias, W="W", Y="Wx")
- h_fc_op = ops.fc(X=prememory, W="U", Y="Uh")
- sum_op = ops.add_two(X="Wx", Y="Uh", Out="sum")
- sig_op = ops.sigmoid(X="sum", Y=memory)
- stepnet.add_op(x_fc_op)
- stepnet.add_op(h_fc_op)
- stepnet.add_op(sum_op)
- stepnet.add_op(sig_op)
- stepnet.complete_add_op(True)
+ self.scope = core.Scope()
+
+ self.create_global_variables()
+ self.create_step_net()
+ rnn_op = self.create_rnn_op()
+ ctx = core.DeviceContext.create(core.CPUPlace())
+ print 'infer_shape'
+ rnn_op.infer_shape(self.scope)
+
+ rnn_op.run(self.scope, ctx)
+
+ def create_global_variables(self):
+ # create inlink
+ create_tensor(self.scope, "x",
+ [self.sent_len, self.batch_size, self.input_dim])
+ create_tensor(self.scope, "W", [self.input_dim, self.input_dim])
+ create_tensor(self.scope, "U", [self.input_dim, self.input_dim])
+ create_tensor(self.scope, "h_boot", [self.batch_size, self.input_dim])
+ self.scope.new_var("step_scopes")
+ self.scope.new_var("h@alias")
+ self.scope.new_var("h")
+
+ def create_rnn_op(self):
# create RNNOp
rnnop = ops.recurrent_op(
# inputs
@@ -72,17 +68,27 @@ class TestRNN(unittest.TestCase):
boot_memories=["h_boot"],
step_net="stepnet",
# outputs
- outlinks=[output],
+ outlinks=["h"],
step_scopes="step_scopes",
# attributes
inlink_alias=["x@alias"],
- outlink_alias=[output_alias],
- pre_memories=[prememory],
- memories=[memory])
+ outlink_alias=["h@alias"],
+ pre_memories=["h@pre"],
+ memories=["h@alias"])
+ return rnnop
+
+ def create_step_net(self):
+ var = self.scope.new_var("stepnet")
+ stepnet = var.get_net()
- ctx = core.DeviceContext.cpu_context()
- rnnop.infer_shape(self.scope)
- rnnop.run(self.scope, ctx)
+ x_fc_op = ops.fc(X="x@alias", W="W", Y="Wx")
+ h_fc_op = ops.fc(X="h@pre", W="U", Y="Uh")
+ sum_op = ops.add_two(X="Wx", Y="Uh", Out="sum")
+ sig_op = ops.sigmoid(X="sum", Y="h@alias")
+
+ for op in [x_fc_op, h_fc_op, sum_op, sig_op]:
+ stepnet.add_op(op)
+ stepnet.complete_add_op(True)
def test_recurrent(self):
self.init()
diff --git a/python/paddle/v2/framework/tests/test_rowwise_add_op.py b/python/paddle/v2/framework/tests/test_rowwise_add_op.py
index 04abc14ee198fe4e2307e009c696a2b40ec271b6..f8521eb517057fbeb104b28af7da4fffe54f37de 100644
--- a/python/paddle/v2/framework/tests/test_rowwise_add_op.py
+++ b/python/paddle/v2/framework/tests/test_rowwise_add_op.py
@@ -8,9 +8,11 @@ class TestRowwiseAddOp(unittest.TestCase):
def setUp(self):
self.type = "rowwise_add"
- self.X = np.random.random((32, 84)).astype("float32")
- self.b = np.random.random(84).astype("float32")
- self.Out = np.add(self.X, self.b)
+ self.inputs = {
+ 'X': np.random.random((32, 84)).astype("float32"),
+ 'b': np.random.random(84).astype("float32")
+ }
+ self.outputs = {'Out': np.add(self.inputs['X'], self.inputs['b'])}
if __name__ == '__main__':
diff --git a/python/paddle/v2/framework/tests/test_sgd_op.py b/python/paddle/v2/framework/tests/test_sgd_op.py
index ca03cc11abe2ceb31b33a87797aa752943dd2a7d..e5f9ef865e84f1a78e28884ad7e2e758f9ca8054 100644
--- a/python/paddle/v2/framework/tests/test_sgd_op.py
+++ b/python/paddle/v2/framework/tests/test_sgd_op.py
@@ -8,10 +8,13 @@ class TestSGD(unittest.TestCase):
def setUp(self):
self.type = "sgd"
- self.param = numpy.random.random((102, 105)).astype("float32")
- self.grad = numpy.random.random((102, 105)).astype("float32")
- self.learning_rate = 0.1
- self.param_out = self.param - self.learning_rate * self.grad
+ w = numpy.random.random((102, 105)).astype("float32")
+ g = numpy.random.random((102, 105)).astype("float32")
+ lr = 0.1
+
+ self.inputs = {'param': w, 'grad': g}
+ self.attrs = {'learning_rate': lr}
+ self.outputs = {'param_out': w - lr * g}
if __name__ == "__main__":
diff --git a/python/paddle/v2/framework/tests/test_sigmoid_op.py b/python/paddle/v2/framework/tests/test_sigmoid_op.py
index 50044a122f1d66dd54a24f6cce76074a60ee2262..2a57a41ed8b718fd420062ba68e853a4861b7359 100644
--- a/python/paddle/v2/framework/tests/test_sigmoid_op.py
+++ b/python/paddle/v2/framework/tests/test_sigmoid_op.py
@@ -8,9 +8,12 @@ class TestSigmoidOp(unittest.TestCase):
def setUp(self):
self.type = "sigmoid"
- self.X = np.random.random((32, 100)).astype("float32")
- self.Y = 1 / (1 + np.exp(-self.X))
+ self.inputs = {'X': np.random.random((32, 100)).astype("float32")}
+ self.outputs = {'Y': 1 / (1 + np.exp(-self.inputs['X']))}
+#class TestSigmoidGradOp(unittest.TestCase):
+#TODO(qingqing) add unit test
+
if __name__ == '__main__':
unittest.main()
diff --git a/python/paddle/v2/framework/tests/test_softmax_op.py b/python/paddle/v2/framework/tests/test_softmax_op.py
index c80888128781d98e4ed30d845a30b39121f66459..98ca8ddc860c3825411b02b2f6ed612db46a18d7 100644
--- a/python/paddle/v2/framework/tests/test_softmax_op.py
+++ b/python/paddle/v2/framework/tests/test_softmax_op.py
@@ -19,8 +19,10 @@ class TestSoftmaxOp(unittest.TestCase):
def setUp(self):
self.type = "softmax"
- self.X = np.random.random((32, 100)).astype("float32")
- self.Y = np.apply_along_axis(stable_softmax, 1, self.X)
+ self.inputs = {'X': np.random.random((32, 100)).astype("float32")}
+ self.outputs = {
+ 'Y': np.apply_along_axis(stable_softmax, 1, self.inputs['X'])
+ }
class TestSoftmaxGradOp(unittest.TestCase):
diff --git a/python/paddle/v2/plot/tests/CMakeLists.txt b/python/paddle/v2/plot/tests/CMakeLists.txt
index da5cd764889b48a3af8461a2793d948aa609d6c1..4b6c1c80969182ccf6e0189b18bade8758bbbc30 100644
--- a/python/paddle/v2/plot/tests/CMakeLists.txt
+++ b/python/paddle/v2/plot/tests/CMakeLists.txt
@@ -1,5 +1,5 @@
if (NOT APPLE)
# The Mac OS X backend will not be able to function correctly if Python is
# not installed as a framework.
- add_python_test(test_ploter test_ploter.py)
+ py_test(test_ploter SRCS test_ploter.py)
endif()
diff --git a/python/paddle/v2/reader/tests/CMakeLists.txt b/python/paddle/v2/reader/tests/CMakeLists.txt
index 6a1d337b232c7a849a8793894bf16d26d609d3dd..107d5912e1567e0c8721987a281272c7feb51e63 100644
--- a/python/paddle/v2/reader/tests/CMakeLists.txt
+++ b/python/paddle/v2/reader/tests/CMakeLists.txt
@@ -1 +1,2 @@
-add_python_test(reader_tests creator_test.py decorator_test.py)
+py_test(creator_test SRCS creator_test.py)
+py_test(decorator_test SRCS decorator_test.py)
diff --git a/python/paddle/v2/tests/CMakeLists.txt b/python/paddle/v2/tests/CMakeLists.txt
index 058f22befd0657d06ff130ace55fe7322148213d..b7791559594321a85f41b508b69efeb077d69595 100644
--- a/python/paddle/v2/tests/CMakeLists.txt
+++ b/python/paddle/v2/tests/CMakeLists.txt
@@ -1,2 +1,7 @@
-add_python_test(test_v2_api test_data_feeder.py test_op.py test_parameters.py
-test_layer.py test_rnn_layer.py test_topology.py test_image.py)
+py_test(test_op SRCS test_op.py)
+py_test(test_image SRCS test_image.py)
+py_test(test_layer SRCS test_layer.py)
+py_test(test_topology SRCS test_topology.py)
+py_test(test_rnn_layer SRCS test_rnn_layer.py)
+py_test(test_parameters SRCS test_parameters.py)
+py_test(test_data_feeder SRCS test_data_feeder.py)